diff --git a/.github/workflows/autodeploy.yml b/.github/workflows/autodeploy.yml index e6bfecff2..78cb9b8b0 100644 --- a/.github/workflows/autodeploy.yml +++ b/.github/workflows/autodeploy.yml @@ -8,7 +8,7 @@ on: branches: [ "master" ] # Pattern matched against refs/tags tags: - - '*' # Push events to every tag not containing '/' (use '**' for hierarchical tags) + - 'v*' # Push events to every tag not containing '/' (use '**' for hierarchical tags) # Dont allow running manually from Actions tab -- use manualdeploy for this #workflow_dispatch: @@ -35,7 +35,7 @@ jobs: - name: Build wheels uses: pypa/cibuildwheel@v2.1.2 env: - CIBW_BUILD: cp36-* cp37-* cp38-* cp39-* + CIBW_BUILD: cp37-* cp38-* cp39-* cp310-* CIBW_BUILD_VERBOSITY: 1 CIBW_BEFORE_ALL_LINUX: ./.github/ci-scripts/before_install.sh diff --git a/.github/workflows/extras.yml b/.github/workflows/extras.yml index 2f04d4e31..e0e3fd3dd 100644 --- a/.github/workflows/extras.yml +++ b/.github/workflows/extras.yml @@ -46,7 +46,7 @@ jobs: - name: Install package run: | python -m pip install --upgrade pip - # Installing with -e to keep installation local (for NOSE_NOPATH) + # Installing with -e to keep installation local # but still compile Cython extensions python -m pip install -e .[testing] python setup.py build_ext --inplace diff --git a/.github/workflows/notebook.yml b/.github/workflows/notebook.yml index b2d5b1fdb..1622e0a65 100644 --- a/.github/workflows/notebook.yml +++ b/.github/workflows/notebook.yml @@ -52,7 +52,7 @@ jobs: - name: Install package run: | python -m pip install --upgrade pip - # Installing with -e to keep installation local (for NOSE_NOPATH) + # Installing with -e to keep installation local # but still compile Cython extensions python -m pip install -e .[testing] python setup.py build_ext --inplace diff --git a/.gitignore b/.gitignore index 171239ff8..e89bbe20f 100644 --- a/.gitignore +++ b/.gitignore @@ -20,8 +20,10 @@ hooks/etc/permissions.yml .ropeproject .vscode doc/_autosummary +doc/autoapi doc/build -.venv +.venv* + # Test Metadata # ################# @@ -66,6 +68,7 @@ dist/* /pygsti.egg-info /pyGSTi.egg-info parsetab_string.py +*.pyd # These are handwritten, not generated by cython, and should not be ignored !/pygsti/objects/replib/fastreps.cpp diff --git a/.readthedocs.yaml b/.readthedocs.yaml new file mode 100644 index 000000000..70e2db7c7 --- /dev/null +++ b/.readthedocs.yaml @@ -0,0 +1,58 @@ +# Read the Docs configuration file for Sphinx projects + +# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details + + +# Required + +version: 2 + + +# Set the OS, Python version and other tools you might need + +build: + + os: ubuntu-22.04 + + tools: + + python: "3.11" + + # You can also specify other tool versions: + + # nodejs: "19" + + # rust: "1.64" + + # golang: "1.19" + + +# Build documentation in the "docs/" directory with Sphinx + +sphinx: + + configuration: doc/conf.py + + +# Optionally build your docs in additional formats such as PDF and ePub + +# formats: + +# - pdf + +# - epub + + +# Optional but recommended, declare the Python requirements required + +# to build your documentation + +# See https://docs.readthedocs.io/en/stable/guides/reproducible-builds.html + +python: + + install: + - requirements: rtd-requirements.txt + + - method: pip + path: . 
\ No newline at end of file diff --git a/CHANGELOG b/CHANGELOG index b4fa48cb3..fe9a37ba6 100644 --- a/CHANGELOG +++ b/CHANGELOG @@ -1,5 +1,26 @@ # CHANGELOG +## [0.9.11.2] - 2023-08-11 + +### Fixed + +- Fixed FAQ hyperlinks (#304) +- Removed deprecated functions for NumPy 1.25+ (#335) +- Fixed pickling of TPPOVM objects (#336) +- Updated the ReadTheDocs builds (#331) +- Fixed dataset pickling (#326) +- Removed deprecated functions for notebook 7+ (#337) + +### Changed + +- Slight performance improvements for GST fitting (#305) + +## [0.9.11.1] - 2023-05-19 + +### Fixed + +- Guarded an optional markupsafe import for report generation + ## [0.9.11] - 2023-05-16 ### Added diff --git a/doc/_templates/autosummary/base.rst b/doc/_templates/autosummary/base.rst deleted file mode 100644 index 3fe9858d7..000000000 --- a/doc/_templates/autosummary/base.rst +++ /dev/null @@ -1,5 +0,0 @@ -{{ objname | escape | underline }} - -.. currentmodule:: {{ module }} - -.. auto{{ objtype }}:: {{ objname }} diff --git a/doc/_templates/autosummary/class.rst b/doc/_templates/autosummary/class.rst deleted file mode 100644 index 3cdcd0f2a..000000000 --- a/doc/_templates/autosummary/class.rst +++ /dev/null @@ -1,5 +0,0 @@ -{{ objname | escape | underline }} - -.. currentmodule:: {{ module }} - -.. autoclass:: {{ objname }} diff --git a/doc/_templates/autosummary/module.rst b/doc/_templates/autosummary/module.rst deleted file mode 100644 index ab1b581df..000000000 --- a/doc/_templates/autosummary/module.rst +++ /dev/null @@ -1,43 +0,0 @@ -{{ fullname | escape | underline }} - -.. automodule:: {{ fullname }} - - {% block functions %} - {% if functions %} - .. rubric:: Functions - - .. autosummary:: - :toctree: - - {% for item in functions %} - {{ item }} - {%- endfor %} - - {% endif %} - {% endblock %} - - {% block classes %} - {% if classes %} - .. rubric:: Classes - - .. autosummary:: - :toctree: - - {% for item in classes %} - {{ item }} - {%- endfor %} - {% endif %} - {% endblock %} - - {% block exceptions %} - {% if exceptions %} - .. rubric:: Exceptions - - .. autosummary:: - :toctree: - - {% for item in exceptions %} - {{ item }} - {%- endfor %} - {% endif %} - {% endblock %} diff --git a/doc/_templates/custom-module-template.rst b/doc/_templates/custom-module-template.rst deleted file mode 100644 index 5391c073f..000000000 --- a/doc/_templates/custom-module-template.rst +++ /dev/null @@ -1,65 +0,0 @@ -{{ fullname | escape | underline}} - -.. automodule:: {{ fullname }} - - {% block attributes %} - {% if attributes %} - .. rubric:: Module Attributes - - .. autosummary:: - :toctree: - {% for item in attributes %} - {{ item }} - {%- endfor %} - {% endif %} - {% endblock %} - - {% block functions %} - {% if functions %} - .. rubric:: {{ _('Functions') }} - - .. autosummary:: - :toctree: - {% for item in functions %} - {{ item }} - {%- endfor %} - {% endif %} - {% endblock %} - - {% block classes %} - {% if classes %} - .. rubric:: {{ _('Classes') }} - - .. autosummary:: - :toctree: - {% for item in classes %} - {{ item }} - {%- endfor %} - {% endif %} - {% endblock %} - - {% block exceptions %} - {% if exceptions %} - .. rubric:: {{ _('Exceptions') }} - - .. autosummary:: - :toctree: - {% for item in exceptions %} - {{ item }} - {%- endfor %} - {% endif %} - {% endblock %} - -{% block modules %} -{% if modules %} -.. rubric:: Modules - -.. 
autosummary:: - :toctree: - :template: custom-module-template.rst - :recursive: -{% for item in modules %} - {{ item }} -{%- endfor %} -{% endif %} -{% endblock %} \ No newline at end of file diff --git a/doc/_templates/index.rst b/doc/_templates/index.rst new file mode 100644 index 000000000..95d0ad891 --- /dev/null +++ b/doc/_templates/index.rst @@ -0,0 +1,15 @@ +API Reference +============= + +This page contains auto-generated API reference documentation [#f1]_. + +.. toctree:: + :titlesonly: + + {% for page in pages %} + {% if page.top_level_object and page.display %} + {{ page.include_path }} + {% endif %} + {% endfor %} + +.. [#f1] Created with `sphinx-autoapi <https://github.com/readthedocs/sphinx-autoapi>`_ diff --git a/doc/_templates/python/attribute.rst b/doc/_templates/python/attribute.rst new file mode 100644 index 000000000..ebaba555a --- /dev/null +++ b/doc/_templates/python/attribute.rst @@ -0,0 +1 @@ +{% extends "python/data.rst" %} diff --git a/doc/_templates/python/class.rst b/doc/_templates/python/class.rst new file mode 100644 index 000000000..a584dcd6e --- /dev/null +++ b/doc/_templates/python/class.rst @@ -0,0 +1,60 @@ +{% if obj.display %} +.. py:{{ obj.type }}:: {{ obj.short_name }}{% if obj.args %}({{ obj.args }}){% endif %} +
{% for (args, return_annotation) in obj.overloads %} + {{ " " * (obj.type | length) }} {{ obj.short_name }}{% if args %}({{ args }}){% endif %} +
{% endfor %} + + + {% if obj.bases %} + {% if "show-inheritance" in autoapi_options %} + Bases: {% for base in obj.bases %}{{ base|link_objs }}{% if not loop.last %}, {% endif %}{% endfor %} + {% endif %} + + + {% if "show-inheritance-diagram" in autoapi_options and obj.bases != ["object"] %} + .. autoapi-inheritance-diagram:: {{ obj.obj["full_name"] }} + :parts: 1 + {% if "private-members" in autoapi_options %} + :private-bases: + {% endif %} + + {% endif %} + {% endif %} + {% if obj.docstring %} + {{ obj.docstring|indent(3) }} + {% endif %} + {% if "inherited-members" in autoapi_options %} + {% set visible_classes = obj.classes|selectattr("display")|list %} + {% else %} + {% set visible_classes = obj.classes|rejectattr("inherited")|selectattr("display")|list %} + {% endif %} + {% for klass in visible_classes %} + {{ klass.render()|indent(3) }} + {% endfor %} + {% if "inherited-members" in autoapi_options %} + {% set visible_properties = obj.properties|selectattr("display")|list %} + {% else %} + {% set visible_properties = obj.properties|rejectattr("inherited")|selectattr("display")|list %} + {% endif %} + {% for property in visible_properties %} + {{ property.render()|indent(3) }} + {% endfor %} + {% if "inherited-members" in autoapi_options %} + {% set visible_attributes = obj.attributes|selectattr("display")|list %} + {% else %} + {% set visible_attributes = obj.attributes|rejectattr("inherited")|selectattr("display")|list %} + {% endif %} + {% for attribute in visible_attributes %} + {{ attribute.render()|indent(3) }} + {% endfor %} + {% if "inherited-members" in autoapi_options %} + {% set visible_methods = obj.methods|selectattr("display")|list %} + {% else %} + {% set visible_methods = obj.methods|rejectattr("inherited")|selectattr("display")|list %} + {% endif %} + {% for method in visible_methods %} + {{ method.render()|indent(3) }} + {% endfor %} +{% endif %} diff --git a/doc/_templates/python/data.rst b/doc/_templates/python/data.rst new file mode 100644 index 000000000..3d12b2d0c --- /dev/null +++ b/doc/_templates/python/data.rst @@ -0,0 +1,37 @@ +{% if obj.display %} +.. 
py:{{ obj.type }}:: {{ obj.name }} + {%- if obj.annotation is not none %} + + :type: {%- if obj.annotation %} {{ obj.annotation }}{%- endif %} + + {%- endif %} + + {%- if obj.value is not none %} + + :value: {% if obj.value is string and obj.value.splitlines()|count > 1 -%} + Multiline-String + + .. raw:: html + +
<details><summary><a>Show Value</a></summary> + + .. code-block:: python + + """{{ obj.value|indent(width=8,blank=true) }}""" + + .. raw:: html + + </details>
+ + {%- else -%} + {%- if obj.value is string -%} + {{ "%r" % obj.value|string|truncate(100) }} + {%- else -%} + {{ obj.value|string|truncate(100) }} + {%- endif -%} + {%- endif %} + {%- endif %} + + + {{ obj.docstring|indent(3) }} +{% endif %} diff --git a/doc/_templates/python/exception.rst b/doc/_templates/python/exception.rst new file mode 100644 index 000000000..92f3d38fd --- /dev/null +++ b/doc/_templates/python/exception.rst @@ -0,0 +1 @@ +{% extends "python/class.rst" %} diff --git a/doc/_templates/python/function.rst b/doc/_templates/python/function.rst new file mode 100644 index 000000000..778ba9a88 --- /dev/null +++ b/doc/_templates/python/function.rst @@ -0,0 +1,15 @@ +{% if obj.display %} +.. py:function:: {{ obj.short_name }}({{ obj.args }}){% if obj.return_annotation is not none %} -> {{ obj.return_annotation }}{% endif %} + +{% for (args, return_annotation) in obj.overloads %} + {{ obj.short_name }}({{ args }}){% if return_annotation is not none %} -> {{ return_annotation }}{% endif %} + +{% endfor %} + {% for property in obj.properties %} + :{{ property }}: + {% endfor %} + + {% if obj.docstring %} + {{ obj.docstring|indent(3) }} + {% endif %} +{% endif %} diff --git a/doc/_templates/python/method.rst b/doc/_templates/python/method.rst new file mode 100644 index 000000000..f50a1bd6e --- /dev/null +++ b/doc/_templates/python/method.rst @@ -0,0 +1,19 @@ +{%- if obj.display %} +.. py:method:: {{ obj.short_name }}({{ obj.args }}){% if obj.return_annotation is not none %} -> {{ obj.return_annotation }}{% endif %} + +{% for (args, return_annotation) in obj.overloads %} + {{ obj.short_name }}({{ args }}){% if return_annotation is not none %} -> {{ return_annotation }}{% endif %} + +{% endfor %} + {% if obj.properties %} + {% for property in obj.properties %} + :{{ property }}: + {% endfor %} + + {% else %} + + {% endif %} + {% if obj.docstring %} + {{ obj.docstring|indent(3) }} + {% endif %} +{% endif %} diff --git a/doc/_templates/python/module.rst b/doc/_templates/python/module.rst new file mode 100644 index 000000000..9f81c64a7 --- /dev/null +++ b/doc/_templates/python/module.rst @@ -0,0 +1,117 @@ +{% if not obj.display %} +:orphan: + +{% endif %} +:py:mod:`{{ obj.name }}` +=========={{ "=" * obj.name|length }} + +.. py:module:: {{ obj.name }} + +{% if obj.docstring %} +.. autoapi-nested-parse:: + + {{ obj.docstring|indent(3) }} + +{% endif %} + +{% block subpackages %} +{% set visible_subpackages = obj.subpackages|selectattr("display")|list %} +{% if visible_subpackages %} +Subpackages +----------- +.. toctree:: + :titlesonly: + :maxdepth: 1 + +{% for subpackage in visible_subpackages %} + {{ subpackage.short_name }}/index.rst +{% endfor %} + + +{% endif %} +{% endblock %} + +{% block submodules %} +{% set visible_submodules = obj.submodules|selectattr("display")|list %} +{% if visible_submodules %} +Submodules +---------- +.. 
toctree:: + :titlesonly: + :maxdepth: 1 + +{% for submodule in visible_submodules %} + {{ submodule.short_name }}/index.rst +{% endfor %} + + +{% endif %} +{% endblock %} + + +{% block content %} +{% if obj.all is not none %} +{% set visible_children = obj.children|selectattr("short_name", "in", obj.all)|list %} +{% elif obj.type is equalto("package") %} +{% set visible_children = obj.children|selectattr("display")|list %} +{% else %} +{% set visible_children = obj.children|selectattr("display")|rejectattr("imported")|list %} +{% endif %} +{% if visible_children and not obj.top_level_object%} +{{ obj.type|title }} Contents +{{ "-" * obj.type|length }}--------- + +{% set visible_classes = visible_children|selectattr("type", "equalto", "class")|list %} +{% set visible_functions = visible_children|selectattr("type", "equalto", "function")|list %} +{% set visible_attributes = visible_children|selectattr("type", "equalto", "data")|list %} +{% if "show-module-summary" in autoapi_options and (visible_classes or visible_functions) %} +{% block classes scoped %} +{% if visible_classes and not obj.top_level_object %} +Classes +~~~~~~~ + +.. autoapisummary:: + +{% for klass in visible_classes %} + {{ klass.id }} +{% endfor %} + + +{% endif %} +{% endblock %} + +{% block functions scoped %} +{% if visible_functions and not obj.top_level_object %} +Functions +~~~~~~~~~ + +.. autoapisummary:: + +{% for function in visible_functions %} + {{ function.id }} +{% endfor %} + + +{% endif %} +{% endblock %} + +{% block attributes scoped %} +{% if visible_attributes and not obj.top_level_object %} +Attributes +~~~~~~~~~~ + +.. autoapisummary:: + +{% for attribute in visible_attributes %} + {{ attribute.id }} +{% endfor %} + + +{% endif %} +{% endblock %} +{% endif %} +{% for obj_item in visible_children %} +{{ obj_item.render()|indent(0) }} +{% endfor %} +{% endif %} +{% endblock %} diff --git a/doc/_templates/python/package.rst b/doc/_templates/python/package.rst new file mode 100644 index 000000000..fb9a64965 --- /dev/null +++ b/doc/_templates/python/package.rst @@ -0,0 +1 @@ +{% extends "python/module.rst" %} diff --git a/doc/_templates/python/property.rst b/doc/_templates/python/property.rst new file mode 100644 index 000000000..70af24236 --- /dev/null +++ b/doc/_templates/python/property.rst @@ -0,0 +1,15 @@ +{%- if obj.display %} +.. py:property:: {{ obj.short_name }} + {% if obj.annotation %} + :type: {{ obj.annotation }} + {% endif %} + {% if obj.properties %} + {% for property in obj.properties %} + :{{ property }}: + {% endfor %} + {% endif %} + + {% if obj.docstring %} + {{ obj.docstring|indent(3) }} + {% endif %} +{% endif %} diff --git a/doc/conf.py b/doc/conf.py index 99d9e9e9b..4fb9f9445 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -34,22 +34,24 @@ # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. 
-extensions = [ - 'sphinx.ext.autodoc', - 'sphinx.ext.autosummary', - 'sphinx.ext.doctest', - 'sphinx.ext.coverage', - 'sphinx.ext.napoleon', - 'numpydoc' +extensions = ['autoapi.extension'] + +autoapi_dirs = ['../pygsti'] +autoapi_type = "python" + +autoapi_options = [ + "members", + "undoc-members", + "show-inheritance", + "show-module-summary", + "imported-members", ] -autosummary_generate = True -autosummary_imported_members = True -autodoc_default_options = { - 'members': None, - 'inherited-members': None, - 'show-inheritance': None, -} +autoapi_add_toctree_entry = False +autoapi_python_class_content = 'both' +autoapi_keep_files = True +autoapi_template_dir = "_templates" + napoleon_numpy_docstring = True napoleon_use_rtype = False @@ -86,7 +88,7 @@ # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. -language = None +language = 'en' # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: @@ -310,10 +312,3 @@ # Make the primary domain python since that's the only language we're using. primary_domain = 'py' - -# Define some default imports so that we don't have to include them in doctests. -doctest_global_setup = ''' -from __future__ import division, print_function -import numpy as np -import pygsti -''' diff --git a/doc/index.rst b/doc/index.rst index 99a9126d8..39679bab0 100644 --- a/doc/index.rst +++ b/doc/index.rst @@ -11,41 +11,16 @@ Welcome to pyGSTi's documentation Packages -------- -.. autosummary:: - :toctree: _autosummary - :template: custom-module-template.rst - :recursive: - +.. toctree:: + :maxdepth: 2 - .. This list must be manually updated with all - .. the pygsti subpackages we want documented - - pygsti.algorithms - pygsti.baseobjs - pygsti.circuits - pygsti.data - pygsti.drivers - pygsti.evotypes - pygsti.extras - pygsti.forwardsims - pygsti.io - pygsti.layouts - pygsti.modelmembers - pygsti.modelpacks - pygsti.models - pygsti.objectivefns - pygsti.optimize - pygsti.processors - pygsti.protocols - pygsti.report - pygsti.serialization - pygsti.tools + autoapi/pygsti/index License & Copyright ------------------- .. toctree:: :maxdepth: 1 - + LICENSE NOTICE diff --git a/doc/notes/repotools/test.md b/doc/notes/repotools/test.md index a6e5c0654..53874954c 100644 --- a/doc/notes/repotools/test.md +++ b/doc/notes/repotools/test.md @@ -2,6 +2,9 @@ ### By default, `./runTests.py` runs tests for all packages. +*Warning*. This file was designed around nosetests. When we converted to pytest +we didn't port all the available functionality (particularly the "parallel" +option). Correct behavior of coverage reporting has not been verified. 
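*Note*. The unported "parallel" behavior can likely be approximated with the third-party `pytest-xdist` plugin (e.g. `pip install pytest-xdist`, then `pytest -n auto test/unit`); this is a suggestion, not something `runTests.py` currently wires up.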
##### Optional flags: @@ -23,9 +26,9 @@ ex: *`./runTests.py tools io`* Runs only the `tools` and `io` packages *`./runTests.py report/testReport.py`* runs only tests in `report` *`./runTests.py report/testReport.py:TestReport.test_reports_logL_TP_wCIs`* runs the specific test `test_reports_logL_TP_wCIs`, which is a method of the test case `TestReport` -`runTests.py` now uses nose by default +`runTests.py` now uses pytest by default (So, the above example of `./runTests.py report/testReport.py` would expand to: -`python3.5 -m nose report/testReport.py`) +`pytest report/testReport.py`) - Current test coverage is outlined in test/coverage_status.txt diff --git a/doc/notes/travis.md b/doc/notes/travis.md deleted file mode 100644 index c33eabfef..000000000 --- a/doc/notes/travis.md +++ /dev/null @@ -1,173 +0,0 @@ -# Travis CI Notes - Rob - -(updated and continued from Lucas's previous notes) - -### Overview: - -When new history is pushed to `pyGSTio/pyGSTi`, Travis CI begins a new -build in a Xenial-based environment. The configuration of this build -depends on the branch, but consists of four sequential phases: - -1. [**Linting**](#linting), -2. [**Unit testing**](#unit-testing), -3. [**Extra tests**](#extra-tests) (on beta, master, and tags), and -4. [**Deployment**](#deployment) (on develop and tags) - -Phases are run sequentially, but jobs within a phase are run -concurrently. Once all jobs in a phase have finished _without error_, -the next phase begins. Should a job end with an error, it _fails_; -subsequent phases will be cancelled and the build is marked as -_failing._ - -### Linting - -[`flake8`][flake8] is used for static linting. We use two strategies -for linting: - -- _General linting_ checks for all linting errors we care about. It - uses the configuration defined in [`.flake8`][.flake8] -- _Critical linting_ checks only for major errors, including syntax - errors, runtime errors, and undefined names. It uses the - configuration defined in [`.flake8-critical`][.flake8-critical] - -Critical linting is less strict, in that it only checks for a strict -subset of the errors checked by general linting. - -The behavior of a build's **Linting** phase differs by branch: - -- On `beta`, `master`, and for tags, only _general linting_ is - run. Any errors will cause the build to fail. -- On all other branches, linting is split into two jobs. _Critical - linting_ errors will cause the build to fail. _General linting_ is - also run and will display errors on the build page, but will not - cause the build to fail. - -The net result is that developers can make a pull request with -less-serious style errors and merge into `develop` without issue, but -releases should be stylistically correct. Implicitly, that means a -maintainer (e.g. me or possibly even you) is needed to fix up style -errors before a build can be released. Furthermore, developers then -implicitly accept that a maintainer may need to re-format their code -in ways they might not like. - -### Unit testing - -Unit tests are included in [`test/unit`][unit-tests]. The -**Unit testing** build phase is run the same regardless of the branch, -and uses Travis CI's build matrix expansion. The unit test suite is -run with [`nose`][nose] using our [configuration](#nose-configuration) -_concurrently_ for each supported python version. 
- -As of writing, we support python versions: - -- 3.5 -- 3.6 -- 3.7 -- 3.8 - -### Extra tests - -In addition to [unit tests](#unit-testing), a number of additional -tests are included in [`test/test_packages`][test-packages]. These -additional tests cover various functionality not covered by unit -tests, including file I/O and multiprocessor tests. They also -typically take longer to run. - -The **Extra tests** build phase is run for the `beta` and `master` -branches, as well as tags. Like [unit tests](#unit-testing), these -tests are run with [`nose`][nose], but only for the earliest supported -python version (as of writing, this is 3.5). - -Because Travis CI jobs time out after 50 minutes, these tests are -split into several different jobs. The exact split is subject to -change. - -### Deployment - -At the end of a successful build, **Deployment** tasks are run. On -`develop`, the branch is pushed to `beta`, triggering a build for that -branch. On tags, the tag is packaged and deployed to [pyPI][pypi]. - -#### `develop` - -When a build on `develop` completes without error, the branch is -automatically pushed to `beta` using the script in -[`CI/push.sh`][push.sh]. Specifically, the `beta` branch is applied on -top of the new history in `develop` and pushed to origin; if `beta` -can't be automatically fast-forwarded, the job fails, and the issue -must be manually resolved. There are only a few circumstances in which -this merge can fail: - -- Something was (manually) pushed to `beta`. Don't do this. If it - happens, manually merge `beta` into `develop` and push both. -- History on `develop` was changed since the last automatic push to - `beta`. Don't do this either. If it happens, manually merge - `develop` into `beta` and push both. - -The push to github is authenticated using the encrypted private key at -[CI/github_deploy_key.enc][github_deploy_key.enc]. This key is -decrypted in the push stage's `before_install` script using secure -environment variables defined in the Travis CI repo settings. The -merge is performed by an automatic "Travis CI" user, but apparently, -because I made the key, Travis will show the builds as being triggered -by me. Don't worry about it. - - -#### tags - -When a build for a tag completes without error, the build is packaged -and deployed to [PyPI][pypi]. This uses Travis's built-in `deploy` -directive, but only after running a final job to build Cython extensions. - -As of writing, only a source distribution is published to PyPI. Wheels -can be published manually if desired. It would be nice if we could -publish wheels from our CI builds, but unfortunately for us, PyPI has -[strict requirements][manylinux] for wheels built in Linux -environments. Workarounds may be implemented in the future. - -Automatic deployments to PyPI are made using an automatic "pygsti-ci" -user. The password for this user is defined as an encrypted variable -in [`.travis.yml`][.travis.yml]. - -### Addendum - -#### Nose configuration - -We use a number of plugins and configuration options for -[`nose`][nose]. These are applied as environment variables in our -[`.travis.yml`][.travis.yml] configuration, but may be manually set by -developers via command-line arguments or under the `[nosetests]` -heading in `setup.cfg` - -- IDs (`--with-id`, `NOSE_WITH_ID=1`): gives a persistent ID number to - each test. These numbers are stored in `.noseids`, which is - gitignored, so IDs are local to each developer (but are still useful - for personal reference). 
-- Timer (`--with-timer`, `NOSE_WITH_TIMER=1`): shows a summary of the - time taken by each individual test at the end of a run. -- Coverage (`--with-coverage`, `NOSE_WITH_COVERAGE=1`): Shows a - coverage report after running tests. Currently, nothing is done with - this coverage report. You can also configure `nose` to generate an - HTML coverage report, which is useful. -- Rednose (`--rednose`, `NOSE_REDNOSE=1`): Adds color and readability - to test output. Currently has spotty support in the Travis build - logs, for unknown reasons. -- Verbosity 2 (`-v`, `NOSE_VERBOSE=2`): The default nose output is - much less useful. However, builds can get pretty big, so consider - disabling this in the future. -- Multiprocess (`--processes=-1`, `NOSE_PROCESSES=-1`): Run tests - concurrently. Faster builds, but may potentially cause issues. - - -[.flake8]: https://github.com/pyGSTio/pyGSTi/blob/master/.flake8 -[.flake8-critical]: https://github.com/pyGSTio/pyGSTi/blob/master/.flake8-critical -[.travis.yml]: https://github.com/pyGSTio/pyGSTi/blob/master/.travis.yml -[unit-tests]: https://github.com/pyGSTio/pyGSTi/tree/develop/test/unit -[test-packages]: https://github.com/pyGSTio/pyGSTi/tree/develop/test/test_packages -[push.sh]: https://github.com/pyGSTio/pyGSTi/blob/develop/CI/push.sh -[github_deploy_key.enc]: https://github.com/pyGSTio/pyGSTi/blob/develop/CI/github_deploy_key.enc - -[flake8]: http://flake8.pycqa.org/en/latest/ -[nose]: https://nose.readthedocs.io/en/latest/ -[pypi]: https://pypi.org/project/pyGSTi/ -[manylinux]: https://github.com/pypa/manylinux diff --git a/doc/requirements.txt b/doc/requirements.txt deleted file mode 100644 index 80dc9c858..000000000 --- a/doc/requirements.txt +++ /dev/null @@ -1,3 +0,0 @@ -sphinx>=2.1 -numpydoc -sphinx_rtd_theme diff --git a/jupyter_notebooks/FAQ.ipynb b/jupyter_notebooks/FAQ.ipynb index b4b72a2a0..ec87574d0 100644 --- a/jupyter_notebooks/FAQ.ipynb +++ b/jupyter_notebooks/FAQ.ipynb @@ -8,23 +8,23 @@ "This notebook contains a list of frequently asked questions and their answers. If the answer is short, example code is given directly in this notebook; if not, then the reader is referred to other pyGSTi tutorials and examples.\n", "\n", "## Contents\n", - "- [What is GST?](#what_is_gst)\n", - "- [What is pyGSTi?](#what_is_pygsti)\n", - "- [How do I run GST (1 or 2 qubits)?](#how_do_I_run_GST)\n", - "- [How do I constrain my gates to be Trace-Preserving (TP)?](#how_constrain_TP)\n", - "- [How do I constrain my gates to be CPTP?](#how_constrain_CPTP)\n", - "- [What is gauge optimization?](#what_is_gaugeopt)\n", - "- [How do I specify the details of gauge optimization?](#how_specify_gaugeopt)\n", - "- [2-qubit GST is running very slowly - how do I speed it up?](#twoQ_GST_is_slow)\n", - "- [My report doesn't have error bars!](#reportGen_isnt_giving_errbars)\n", - "- [Generating reports takes a long time - can it go any faster?](#reportGen_is_slow)\n", - "- [Germ selection is slow - can it go faster?](#germsel_is_slow)\n", - "- [Can I generate bootstrapped error bars with pyGSTi?](#bootstrapped_error_bars)\n", - "- [How can I model context dependence?](#context_dependence)\n", - "- [I have an 2-ion system that doesn't have independent readout. Can I run GST on a qutrit?](#qutrit_gst)\n", - "- [I'd like to make a nice HTML report using only LGST results, is this possible?](#lgstonly_report)\n", - "- [Can I model leakage level(s) in pyGSTi?](#leakage)\n", - "- [Aaaah! Nothing works after updating to version 0.9.7! 
What do I do?](#v097woes)" + "- [What is GST?](#What-is-GST?)\r\n", + "- [What is pyGSTi?](#What-is-pyGSTi?)\r\n", + "- [How do I run GST (1 or 2 qubits)?](#How-do-I-run-GST-(1-or-2-qubits)?)\r\n", + "- [How do I constrain my gates to be Trace-Preserving (TP)?](#How-do-I-constrain-my-gates-to-be-Trace-Preserving-(TP)?)\r\n", + "- [How do I constrain my gates to be CPTP?](#How-do-I-constrain-my-gates-to-be-CPTP?)\r\n", + "- [What is gauge optimization?](#What-is-gauge-optimization?)\r\n", + "- [How do I specify the details of gauge optimization?](#How-do-I-specify-the-details-of-gauge-optimization?)\r\n", + "- [2-qubit GST is running very slowly - how do I speed it up?](#2-qubit-GST-is-running-very-slowly---how-do-I-speed-it-up?)\r\n", + "- [My report doesn't have error bars!](#My-report-doesn't-have-error-bars!)\r\n", + "- [Generating reports takes a long time - can it go any faster?](#Generating-reports-takes-a-long-time---can-it-go-any-faster?)\r\n", + "- [Germ selection is slow - can it go faster?](#Germ-selection-is-slow---can-it-go-faster?)\r\n", + "- [Can I generate bootstrapped error bars with pyGSTi?](#Can-I-generate-bootstrapped-error-bars-with-pyGSTi?)\r\n", + "- [How can I model context dependence?](#How-can-I-model-context-dependence?)\r\n", + "- [I have an 2-ion system that doesn't have independent readout. Can I run GST on a qutrit?](#I-have-an-2-ion-system-that-doesn't-have-independent-readout.--Can-I-run-GST-on-a-qutrit?)\r\n", + "- [I'd like to make a nice HTML report using only LGST results, is this possible?](#I'd-like-to-make-a-nice-HTML-report-using-only-LGST-results,-is-this-possible?)\r\n", + "- [Can I model leakage level(s) in pyGSTi?](#Can-I-model-leakage-level(s)-in-pyGSTi?)\r\n", + "- [Aaaah! Nothing works after updating to version 0.9.7! What do I do?](#Aaaah!-Nothing-works-after-updating-to-version-0.9.7!-What-do-I-do?)" ] }, { @@ -420,7 +420,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.9.13" + "version": "3.11.0" } }, "nbformat": 4, diff --git a/jupyter_notebooks/Tutorials/objects/advanced/OperationFactories.ipynb b/jupyter_notebooks/Tutorials/objects/advanced/OperationFactories.ipynb index fb5fa375f..a0068c8d9 100644 --- a/jupyter_notebooks/Tutorials/objects/advanced/OperationFactories.ipynb +++ b/jupyter_notebooks/Tutorials/objects/advanced/OperationFactories.ipynb @@ -93,7 +93,7 @@ " op.OpFactory.__init__(self, state_space=1, evotype=\"densitymx\")\n", " \n", " def create_object(self, args=None, sslbls=None):\n", - " assert(sslbls is None) # don't worry about sslbls for now -- these are for factories that can create gates placed at arbitrary circuit locations\n", + " # Note: don't worry about sslbls (unused) -- this argument allows factories to create different operations on different target qubits\n", " assert(len(args) == 1)\n", " theta = float(args[0])/2.0 #note we convert to float b/c the args can be strings depending on how the circuit is specified\n", " b = 2*np.cos(theta)*np.sin(theta)\n", @@ -147,7 +147,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "The above is readily extensible to systems with more qubits. The only nontrivial addition is that our factory, which creates 1-qubit gates, must be \"embedded\" within a larger collection of qubits to result in a n-qubit-gate factory. This step is easily accomplished using the builtin `EmbeddingOpFactory` object, which takes a tuple of all the qubits, e.g. `(0,1)` and a tuple of the subset of qubits therein to embed into, e.g. `(0,)`. 
This is illustrated below for the 2-qubit case, along with a demonstration of how a more complex 2-qubit circuit can be simulated:" + "The above is readily extensible to systems with more qubits. The only nontrivial addition is that our factory, which creates 1-qubit gates, must be \"embedded\" within a larger collection of qubits to result in an n-qubit-gate factory. This step is easily accomplished using the builtin `EmbeddedOpFactory` object, which takes a tuple of all the qubits, e.g. `(0,1)` and a tuple of the subset of qubits therein to embed into, e.g. `(0,)`. This is illustrated below for the 2-qubit case, along with a demonstration of how a more complex 2-qubit circuit can be simulated:" ] }, { @@ -193,7 +193,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.8.11" + "version": "3.10.4" } }, "nbformat": 4, diff --git a/optional-requirements.txt b/optional-requirements.txt index 54afacfbf..bbd007812 100644 --- a/optional-requirements.txt +++ b/optional-requirements.txt @@ -9,7 +9,6 @@ msgpack cython cvxopt cvxpy -nose seaborn qibo packaging diff --git a/pygsti/algorithms/compilers.py b/pygsti/algorithms/compilers.py index 606fc1bd5..ac0c79331 100644 --- a/pygsti/algorithms/compilers.py +++ b/pygsti/algorithms/compilers.py @@ -30,10 +30,10 @@ def _create_standard_costfunction(name): ---------- name : str Allowed values are: - - '2QGC' : the cost of the circuit is the number of 2-qubit gates it contains. - - 'depth' : the cost of the circuit is the depth of the circuit. - - '2QGC:x:depth:y' : the cost of the circuit is x * the number of 2-qubit gates in the circuit + - y * the depth of the circuit, where x and y are integers. + + * '2QGC' : the cost of the circuit is the number of 2-qubit gates it contains. + * 'depth' : the cost of the circuit is the depth of the circuit. + * '2QGC:x:depth:y' : the cost of the circuit is x * the number of 2-qubit gates in the circuit + y * the depth of the circuit, where x and y are integers. Returns ------- @@ -76,7 +76,7 @@ def compile_clifford(s, p, pspec=None, absolute_compilation=None, paulieq_compil Compiles an n-qubit Clifford gate, described by the symplectic matrix s and vector p, into a circuit over the gates given by a processor specification or a standard processor. Clifford gates/circuits can be converted to, or sampled in, the symplectic representation using the functions - in :module:`pygsti.tools.symplectic`. + in :mod:`pygsti.tools.symplectic`. The circuit created by this function will be over the gates in the given processor spec, respecting its connectivity, when a QubitProcessorSpec object is provided. Otherwise, it is over a canonical processor @@ -102,7 +102,7 @@ def compile_clifford(s, p, pspec=None, absolute_compilation=None, paulieq_compil If nbar > n it is necessary to provide `qubit_labels`, that specifies which of the qubits in `pspec` the Clifford acts on. (All other qubits will not be part of the returned circuit, regardless of whether that means an over-head is required to avoid using gates that act on those qubits. If these - additional qubits should be used, then the input Clifford needs to be ``padded'' to be the identity + additional qubits should be used, then the input Clifford needs to be "padded" to be the identity on those qubits). 
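A minimal usage sketch of the `pygsti.tools.symplectic` helpers referenced in this docstring; it is illustrative and not part of the diff, and the `random_clifford` / `check_valid_clifford` names and signatures are assumptions that may vary between pyGSTi versions:

```python
# Sketch: sample a random n-qubit Clifford in the symplectic representation
# (s, p) and validate it.  Assumes pygsti.tools.symplectic.random_clifford and
# check_valid_clifford exist with these signatures.
import pygsti.tools.symplectic as symp

n = 3
s, p = symp.random_clifford(n)          # s: 2n x 2n binary symplectic matrix, p: length-2n phase vector
assert s.shape == (2 * n, 2 * n)
assert symp.check_valid_clifford(s, p)  # the same validity check the compilers assert internally
```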
The ordering of the indices in (`s`,`p`) is w.r.t to ordering of the qubit labels in pspec.qubit_labels, @@ -131,18 +131,16 @@ def compile_clifford(s, p, pspec=None, absolute_compilation=None, paulieq_compil with `s` the symplectic matrix in its symplectic representation (a circuit that implements that desired Clifford up to Pauli operators). The allowed values of this are: - - 'BGGE': A basic, deterministic global Gaussian elimination algorithm. Circuits obtained from this algorithm + * 'BGGE': A basic, deterministic global Gaussian elimination algorithm. Circuits obtained from this algorithm contain, in expectation, O(n^2) 2-qubit gates. Although the returned circuit will respect device connectivity, this algorithm does *not* take connectivity into account in an intelligent way. More details on this algorithm are given in `compile_symplectic_with_ordered_global_gaussian_elimination()`; it is the algorithm described in that docstring but with the qubit ordering fixed to the order in the input `s`. - - - 'ROGGE': A randomized elimination order global Gaussian elimination algorithm. This is the same algorithm as + * 'ROGGE': A randomized elimination order global Gaussian elimination algorithm. This is the same algorithm as 'BGGE' except that the order that the qubits are eliminated in is randomized. This results in significantly lower-cost circuits than the 'BGGE' method (given sufficiently many iterations). More details are given in the `compile_symplectic_with_random_ordered_global_gaussian_elimination()` docstring. - - - 'iAGvGE': Our improved version of the Aaraonson-Gottesman method for compiling a Clifford circuit, which + * 'iAGvGE': Our improved version of the Aaronson-Gottesman method for compiling a Clifford circuit, which uses 3 CNOT circuits and 3 1Q-gate layers (rather than the 5 CNOT circuit used in the algorithm of AG in Phys. Rev. A 70 052328 (2004)), with the CNOT circuits compiled using Gaussian elimination. Note that this algorithm appears to perform substantially worse than 'ROGGE', even though with an upgraded CNOT compiler @@ -162,10 +160,10 @@ def compile_clifford(s, p, pspec=None, absolute_compilation=None, paulieq_compil between different compilations when randomized algorithms are used: the lowest cost circuit is chosen. If a string it must be one of: - - '2QGC' : the cost of the circuit is the number of 2-qubit gates it contains. - - 'depth' : the cost of the circuit is the depth of the circuit. - - '2QGC:x:depth:y' : the cost of the circuit is x * the number of 2-qubit gates in the circuit + - y * the depth of the circuit, where x and y are integers. + * '2QGC' : the cost of the circuit is the number of 2-qubit gates it contains. + * 'depth' : the cost of the circuit is the depth of the circuit. + * '2QGC:x:depth:y' : the cost of the circuit is x * the number of 2-qubit gates in the circuit + + y * the depth of the circuit, where x and y are integers. prefixpaulis : bool, optional A Pauli layer is needed to compile the correct Clifford (and not just the correct Clifford up to Paulis). When @@ -265,15 +263,15 @@ def compile_symplectic(s, pspec=None, absolute_compilation=None, paulieq_compila pspec : QubitProcessorSpec, optional An nbar-qubit QubitProcessorSpec object that encodes the device that `s` is being compiled - for, where nbar >= n. If this is specified, the output circuit is over the gates available + for, where `nbar >= n`. If this is specified, the output circuit is over the gates available in this device. 
If this is None, the output circuit is over the "canonical" processor of CNOT gates between all qubits, consisting of "H", "HP", "PH", "HPH", "I", "X", "Y" and "Z", which is the set used internally for the compilation. - If nbar > n it is necessary to provide `qubit_labels`, that specifies which of the qubits in `pspec` + If `nbar > n` it is necessary to provide `qubit_labels`, that specifies which of the qubits in `pspec` the Clifford acts on. (All other qubits will not be part of the returned circuit, regardless of whether that means an over-head is required to avoid using gates that act on those qubits. If these - additional qubits should be used, then the input `s` needs to be ``padded'' to be the identity + additional qubits should be used, then the input `s` needs to be "padded" to be the identity on those qubits). The indexing `s` is assumed to be the same as that in the list pspec.qubit_labels, unless `qubit_labels` @@ -301,18 +299,16 @@ def compile_symplectic(s, pspec=None, absolute_compilation=None, paulieq_compila The allowed elements of this list are: - - 'BGGE': A basic, deterministic global Gaussian elimination algorithm. Circuits obtained from this algorithm + * 'BGGE': A basic, deterministic global Gaussian elimination algorithm. Circuits obtained from this algorithm contain, in expectation, O(n^2) 2-qubit gates. Although the returned circuit will respect device connectivity, this algorithm does *not* take connectivity into account in an intelligent way. More details on this algorithm are given in `compile_symplectic_with_ordered_global_gaussian_elimination()`; it is the algorithm described in that docstring but with the qubit ordering fixed to the order in the input `s`. - - - 'ROGGE': A randomized elimination order global Gaussian elimination algorithm. This is the same algorithm as + * 'ROGGE': A randomized elimination order global Gaussian elimination algorithm. This is the same algorithm as 'BGGE' except that the order that the qubits are eliminated in is randomized. This results in significantly lower-cost circuits than the 'BGGE' method (given sufficiently many iterations). More details are given in the `compile_symplectic_with_random_ordered_global_gaussian_elimination()` docstring. - - - 'iAGvGE': Our improved version of the Aaraonson-Gottesman method for compiling a symplectic matrix, which + * 'iAGvGE': Our improved version of the Aaronson-Gottesman method for compiling a symplectic matrix, which uses 3 CNOT circuits and 3 1Q-gate layers (rather than the 5 CNOT circuit used in the algorithm of AG in Phys. Rev. A 70 052328 (2004)), with the CNOT circuits compiled using Gaussian elimination. Note that this algorithm appears to perform substantially worse than 'ROGGE', even though with an upgraded CNOT compiler @@ -328,10 +324,10 @@ def compile_symplectic(s, pspec=None, absolute_compilation=None, paulieq_compila between different compilations when randomized algorithms are used: the lowest cost circuit is chosen. If a string it must be one of: - - '2QGC' : the cost of the circuit is the number of 2-qubit gates it contains. - - 'depth' : the cost of the circuit is the depth of the circuit. - - '2QGC:x:depth:y' : the cost of the circuit is x * the number of 2-qubit gates in the circuit + - y * the depth of the circuit, where x and y are integers. + * '2QGC' : the cost of the circuit is the number of 2-qubit gates it contains. + * 'depth' : the cost of the circuit is the depth of the circuit. 
+ * '2QGC:x:depth:y' : the cost of the circuit is x * the number of 2-qubit gates in the circuit + + y * the depth of the circuit, where x and y are integers. paulirandomize : bool, optional If True then independent, uniformly random Pauli layers (a Pauli on each qubit) are inserted in between @@ -511,7 +507,7 @@ def _compile_symplectic_using_rogge_algorithm(s, pspec=None, paulieq_compilation If nbar > n it is necessary to provide `qubit_labels`, that specifies which of the qubits in `pspec` the Clifford acts on. (All other qubits will not be part of the returned circuit, regardless of whether that means an over-head is required to avoid using gates that act on those qubits. If these - additional qubits should be used, then the input `s` needs to be ``padded'' to be the identity + additional qubits should be used, then the input `s` needs to be "padded" to be the identity on those qubits). The indexing `s` is assumed to be the same as that in the list pspec.qubit_labels, unless `qubit_labels` @@ -533,10 +529,10 @@ def _compile_symplectic_using_rogge_algorithm(s, pspec=None, paulieq_compilation between different compilations when randomized algorithms are used: the lowest cost circuit is chosen. If a string it must be one of: - - '2QGC' : the cost of the circuit is the number of 2-qubit gates it contains. - - 'depth' : the cost of the circuit is the depth of the circuit. - - '2QGC:x:depth:y' : the cost of the circuit is x * the number of 2-qubit gates in the circuit + - y * the depth of the circuit, where x and y are integers. + * '2QGC' : the cost of the circuit is the number of 2-qubit gates it contains. + * 'depth' : the cost of the circuit is the depth of the circuit. + * '2QGC:x:depth:y' : the cost of the circuit is x * the number of 2-qubit gates in the circuit + + y * the depth of the circuit, where x and y are integers. iterations : int, optional The number of different random orderings tried. The lowest "cost" circuit obtained from the @@ -632,7 +628,7 @@ def _compile_symplectic_using_ogge_algorithm(s, eliminationorder, pspec=None, pa If nbar > n it is necessary to provide `qubit_labels`, that specifies which of the qubits in `pspec` the Clifford acts on. (All other qubits will not be part of the returned circuit, regardless of whether that means an over-head is required to avoid using gates that act on those qubits. If these - additional qubits should be used, then the input `s` needs to be ``padded'' to be the identity + additional qubits should be used, then the input `s` needs to be "padded" to be the identity on those qubits). The indexing `s` is assumed to be the same as that in the list pspec.qubit_labels, unless `qubit_labels` @@ -953,14 +949,13 @@ def _compile_symplectic_using_ag_algorithm(s, pspec=None, qubit_labels=None, cno The Aaronson-Gottesman method for compiling a symplectic matrix using 5 CNOT circuits + local layers. This algorithm is presented in PRA 70 052328 (2004). - - If `cnotmethod` = `GE` then the CNOT circuits are compiled using Gaussian elimination (which is O(n^2)). + * If `cnotmethod` = `GE` then the CNOT circuits are compiled using Gaussian elimination (which is O(n^2)). There are multiple GE algorithms for compiling a CNOT in pyGSTi. This function has the overall best variant of this algorithm hard-coded into this function. 
- - - If `cnotmethod` = `PMH` then the CNOT circuits are compiled using the asymptotically optimal + * If `cnotmethod` = `PMH` then the CNOT circuits are compiled using the asymptotically optimal O(n^2/logn) CNOT circuit algorithm of PMH. - *** This function has not yet been implemented *** + **This function has not yet been implemented** Parameters ---------- @@ -1027,7 +1022,7 @@ def _compile_symplectic_using_riag_algoritm(s, pspec, paulieq_compilation, qubit If nbar > n it is necessary to provide `qubit_labels`, that specifies which of the qubits in `pspec` the Clifford acts on. (All other qubits will not be part of the returned circuit, regardless of whether that means an over-head is required to avoid using gates that act on those qubits. If these - additional qubits should be used, then the input `s` needs to be ``padded'' to be the identity + additional qubits should be used, then the input `s` needs to be "padded" to be the identity on those qubits). The indexing `s` is assumed to be the same as that in the list pspec.qubit_labels, unless `qubit_labels` @@ -1060,10 +1055,10 @@ def _compile_symplectic_using_riag_algoritm(s, pspec, paulieq_compilation, qubit between different compilations when randomized algorithms are used: the lowest cost circuit is chosen. If a string it must be one of: - - '2QGC' : the cost of the circuit is the number of 2-qubit gates it contains. - - 'depth' : the cost of the circuit is the depth of the circuit. - - '2QGC:x:depth:y' : the cost of the circuit is x * the number of 2-qubit gates in the circuit + - y * the depth of the circuit, where x and y are integers. + * '2QGC' : the cost of the circuit is the number of 2-qubit gates it contains. + * 'depth' : the cost of the circuit is the depth of the circuit. + * '2QGC:x:depth:y' : the cost of the circuit is x * the number of 2-qubit gates in the circuit + + y * the depth of the circuit, where x and y are integers. check : bool, optional Whether to check that the output circuit implements the correct symplectic matrix (i.e., tests for algorithm @@ -1139,11 +1134,11 @@ def _compile_symplectic_using_iag_algorithm(s, pspec, qubit_labels=None, cnotalg The ordering of the qubits in (`s`,`p`) is taken w.r.t the ordering of this list. cnotalg : str, optional - The `algorithm` argument to pass internally to :function:`compile_cnot_circuit` + The `algorithm` argument to pass internally to :func:`compile_cnot_circuit` when compiling CNOT gates. cargs : various, optional - The `aargs` argument to pass internally to :function:`compile_cnot_circuit` + The `aargs` argument to pass internally to :func:`compile_cnot_circuit` when compiling CNOT gates. check : bool, optional @@ -1286,7 +1281,7 @@ def compile_cnot_circuit(s, pspec, compilation, qubit_labels=None, algorithm='CO If nbar > n it is necessary to provide `qubit_labels`, that specifies which of the qubits in `pspec` the Clifford acts on. (All other qubits will not be part of the returned circuit, regardless of whether that means an over-head is required to avoid using gates that act on those qubits. If these - additional qubits should be used, then the input `s` needs to be ``padded'' to be the identity + additional qubits should be used, then the input `s` needs to be "padded" to be the identity on those qubits). 
The indexing `s` is assumed to be the same as that in the list pspec.qubit_labels, unless `qubit_labels` @@ -1303,22 +1298,22 @@ def compile_cnot_circuit(s, pspec, compilation, qubit_labels=None, algorithm='CO algorithm : str, optional The algorithm to use. The options are: - - 'BGE' : A basic Gaussian elimination algorithm, that uses CNOT to perform row-reduction on the upper - LHS (or lower RHS) of `s`. This algorithm does not take device connectivity into account. - - 'OCAGE' : User-ordered connectivity-adjusted Gaussian elimination. The qubits are eliminated in the - specified order; the first element of arrgs must be a list specify this order. The algorithm is - also "connectivity-adjusted" in the sense that it uses the connectivity graph (in pspec.qubit_graph) - to try and avoid using CNOTs between unconnected pairs whenever possible, and to decide the order - of various operations. - - 'OiCAGE' : The same as 'OCAGE' except that it has some improvements, and it requires connectivity - graph of the remaining qubits, after each qubit has been 'eliminated', to be connected. In the current - format this algorithm is only slightly better-performing than 'OCAGE', and only on average. (This - algorithm will possibly be improved in the future, whereas 'OCAGE' will remain as-is for reproducability - of previously obtained results.) - - 'ROCAGE': Same as 'OCAGE' except that the elimination order is chosen at random, rather than user- - specified. - - 'COCAGE', 'COiCAGE' : The same as 'OCAGE' and 'OiCAGE', respectively, except that the elimination order - is fixed to eliminate qubits with the worse connectivity before those with better connectivity. + * 'BGE' : A basic Gaussian elimination algorithm that uses CNOTs to perform row-reduction on the upper + LHS (or lower RHS) of `s`. This algorithm does not take device connectivity into account. + * 'OCAGE' : User-ordered connectivity-adjusted Gaussian elimination. The qubits are eliminated in the + specified order; the first element of aargs must be a list specifying this order. The algorithm is + also "connectivity-adjusted" in the sense that it uses the connectivity graph (in pspec.qubit_graph) + to try and avoid using CNOTs between unconnected pairs whenever possible, and to decide the order + of various operations. + * 'OiCAGE' : The same as 'OCAGE' except that it has some improvements, and it requires the connectivity + graph of the remaining qubits, after each qubit has been 'eliminated', to be connected. In the current + format this algorithm is only slightly better-performing than 'OCAGE', and only on average. (This + algorithm will possibly be improved in the future, whereas 'OCAGE' will remain as-is for reproducibility + of previously obtained results.) + * 'ROCAGE': Same as 'OCAGE' except that the elimination order is chosen at random, rather than user- + specified. + * 'COCAGE', 'COiCAGE' : The same as 'OCAGE' and 'OiCAGE', respectively, except that the elimination order + is fixed to eliminate qubits with the worst connectivity before those with better connectivity. compile_to_native : bool, optional Whether the circuit should be given in terms of the native gates of the processor defined in `pspec`. @@ -1447,7 +1442,7 @@ def _compile_cnot_circuit_using_bge_algorithm(s, pspec, qubit_labels=None, compi by pspec.qubit_graph. If nbar > n it is necessary to provide `qubit_labels`, to specify which qubits in `pspec` the CNOT circuit acts on (all other qubits will not be part of the returned circuit, regardless of whether that means an over-head is required to avoid using gates that act on those qubits. If these
If nbar > n it is necessary to provide `qubit_labels`, to specify which qubits in `pspec` the CNOT circuit acts on (all other qubits will not be part of the returned circuit, regardless of whether that means an over-head is required to avoid using gates that act on those qubits. If these - additional qubits should be used, then the input CNOT circuit needs to be ``padded'' to be the identity + additional qubits should be used, then the input CNOT circuit needs to be "padded" to be the identity on those qubits). The ordering of the qubits in `s` is assumed to be the same as that in the list pspec.qubit_labels, unless `qubit_labels` is specified. Then, the ordering is taken w.r.t the ordering of the list `qubit_labels`. @@ -1553,12 +1548,13 @@ def _compile_cnot_circuit_using_ocage_algorithm(s, pspec, qubitorder, qubit_labe specified, whereby "eliminating" a qubit means mapping the column and row of `s` associated with that qubit to the identity column and row. To eliminate the ith qubit in the list we: - 1. Look at the current value of s[i,i]. If s[i,i] = 1 continue. Else, find the closest qubit to + #. Look at the current value of s[i,i]. If s[i,i] = 1 continue. Else, find the closest qubit to do a CNOT between i and that qubit to make s[i,i] = 1. - 2. List the remaining qubits to eliminate (the i+1th qubit onwards), and going from the qubit + #. List the remaining qubits to eliminate (the i+1th qubit onwards), and going from the qubit in this list that is further from i to the closest implement the following steps: - 2.1. Denote this qubit by ii. - 2.1 If s[ii,i] = 0, pass. Else, map s[ii,i] -> 0 with the following method: + + #. Denote this qubit by ii. + #. If s[ii,i] = 0, pass. Else, map s[ii,i] -> 0 with the following method: (a) find the shortest path from i -> ii, (b) If the shortest path contains already eliminated qubits, using a LHS-action SWAP-like set of chains of CNOTs along the shortest path i -> ii to do a CNOT from i -> ii whilst @@ -1568,7 +1564,7 @@ def _compile_cnot_circuit_using_ocage_algorithm(s, pspec, qubitorder, qubit_labe column s[:,i] for the qubits along this path to 1. (d) Use the qubit next to ii in this path to set s[ii,i] using a LHS-action CNOT, and don't undo any of the changes to the other qubits along that path. - 2.3. If s[i,ii] = 0, pass. Else, map s[i,ii] -> 0 as in step 2.3 except that now we use RHS-action + #. If s[i,ii] = 0, pass. Else, map s[i,ii] -> 0 as in step 2.3 except that now we use RHS-action CNOTs. Steps 1 - 3 do not change the already eliminated qubits, so after steps 1 - 2 are repeated for each @@ -1589,7 +1585,7 @@ def _compile_cnot_circuit_using_ocage_algorithm(s, pspec, qubitorder, qubit_labe `True`. If nbar > n it is necessary to provide `qubit_labels`, that specifies which of the qubits in `pspec` the CNOT circuit acts on (all other qubits will not be part of the returned circuit, regardless of whether that means an over-head is required to avoid using gates that act on those qubits. If these - additional qubits should be used, then the input CNOT circuit needs to be ``padded'' to be the identity + additional qubits should be used, then the input CNOT circuit needs to be "padded" to be the identity on those qubits). The ordering of the qubits in `s` is assumed to be the same as that in the list pspec.qubit_labels, unless `qubit_labels` is specified. Then, the ordering is taken w.r.t the ordering of the list `qubit_labels`. 
@@ -1812,7 +1808,7 @@ def _compile_cnot_circuit_using_oicage_algorithm(s, pspec, qubitorder, qubit_lab An improved, ordered and connectivity-adjusted Gaussian-elimination (OiCAGE) algorithm for compiling a CNOT circuit. This is a *slight* improvement (for some CNOT circuits), on the algorithm in - :function:`_compile_cnot_circuit_using_ocage_algorithm()`, which is the meaning of the "improved". See the docstring + :func:`_compile_cnot_circuit_using_ocage_algorithm()`, which is the meaning of the "improved". See the docstring for that function for information on the parameters of this function and the basic outline of the algorithm. Parameters ---------- @@ -1828,7 +1824,7 @@ def _compile_cnot_circuit_using_oicage_algorithm(s, pspec, qubitorder, qubit_lab by pspec.qubit_graph. If nbar > n it is necessary to provide `qubit_labels`, that specifies which of the qubits in `pspec` the CNOT circuit acts on (all other qubits will not be part of the returned circuit, regardless of whether that means an over-head is required to avoid using gates that act on those qubits. - If these additional qubits should be used, then the input CNOT circuit needs to be ``padded'' to be the identity + If these additional qubits should be used, then the input CNOT circuit needs to be "padded" to be the identity on those qubits). The ordering of the qubits in `s` is assumed to be the same as that in the list pspec.qubit_labels, unless `qubit_labels` is specified. Then, the ordering is taken w.r.t the ordering of the list `qubit_labels`. @@ -2023,22 +2019,22 @@ def compile_stabilizer_state(s, p, pspec, absolute_compilation, paulieq_compilat algorithm='COiCAGE', aargs=[], costfunction='2QGC:10:depth:1', rand_state=None): """ - Generates a circuit to create the stabilizer state from the standard input state |0,0,0,...>. + Generates a circuit to create the stabilizer state from the standard input state `|0,0,0,...>`. The stabilizer state is specified by `s` and `p`. The circuit returned is over the gates in - the processor spec. See :function:`compile_stabilizer_state()` for the inverse of this. + the processor spec. See :func:`compile_stabilizer_measurement()` for the inverse of this. Parameters ---------- s : array over [0,1] An (2n X 2n) symplectic matrix of 0s and 1s integers. This is a symplectic matrix representing - any Clifford gate that, when acting on |0,0,0,...>, generates the desired stabilizer state. + any Clifford gate that, when acting on `|0,0,0,...>`, generates the desired stabilizer state. So `s` is not unique. p : array over [0,1] A length-2n vector over [0,1,2,3] that, together with s, defines a valid n-qubit Clifford gate. This phase vector matrix should, together with `s`, represent any Clifford gate that, - when acting on |0,0,0,...>, generates the desired stabilizer state. So `p` is not unique. + when acting on `|0,0,0,...>`, generates the desired stabilizer state. So `p` is not unique. pspec : QubitProcessorSpec, optional An nbar-qubit QubitProcessorSpec object that encodes the device that the stabilizer is being compiled for, where nbar >= n. If this is specified, the output circuit is over the gates available in this device. If this is None, the output circuit is over the "canonical" processor of CNOT gates between all qubits, consisting of "H", "HP", "PH", "HPH", "I", "X", "Y" and "Z", which is the set used internally for the compilation. In most circumstances, the output will be more useful if a QubitProcessorSpec is provided. If nbar > n it is necessary to provide `qubit_labels`, that specifies which of the qubits in `pspec` the stabilizer is over. (All other qubits will not be part of the returned circuit, regardless of whether that means an over-head is required to avoid using gates that act on those qubits. 
If these - additional qubits should be used, then the input (s,p) needs to be ``padded'' to be the identity + additional qubits should be used, then the input (s,p) needs to be "padded" to be the identity on those qubits). The ordering of the indices in (`s`,`p`) is w.r.t. the ordering of the qubit labels in pspec.qubit_labels, @@ -2099,10 +2095,10 @@ def compile_stabilizer_state(s, p, pspec, absolute_compilation, paulieq_compilat between different compilations when randomized algorithms are used: the lowest cost circuit is chosen. If a string it must be one of: - - '2QGC' : the cost of the circuit is the number of 2-qubit gates it contains. - - 'depth' : the cost of the circuit is the depth of the circuit. - - '2QGC:x:depth:y' : the cost of the circuit is x * the number of 2-qubit gates in the circuit + - y * the depth of the circuit, where x and y are integers. + * '2QGC' : the cost of the circuit is the number of 2-qubit gates it contains. + * 'depth' : the cost of the circuit is the depth of the circuit. + * '2QGC:x:depth:y' : the cost of the circuit is x * the number of 2-qubit gates in the circuit + + y * the depth of the circuit, where x and y are integers. rand_state : RandomState, optional A np.random.RandomState object for seeding the RNG. @@ -2110,7 +2106,7 @@ def compile_stabilizer_state(s, p, pspec, absolute_compilation, paulieq_compilat Returns ------- Circuit - A circuit that creates the specified stabilizer state from |0,0,0,...> + A circuit that creates the specified stabilizer state from `|0,0,0,...>` """ assert(_symp.check_valid_clifford(s, p)), "The input s and p are not a valid clifford." @@ -2197,38 +2193,38 @@ def compile_stabilizer_measurement(s, p, pspec, absolute_compilation, paulieq_co iterations=20, paulirandomize=False, algorithm='COCAGE', aargs=[], costfunction='2QGC:10:depth:1', rand_state=None): """ - Generates a circuit to map the stabilizer state to the standard state |0,0,0,...>. + Generates a circuit to map the stabilizer state to the standard state `|0,0,0,...>`. The stabilizer state is specified by `s` and `p`. The circuit returned is over the gates in the processor spec `pspec`. See :func:`compile_stabilizer_state()` for the inverse of this. So, this circuit followed by a Z-basis measurement can be used to simulate a projection onto the - stabilizer state C|0,0,0,...> where C is the Clifford represented by `s` and `p`. + stabilizer state C`|0,0,0,...>` where C is the Clifford represented by `s` and `p`. Parameters ---------- s : array over [0,1] A (2n X 2n) symplectic matrix of integer 0s and 1s. This is a symplectic matrix representing - any Clifford gate that, when acting on |0,0,0,...>, generates the stabilizer state that we need - to map to |0,0,0,...>. So `s` is not unique. + any Clifford gate that, when acting on `|0,0,0,...>`, generates the stabilizer state that we need + to map to `|0,0,0,...>`. So `s` is not unique. p : array over [0,1] A length-2n vector over [0,1,2,3] that, together with s, defines a valid n-qubit Clifford gate. This phase vector should, together with `s`, represent any Clifford gate that, - when acting on |0,0,0,...>, generates the stabilizer state that we need to map to |0,0,0,...>. + when acting on `|0,0,0,...>`, generates the stabilizer state that we need to map to `|0,0,0,...>`. So `p` is not unique. pspec : QubitProcessorSpec, optional An nbar-qubit QubitProcessorSpec object that encodes the device that the stabilizer is being compiled - for, where nbar >= n.
If this is specified, the output circuit is over the gates available + for, where `nbar >= n`. If this is specified, the output circuit is over the gates available in this device. If this is None, the output circuit is over the "canonical" processor of CNOT gates between all qubits, consisting of "H", "HP", "PH", "HPH", "I", "X", "Y" and "Z", which is the set used internally for the compilation. In most circumstances, the output will be more useful if a QubitProcessorSpec is provided. - If nbar > n it is necessary to provide `qubit_labels`, that specifies which of the qubits in `pspec` + If `nbar > n` it is necessary to provide `qubit_labels`, that specifies which of the qubits in `pspec` the stabilizer is over. (All other qubits will not be part of the returned circuit, regardless of whether that means an overhead is required to avoid using gates that act on those qubits. If these - additional qubits should be used, then the input (s,p) needs to be ``padded'' to be the identity + additional qubits should be used, then the input (s,p) needs to be "padded" to be the identity on those qubits). The ordering of the indices in (`s`,`p`) is w.r.t. the ordering of the qubit labels in pspec.qubit_labels, @@ -2276,10 +2272,10 @@ def compile_stabilizer_measurement(s, p, pspec, absolute_compilation, paulieq_co between different compilations when randomized algorithms are used: the lowest cost circuit is chosen. If a string it must be one of: - - '2QGC' : the cost of the circuit is the number of 2-qubit gates it contains. - - 'depth' : the cost of the circuit is the depth of the circuit. - - '2QGC:x:depth:y' : the cost of the circuit is x * the number of 2-qubit gates in the circuit + - y * the depth of the circuit, where x and y are integers. + * '2QGC' : the cost of the circuit is the number of 2-qubit gates it contains. + * 'depth' : the cost of the circuit is the depth of the circuit. + * '2QGC:x:depth:y' : the cost of the circuit is x * the number of 2-qubit gates in the circuit + + y * the depth of the circuit, where x and y are integers. rand_state : RandomState, optional A np.random.RandomState object for seeding the RNG. @@ -2287,7 +2283,7 @@ def compile_stabilizer_measurement(s, p, pspec, absolute_compilation, paulieq_co Returns ------- Circuit - A circuit that maps the specified stabilizer state to |0,0,0,...> + A circuit that maps the specified stabilizer state to `|0,0,0,...>` """ assert(_symp.check_valid_clifford(s, p)), "The input s and p are not a valid clifford." @@ -2771,7 +2767,6 @@ def find_albert_factorization_transform_using_cnots(s, optype, position, qubit_l 1. Finds an *invertible* M such that F = M M.T where F is the submatrix in position `position`, i.e., F is one of A, B, C and D. (This is known as an Albert factorization.) - 2. Applies a CNOT circuit from the LHS (if `optype` = 'row') or RHS (if `optyp`='colum')) to `s` so that F - > M. @@ -2942,9 +2937,9 @@ def compile_conditional_symplectic(s, pspec, qubit_labels=None, calg='COiCAGE', 1. C1 is a CNOT circuit 2. C2 is a circuit with the form 1-qubit-gates -- CNOT circuit -- 1-qubit gates. 3. The symplectic rep of the circuit consisting of C1 followed by C2 has the form ((.,B)(.,D)) - when s has the form ((A,B),(C,D)). + when s has the form ((A,B),(C,D)). - Therefore, the circuit C2 acting on |0,0,0,...> generates the same stabilizer state (up to Paulis) + Therefore, the circuit C2 acting on `|0,0,0,...>` generates the same stabilizer state (up to Paulis) as a circuit that has the symplectic rep (s,p) for any valid p.
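(Point 3 above can be checked numerically: the "conditional" equivalence only constrains the right-hand blocks of the 2n x 2n symplectic matrix. A small illustrative helper, not pyGSTi API:)

import numpy as np

def same_right_blocks(s1, s2):
    # True when the B and D blocks of the ((A,B),(C,D)) block form agree for
    # s1 and s2, i.e. the two reps generate the same stabilizer state from
    # |0,0,0,...> (up to Paulis), per the docstring above.
    n = s1.shape[0] // 2
    return (np.array_equal(s1[:n, n:] % 2, s2[:n, n:] % 2)
            and np.array_equal(s1[n:, n:] % 2, s2[n:, n:] % 2))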
The circuit is only "conditionally" equivalent to another circuit with the rep (s,p) -- conditional on the input state -- which is the meaning of the name `compile_conditional_symplectic`. @@ -2959,7 +2954,7 @@ def compile_conditional_symplectic(s, pspec, qubit_labels=None, calg='COiCAGE', for, where nbar >= n. If nbar > n it is necessary to provide `qubit_labels`, that specifies which of the qubits in `pspec` the stabilizer is over. (All other qubits will not be part of the returned circuit, regardless of whether that means an over-head is required to avoid using gates that act on those qubits. - If these additional qubits should be used, then the input (s,p) needs to be ``padded'' to be the identity + If these additional qubits should be used, then the input (s,p) needs to be "padded" to be the identity on those qubits). The ordering of the indices in (`s`,`p`) is w.r.t to ordering of the qubit labels in pspec.qubit_labels, unless `qubit_labels` is specified. Then, the ordering is taken w.r.t the ordering of the list `qubit_labels`. diff --git a/pygsti/algorithms/core.py b/pygsti/algorithms/core.py index c2a58f7e8..274bdb128 100644 --- a/pygsti/algorithms/core.py +++ b/pygsti/algorithms/core.py @@ -859,7 +859,7 @@ def _do_runopt(objective, optimizer, printer): This is factored out as a separate function because of the differences when running Taylor-term simtype calculations, which utilize this - as a subroutine (see :function:`_do_term_runopt`). + as a subroutine (see :func:`_do_term_runopt`). Parameters ---------- @@ -910,7 +910,7 @@ def _do_term_runopt(objective, optimizer, printer): Runs the core model-optimization step for models using the Taylor-term (path integral) method of computing probabilities. - This routine serves the same purpose as :function:`_do_runopt`, but + This routine serves the same purpose as :func:`_do_runopt`, but is more complex because an appropriate "path set" must be found, requiring a loop of model optimizations with fixed path sets until a sufficient "good" path set is obtained. diff --git a/pygsti/algorithms/directx.py b/pygsti/algorithms/directx.py index c45965240..13fd0fb09 100644 --- a/pygsti/algorithms/directx.py +++ b/pygsti/algorithms/directx.py @@ -81,7 +81,7 @@ def model_with_lgst_circuit_estimates( Defaults to dimension of `target_model`. verbosity : int, optional - Verbosity value to send to run_lgst(...) call. + Verbosity value to send to `run_lgst(...)` call. Returns ------- @@ -158,7 +158,7 @@ def direct_lgst_model(circuit_to_estimate, circuit_label, dataset, Defaults to dimension of `target_model`. verbosity : int, optional - Verbosity value to send to run_lgst(...) call. + Verbosity value to send to `run_lgst(...)` call. 
Returns ------- diff --git a/pygsti/algorithms/fiducialpairreduction.py b/pygsti/algorithms/fiducialpairreduction.py index bccb5516e..4f2dfde57 100644 --- a/pygsti/algorithms/fiducialpairreduction.py +++ b/pygsti/algorithms/fiducialpairreduction.py @@ -161,7 +161,7 @@ def find_sufficient_fiducial_pairs(target_model, prep_fiducials, meas_fiducials, for prepLbl, povmLbl in prep_povm_tuples] def _get_derivs(length): - """ Compute all derivative info: get derivative of each + """ Compute all derivative info: get derivative of each `<E_i|germ^L|rho_j>` where i = composite EVec & fiducial index and j similar """ st = 0 # running row count over to-be-concatenated dPall matrix @@ -968,7 +968,7 @@ def test_fiducial_pairs(fid_pairs, target_model, prep_fiducials, meas_fiducials, for prepLbl, povmLbl in pre_povm_tuples] def _get_derivs(length): - """ Compute all derivative info: get derivative of each + """ Compute all derivative info: get derivative of each `<E_i|germ^L|rho_j>` where i = composite EVec & fiducial index and j similar """ circuits = [] diff --git a/pygsti/algorithms/fiducialselection.py b/pygsti/algorithms/fiducialselection.py index 28b8c662d..b0ea19400 100644 --- a/pygsti/algorithms/fiducialselection.py +++ b/pygsti/algorithms/fiducialselection.py @@ -638,9 +638,10 @@ def create_prep_cache(model, available_prep_fid_list, circuit_cache=None): """ Make a dictionary structure mapping native state preps and circuits to numpy column vectors for the corresponding effective state prep. - + This can then be passed into 'create_prep_mxs' to more efficiently generate the matrices for score function evaluation. + Parameters ---------- model : Model @@ -651,7 +652,7 @@ def create_prep_cache(model, available_prep_fid_list, circuit_cache=None): circuit_cache : dict dictionary of PTMs for the circuits in the available_prep_fid_list - + Returns ------- dictionary @@ -682,9 +683,10 @@ def create_meas_cache(model, available_meas_fid_list, circuit_cache=None): """ Make a dictionary structure mapping native measurements and circuits to numpy column vectors corresponding to the transpose of the effective measurement effects. - + This can then be passed into 'create_meas_mxs' to more efficiently generate the matrices for score function evaluation. + Parameters ---------- model : Model diff --git a/pygsti/algorithms/gaugeopt.py b/pygsti/algorithms/gaugeopt.py index a4d0f2252..fcd52d267 100644 --- a/pygsti/algorithms/gaugeopt.py +++ b/pygsti/algorithms/gaugeopt.py @@ -133,12 +133,14 @@ def gaugeopt_to_target(model, target_model, item_weights=None, Returns ------- - model if return_all == False - (goodnessMin, gaugeMx, model) if return_all == True - where goodnessMin is the minimum value of the goodness function (the best 'goodness') - found, gaugeMx is the gauge matrix used to transform the model, and model is the + model : if return_all == False + + (goodnessMin, gaugeMx, model) : if return_all == True + Where goodnessMin is the minimum value of the goodness function (the best 'goodness') + found, gaugeMx is the gauge matrix used to transform the model, and model is the final gauge-transformed model.
""" + if item_weights is None: item_weights = {} ls_mode_allowed = bool(target_model is not None @@ -250,8 +252,11 @@ def gaugeopt_custom(model, objective_fn, gauge_group=None, Returns ------- - model if return_all == False - (goodnessMin, gaugeMx, model) if return_all == True + model + if return_all == False + + (goodnessMin, gaugeMx, model) + if return_all == True where goodnessMin is the minimum value of the goodness function (the best 'goodness') found, gaugeMx is the gauge matrix used to transform the model, and model is the final gauge-transformed model. diff --git a/pygsti/algorithms/germselection.py b/pygsti/algorithms/germselection.py index d8d571498..0ba13d2df 100644 --- a/pygsti/algorithms/germselection.py +++ b/pygsti/algorithms/germselection.py @@ -101,20 +101,19 @@ def find_germs(target_model, randomize=True, randomization_strength=1e-2, algorithm : {'greedy', 'grasp', 'slack'}, optional Specifies the algorithm to use to generate the germ set. Current options are: - 'greedy' - Add germs one-at-a-time until the set is AC, picking the germ that - improves the germ-set score by the largest amount at each step. See - :func:`find_germs_breadthfirst` for more details. - 'grasp' - Use GRASP to generate random greedy germ sets and then locally - optimize them. See :func:`find_germs_grasp` for more - details. - 'slack' - From a initial set of germs, add or remove a germ at each step in - an attempt to improve the germ-set score. Will allow moves that - degrade the score in an attempt to escape local optima as long as - the degredation is within some specified amount of "slack". See - :func:`find_germs_integer_slack` for more details. + 'greedy' : Add germs one-at-a-time until the set is AC, picking the germ that + improves the germ-set score by the largest amount at each step. See + :func:`find_germs_breadthfirst` for more details. + + 'grasp': Use GRASP to generate random greedy germ sets and then locally + optimize them. See :func:`find_germs_grasp` for more + details. + + 'slack': From a initial set of germs, add or remove a germ at each step in + an attempt to improve the germ-set score. Will allow moves that + degrade the score in an attempt to escape local optima as long as + the degredation is within some specified amount of "slack". See + :func:`find_germs_integer_slack` for more details. algorithm_kwargs : dict Dictionary of ``{'keyword': keyword_arg}`` pairs providing keyword @@ -435,7 +434,7 @@ def compute_germ_set_score(germs, target_model=None, neighborhood=None, Number of randomized models to construct around `target_model`. randomization_strength : float, optional - Strength of unitary randomizations, as passed to :method:`target_model.randomize_with_unitary`. + Strength of unitary randomizations, as passed to :meth:`target_model.randomize_with_unitary`. score_func : {'all', 'worst'} Sets the objective function for scoring the eigenvalues. If 'all', @@ -609,7 +608,7 @@ def compute_composite_germ_set_score(score_fn, threshold_ac=1e6, init_n=1, eps : float, optional Used when calculating `partial_deriv_dagger_deriv` to determine if two - eigenvalues are equal (see :func:`_bulk_twirled_deriv` for details). Not + eigenvalues are equal (see :func:`~pygsti.algorithms.germselection._bulk_twirled_deriv` for details). Not used if `partial_deriv_dagger_deriv` is provided. op_penalty : float, optional @@ -893,7 +892,7 @@ def randomize_model_list(model_list, randomization_strength, num_copies, A list of Model objects. 
randomization_strength : float, optional - Strength of unitary randomizations, as passed to :method:`Model.randomize_with_unitary`. + Strength of unitary randomizations, as passed to :meth:`Model.randomize_with_unitary`. num_copies : int The number of random perturbations of `model_list[0]` to generate when @@ -942,14 +941,14 @@ def test_germs_list_completeness(model_list, germs_list, score_func, threshold, models around a model of interest. germs_list : list - A list of the germ :class:`Circuit`s (the "germ set") to test for completeness. + A list of the germ :class:`Circuit` objects (the "germ set") to test for completeness. score_func : {'all', 'worst'} Sets the objective function for scoring the eigenvalues. If 'all', score is ``sum(1/eigval_array)``. If 'worst', score is ``1/min(eigval_array)``. threshold : float, optional - An eigenvalue of jacobian^T*jacobian is considered zero and thus a + An eigenvalue of `jacobian^T*jacobian` is considered zero and thus a parameter un-amplified when its reciprocal is greater than threshold. Also used for eigenvector degeneracy testing in twirling operation. @@ -1185,7 +1184,7 @@ def _twirled_deriv(model, circuit, eps=1e-6, float_type=_np.cdouble): eps : float, optional Tolerance used for testing whether two eigenvectors are degenerate - (i.e. abs(eval1 - eval2) < eps ? ) + (i.e. `abs(eval1 - eval2) < eps` ? ) float_type : numpy dtype object, optional Numpy data type to use for floating point arrays. @@ -1224,7 +1223,7 @@ def _bulk_twirled_deriv(model, circuits, eps=1e-6, check=False, comm=None, float eps : float, optional Tolerance used for testing whether two eigenvectors are degenerate - (i.e. abs(eval1 - eval2) < eps ? ) + (i.e. `abs(eval1 - eval2) < eps` ? ) check : bool, optional Whether to perform internal consistency checks, at the expense of @@ -1295,14 +1294,14 @@ def test_germ_set_finitel(model, germs_to_test, length, weights=None, A 1-D array of weights with length equal len(germs_to_test), which multiply the contribution of each germ to the total jacobian matrix determining parameter amplification. If - None, a uniform weighting of 1.0/len(germs_to_test) is applied. + None, a uniform weighting of `1.0/len(germs_to_test)` is applied. return_spectrum : bool, optional - If True, return the jacobian^T*jacobian spectrum in addition + If True, return the `jacobian^T*jacobian` spectrum in addition to the success flag. tol : float, optional - Tolerance: an eigenvalue of jacobian^T*jacobian is considered + Tolerance: an eigenvalue of `jacobian^T*jacobian` is considered zero and thus a parameter un-amplified when it is less than tol. Returns @@ -1363,14 +1362,14 @@ def test_germ_set_infl(model, germs_to_test, score_func='all', weights=None, A 1-D array of weights with length equal len(germs_to_test), which multiply the contribution of each germ to the total jacobian matrix determining parameter amplification. If - None, a uniform weighting of 1.0/len(germs_to_test) is applied. + None, a uniform weighting of `1.0/len(germs_to_test)` is applied. return_spectrum : bool, optional - If ``True``, return the jacobian^T*jacobian spectrum in addition + If ``True``, return the `jacobian^T*jacobian` spectrum in addition to the success flag. threshold : float, optional - An eigenvalue of jacobian^T*jacobian is considered zero and thus a + An eigenvalue of `jacobian^T*jacobian` is considered zero and thus a parameter un-amplified when its reciprocal is greater than threshold. Also used for eigenvector degeneracy testing in twirling operation. 
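(A minimal sketch of the two `score_func` conventions and the `threshold` test described in these hunks, for an arbitrary germ-set Jacobian `J`; an illustrative helper, not the pyGSTi implementation:)

import numpy as np

def germ_set_score(jacobian, score_func='all', threshold=1e6):
    # Score the spectrum of J^T J and count eigenvalues treated as zero:
    # a parameter counts as "un-amplified" when 1/eigenvalue exceeds `threshold`.
    eigvals = np.clip(np.linalg.eigvalsh(jacobian.T @ jacobian), 1e-300, None)
    num_unamplified = int(np.sum(1.0 / eigvals > threshold))
    if score_func == 'all':
        return np.sum(1.0 / eigvals), num_unamplified
    if score_func == 'worst':
        return 1.0 / np.min(eigvals), num_unamplified
    raise ValueError("score_func must be 'all' or 'worst'")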
@@ -1461,7 +1460,7 @@ def find_germs_depthfirst(model_list, germs_list, randomize=True, Parameters ---------- model_list : Model or list - The model or list of `Model`s to select germs for. + The model or list of Models to select germs for. germs_list : list of Circuit The list of germs to construct a germ set from. @@ -1626,7 +1625,7 @@ def find_germs_breadthfirst(model_list, germs_list, randomize=True, Parameters ---------- model_list : Model or list - The model or list of `Model`s to select germs for. + The model or list of `Model` objects to select germs for. germs_list : list of Circuit The list of germs to construct a germ set from. @@ -3528,7 +3527,7 @@ def find_germs_breadthfirst_greedy(model_list, germs_list, randomize=True, Parameters ---------- model_list : Model or list - The model or list of `Model`s to select germs for. + The model or list of Models to select germs for. germs_list : list of Circuit The list of germs to construct a germ set from. diff --git a/pygsti/algorithms/randomcircuit.py b/pygsti/algorithms/randomcircuit.py index a5c325be6..9dab36dfb 100644 --- a/pygsti/algorithms/randomcircuit.py +++ b/pygsti/algorithms/randomcircuit.py @@ -57,7 +57,7 @@ def sample_random_clifford_one_qubit_unitary_parameters(): def sample_compiled_haar_random_one_qubit_gates_zxzxz_circuit(pspec, zname='Gzr', xname='Gxpi2', qubit_labels=None): """ TODO: docstring #generate layer of random unitaries and make a series of circuit layers with the compiled versions - of these + of these """ if qubit_labels is not None: n = len(qubit_labels) @@ -86,7 +86,7 @@ def sample_compiled_random_clifford_one_qubit_gates_zxzxz_circuit(pspec, zname='Gzr', xname='Gxpi2', qubit_labels=None): """ TODO: docstring #generate layer of random unitaries and make a series of circuit layers with the compiled versions - of these + of these """ if qubit_labels is not None: n = len(qubit_labels) @@ -538,7 +538,7 @@ def sample_circuit_layer_by_co2_q_gates(pspec, qubit_labels, co2_q_gates, co2_q_ from those 1-qubit gates specified by `one_q_gate_names`. For example, consider 4 qubits with linear connectivity. A valid `co2_q_gates` list is - co2_q_gates = [[,],[Label(Gcphase,(0,1)),Label(Gcphase,(2,3))]] which consists of an + `co2_q_gates = [[,],[Label(Gcphase,(0,1)),Label(Gcphase,(2,3))]]` which consists of an element containing zero 2-qubit gates and an element containing two 2-qubit gates that can be applied in parallel. In this example there are 5 possible sets of compatible 2-qubit gates: @@ -552,14 +552,14 @@ def sample_circuit_layer_by_co2_q_gates(pspec, qubit_labels, co2_q_gates, co2_q_ The list of compatible two-qubit gates `co2_q_gates` can be any list containing anywhere from 1 to all 5 of these lists. - In order to allow for convenient sampling of some commonly useful distributions, - `co2_q_gates` can be a list of lists of lists of compatible 2-qubit gates ("nested" sampling). - In this case, a list of lists of compatible 2-qubit gates is picked according to the distribution - `co2_q_gates_prob`, and then one of the sublists of compatible 2-qubit gates in the selected list is - then chosen uniformly at random. For example, this is useful for sampling a layer containing one - uniformly random 2-qubit gate with probability p and a layer of 1-qubit gates with probability - 1-p. Here, we can specify `co2_q_gates` as [[],[[the 1st 2Q-gate,],[the 2nd 2Q-gate,], ...]] and - set `two_q_prob=1` and `co2_q_gates_prob = [1-p,p].
+ In order to allow for convenient sampling of some commonly useful distributions, + `co2_q_gates` can be a list of lists of lists of compatible 2-qubit gates ("nested" sampling). + In this case, a list of lists of compatible 2-qubit gates is picked according to the distribution + `co2_q_gates_prob`, and then one of the sublists of compatible 2-qubit gates in the selected list is + then chosen uniformly at random. For example, this is useful for sampling a layer containing one + uniformly random 2-qubit gate with probability p and a layer of 1-qubit gates with probability + 1-p. Here, we can specify `co2_q_gates` as `[[],[[the 1st 2Q-gate,],[the 2nd 2Q-gate,], ...]]` and + set `two_q_prob=1` and `co2_q_gates_prob = [1-p,p]`. Parameters ---------- @@ -2164,7 +2164,7 @@ def create_clifford_rb_circuit(pspec, clifford_compilations, length, qubit_label If not None, a list of the qubits that the RB circuit is to be sampled for. This should be all or a subset of the qubits in the device specified by the QubitProcessorSpec `pspec`. If None, it is assumed that the RB circuit should be over all the qubits. Note that the - ordering of this list is the order of the ``wires'' in the returned circuit, but is otherwise + ordering of this list is the order of the "wires" in the returned circuit, but is otherwise irrelevant. If desired, a circuit that explicitly idles on the other qubits can be obtained by using methods of the Circuit object. @@ -2178,7 +2178,7 @@ def create_clifford_rb_circuit(pspec, clifford_compilations, length, qubit_label Some of the Clifford compilation algorithms in pyGSTi (including the default algorithm) are randomized, and the lowest-cost circuit is chosen from all the circuit generated in the iterations of the algorithm. This is the number of iterations used. The time required to - generate a CRB circuit is linear in `citerations` * (`length`+2). Lower-depth / lower 2-qubit + generate a CRB circuit is linear in `citerations` * (`length` + 2). Lower-depth / lower 2-qubit gate count compilations of the Cliffords are important in order to successfully implement CRB on more qubits. @@ -2186,20 +2186,26 @@ def create_clifford_rb_circuit(pspec, clifford_compilations, length, qubit_label A list of arguments that are handed to compile_clifford() function, which includes all the optional arguments of compile_clifford() *after* the `iterations` option (set by `citerations`). In order, this list should be values for: - - algorithm : str. A string that specifies the compilation algorithm. The default in - compile_clifford() will always be whatever we consider to be the 'best' all-round - algorith, - - aargs : list. A list of optional arguments for the particular compilation algorithm. - - costfunction : 'str' or function. The cost-function from which the "best" compilation - for a Clifford is chosen from all `citerations` compilations. The default costs a - circuit as 10x the num. of 2-qubit gates in the circuit + 1x the depth of the circuit. - - prefixpaulis : bool. Whether to prefix or append the Paulis on each Clifford. - - paulirandomize : bool. Whether to follow each layer in the Clifford circuit with a - random Pauli on each qubit (compiled into native gates). I.e., if this is True the - native gates are Pauli-randomized. When True, this prevents any coherent errors adding - (on average) inside the layers of each compiled Clifford, at the cost of increased - circuit depth. Defaults to False. - For more information on these options, see the compile_clifford() docstring. 
+ + algorithm : str. A string that specifies the compilation algorithm. The default in + compile_clifford() will always be whatever we consider to be the 'best' all-round + algorithm + + aargs : list. A list of optional arguments for the particular compilation algorithm. + + costfunction : 'str' or function. The cost-function from which the "best" compilation + for a Clifford is chosen from all `citerations` compilations. The default costs a + circuit as 10x the num. of 2-qubit gates in the circuit + 1x the depth of the circuit. + + prefixpaulis : bool. Whether to prefix or append the Paulis on each Clifford. + + paulirandomize : bool. Whether to follow each layer in the Clifford circuit with a + random Pauli on each qubit (compiled into native gates). I.e., if this is True the + native gates are Pauli-randomized. When True, this prevents any coherent errors adding + (on average) inside the layers of each compiled Clifford, at the cost of increased + circuit depth. Defaults to False. + + For more information on these options, see the :func:`compile_clifford()` docstring. seed : int, optional A seed to initialize the random number generator used for creating random clifford @@ -2207,9 +2213,10 @@ Returns ------- - Circuit + full_circuit : Circuit A random CRB circuit over the "native" gate-set specified. - Tuple + + idealout : tuple A length-n tuple of integers in [0,1], corresponding to the error-free outcome of the circuit. Always all zeros if `randomizeout` is False. The ith element of the tuple corresponds to the error-free outcome for the qubit labelled by: the ith element of @@ -2217,6 +2224,7 @@ In both cases, the ith element of the tuple corresponds to the error-free outcome for the qubit on the ith wire of the output circuit. """ + # Find the labels of the qubits to create the circuit for. if qubit_labels is not None: qubits = qubit_labels[:] # copy this list else: qubits = pspec.qubit_labels[:] # copy this list @@ -2419,28 +2427,28 @@ def create_mirror_rb_circuit(pspec, absolute_compilation, length, qubit_labels=N The "mirror RB length" of the circuit, which is closely related to the circuit depth. It must be an even integer, and can be zero. - - If `localclifford` and `paulirandomize` are False, this is the depth of the sampled circuit. - The first length/2 layers are all sampled independently according to the sampler specified by - `sampler`. The remaining half of the circuit is the "inversion" circuit that is determined - by the first half. + If `localclifford` and `paulirandomize` are False, this is the depth of the sampled circuit. + The first length/2 layers are all sampled independently according to the sampler specified by + `sampler`. The remaining half of the circuit is the "inversion" circuit that is determined + by the first half. - - If `paulirandomize` is True and `localclifford` is False, the depth of the circuits is - 2*length+1 with odd-indexed layers sampled according to the sampler specified by `sampler, and - the the zeroth layer + the even-indexed layers consisting of random 1-qubit Pauli gates. + If `paulirandomize` is True and `localclifford` is False, the depth of the circuits is + 2*length+1 with odd-indexed layers sampled according to the sampler specified by `sampler`, and + the zeroth layer + the even-indexed layers consisting of random 1-qubit Pauli gates.
- - If `paulirandomize` and `localclifford` are True, the depth of the circuits is - 2*length+1 + X where X is a random variable (between 0 and normally <= ~12-16) that accounts for - the depth from the layer of random 1-qubit Cliffords at the start and end of the circuit. + If `paulirandomize` and `localclifford` are True, the depth of the circuits is + 2*length+1 + X where X is a random variable (between 0 and normally <= ~12-16) that accounts for + the depth from the layer of random 1-qubit Cliffords at the start and end of the circuit. - - If `paulirandomize` is False and `localclifford` is True, the depth of the circuits is - length + X where X is a random variable (between 0 and normally <= ~12-16) that accounts for - the depth from the layer of random 1-qubit Cliffords at the start and end of the circuit. + If `paulirandomize` is False and `localclifford` is True, the depth of the circuits is + length + X where X is a random variable (between 0 and normally <= ~12-16) that accounts for + the depth from the layer of random 1-qubit Cliffords at the start and end of the circuit. qubit_labels : list, optional If not None, a list of the qubits that the RB circuit is to be sampled for. This should be all or a subset of the qubits in the device specified by the QubitProcessorSpec `pspec`. If None, it is assumed that the RB circuit should be over all the qubits. Note that the - ordering of this list is the order of the ``wires'' in the returned circuit, but is otherwise + ordering of this list is the order of the "wires" in the returned circuit, but is otherwise irrelevant. sampler : str or function, optional @@ -2481,14 +2489,18 @@ def create_mirror_rb_circuit(pspec, absolute_compilation, length, qubit_labels=N Circuit A random MRB circuit, sampled as specified, of depth: - - `length`, if not paulirandomize and not local clifford. - - 2*`length`+1 if paulirandomize and not local clifford. - - `length` + X, if not paulirandomize and local clifford, where X is a random variable - that accounts for the depth from the layers of random 1-qubit Cliffords (X = 2 if the 1 - qubit Clifford gates are "native" gates in the QubitProcessorSpec). - - 2*`length`+1 + X, if paulirandomize and local clifford, where X is a random variable - that accounts for the depth from the layers of random 1-qubit Cliffords (X = 2 if the 1 - qubit Clifford gates are "native" gates in the QubitProcessorSpec). + `length`, if not paulirandomize and not local clifford. + + 2*`length`+1 if paulirandomize and not local clifford. + + `length` + X, if not paulirandomize and local clifford, where X is a random variable + that accounts for the depth from the layers of random 1-qubit Cliffords (X = 2 if the 1 + qubit Clifford gates are "native" gates in the QubitProcessorSpec). + + 2*`length`+1 + X, if paulirandomize and local clifford, where X is a random variable + that accounts for the depth from the layers of random 1-qubit Cliffords (X = 2 if the 1 + qubit Clifford gates are "native" gates in the QubitProcessorSpec). + Tuple A length-n tuple of integers in [0,1], corresponding to the error-free outcome of the circuit. Always all zeros if `randomizeout` is False. 
The ith element of the tuple diff --git a/pygsti/algorithms/robust_phase_estimation.py b/pygsti/algorithms/robust_phase_estimation.py index 6d8448977..c5ec90a86 100644 --- a/pygsti/algorithms/robust_phase_estimation.py +++ b/pygsti/algorithms/robust_phase_estimation.py @@ -19,14 +19,15 @@ class RobustPhaseEstimation(object): Runs the non-adaptive RPE algorithm using a dictionary of measurement results, `Q.raw_angles`, containing the angles calculated from the probabilities: - P^{γ'γ}_{Nâ‚–s} = |<γ' y| U^Nâ‚– |γ x>|² = |<γ' x| U^Nâ‚– |-γ y>|² = (1 ± sin(θ))/2 - P^{γ'γ}_{Nâ‚–c} = |<γ' x| U^Nâ‚– |γ x>|² = |<γ' y| U^Nâ‚– | γ y>|² = (1 ± cos(θ))/2 + + `P^{γ'γ}_{Nâ‚–s} = |<γ' y| U^Nâ‚– |γ x>|² = |<γ' x| U^Nâ‚– |-γ y>|² = (1 ± sin(θ))/2` + `P^{γ'γ}_{Nâ‚–c} = |<γ' x| U^Nâ‚– |γ x>|² = |<γ' y| U^Nâ‚– | γ y>|² = (1 ± cos(θ))/2` - Expect measured[Nâ‚–] = θ. + Expect `measured[Nâ‚–] = θ`. Overview: - At each generation, use the previous estimated angle to select the 2Ï€/L window + At each generation, use the previous estimated angle to select the `2Ï€/L` window (of which the measurements cannot distinguish). Returns an result object. theta is the estimated angle, angle_estimates are diff --git a/pygsti/baseobjs/basis.py b/pygsti/baseobjs/basis.py index 5141de9d3..74a76e9ba 100644 --- a/pygsti/baseobjs/basis.py +++ b/pygsti/baseobjs/basis.py @@ -333,7 +333,7 @@ def elsize(self): int """ if self.elshape is None: return 0 - return int(_np.product(self.elshape)) + return int(_np.prod(self.elshape)) @property def first_element_is_identity(self): @@ -366,7 +366,7 @@ def is_complete(self): def is_partial(self): """ - The negative of :method:`is_complete`, effectively "is_incomplete". + The negative of :meth:`is_complete`, effectively "is_incomplete". Returns ------- @@ -509,7 +509,7 @@ def reverse_transform_matrix(self, from_basis): """ Get the matrix that transforms a vector from `from_basis` to this basis. - The reverse of :method:`create_transform_matrix`. + The reverse of :meth:`create_transform_matrix`. Parameters ---------- @@ -781,7 +781,7 @@ class LazyBasis(Basis): def __init__(self, name, longname, real, sparse): """ Creates a new LazyBasis. Parameters are the same as those to - :method:`Basis.__init__`. + :meth:`Basis.__init__`. """ self._elements = None # "natural-shape" elements - can be vecs or matrices self._labels = None # element labels @@ -957,7 +957,7 @@ def __init__(self, elements, labels=None, name=None, longname=None, real=False, if elshape is None: elshape = el.shape else: assert(elshape == el.shape), "Inconsistent element shapes!" self.elements.append(el) - dim = int(_np.product(elshape)) + dim = int(_np.prod(elshape)) self.ellookup = {lbl: el for lbl, el in zip(self.labels, self.elements)} # fast by-label element lookup if vector_elements is not None: @@ -1210,7 +1210,7 @@ class DirectSumBasis(LazyBasis): ---------- component_bases : iterable A list of the component bases. Each list elements may be either - a Basis object or a tuple of arguments to :function:`Basis.cast`, + a Basis object or a tuple of arguments to :func:`Basis.cast`, e.g. `('pp',4)`. name : str, optional @@ -1236,7 +1236,7 @@ def __init__(self, component_bases, name=None, longname=None): ---------- component_bases : iterable A list of the component bases. Each list elements may be either - a Basis object or a tuple of arguments to :function:`Basis.cast`, + a Basis object or a tuple of arguments to :func:`Basis.cast`, e.g. `('pp',4)`. 
name : str, optional @@ -1459,7 +1459,7 @@ def to_elementstd_transform_matrix(self): number of vectors). """ assert(not self.sparse), "to_elementstd_transform_matrix not implemented for sparse mode" - expanddim = self.elsize # == _np.product(self.elshape) + expanddim = self.elsize # == _np.prod(self.elshape) if self.sparse: toSimpleStd = _sps.lil_matrix((expanddim, self.size), dtype='complex') else: @@ -1542,7 +1542,7 @@ class TensorProdBasis(LazyBasis): ---------- component_bases : iterable A list of the component bases. Each list elements may be either - a Basis object or a tuple of arguments to :function:`Basis.cast`, + a Basis object or a tuple of arguments to :func:`Basis.cast`, e.g. `('pp',4)`. name : str, optional @@ -1563,7 +1563,7 @@ def __init__(self, component_bases, name=None, longname=None): ---------- component_bases : iterable A list of the component bases. Each list elements may be either - a Basis object or a tuple of arguments to :function:`Basis.cast`, + a Basis object or a tuple of arguments to :func:`Basis.cast`, e.g. `('pp',4)`. name : str, optional @@ -1617,7 +1617,7 @@ def dim(self): spans. Equivalently, the length of the `vector_elements` of the basis. """ - dim = int(_np.product([c.dim for c in self.component_bases])) + dim = int(_np.prod([c.dim for c in self.component_bases])) #NOTE: this is actually to restrictive -- what we need is a test/flag for whether the elements of a # basis are in their "natrual" representation where it makes sense to take tensor products. For @@ -1635,7 +1635,7 @@ def size(self): """ The number of elements (or vector-elements) in the basis. """ - return int(_np.product([c.size for c in self.component_bases])) + return int(_np.prod([c.size for c in self.component_bases])) @property def elshape(self): @@ -1840,7 +1840,7 @@ def embed_label(cls, lbl, target_labels): @classmethod def unembed_label(cls, lbl, target_labels): """ - Convenience method that performs the reverse of :method:`embed_label` + Convenience method that performs the reverse of :meth:`embed_label` Parameters ---------- diff --git a/pygsti/baseobjs/basisconstructors.py b/pygsti/baseobjs/basisconstructors.py index 7037070f9..5fa8f3a19 100644 --- a/pygsti/baseobjs/basisconstructors.py +++ b/pygsti/baseobjs/basisconstructors.py @@ -709,17 +709,22 @@ def qsim_matrices(matrix_dim): The returned matrices are given in the QuantumSim representation of the density matrix space, and are thus kronecker products of the standard representation of the QuantumSim matrices: - '0' == [[1, 0],[0,0]] - 'X' == [[0, 1],[1,0]] - 'Y' == [[0,-1.j],[1.j,0]] - '1' == [[0, 0],[0,1]] + + * `'0' == [[1, 0],[0,0]]` + * `'X' == [[0, 1],[1,0]]` + * `'Y' == [[0,-1.j],[1.j,0]]` + * `'1' == [[0, 0],[0,1]]` + The normalization is such that the resulting basis is orthonormal under the trace inner product: - Tr( dot(Mi,Mj) ) == delta_ij. + + `Tr( dot(Mi,Mj) ) == delta_ij`. + In the returned list, the right-most factor of the kronecker product varies the fastest, so, for example, when matrix_dim == 4 the returned list is: - [ 00,0X,0Y,01,X0,XX,XY,X1,Y0,Y0,YX,YY,Y1,10,1X,1Y,11 ]. + + `[ 00,0X,0Y,01,X0,XX,XY,X1,Y0,Y0,YX,YY,Y1,10,1X,1Y,11 ]`. Parameters ---------- @@ -736,7 +741,7 @@ def qsim_matrices(matrix_dim): Notes ----- Matrices are ordered with first qubit being most significant, - e.g., for 2 qubits: 00, 0X, 0Y, 01, X0, XX, XY, X1, Y0, ... 11 + e.g., for 2 qubits: `00, 0X, 0Y, 01, X0, XX, XY, X1, Y0, ... 
11` """ sig0q = _np.array([[1., 0], [0, 0]], dtype='complex') sigXq = _np.array([[0, 1], [1, 0]], dtype='complex') @@ -836,14 +841,14 @@ def pp_matrices(matrix_dim, max_weight=None, normalize=True): Returns ------- list - A list of N numpy arrays each of shape (matrix_dim, matrix_dim), where N == matrix_dim^2, + A list of N numpy arrays each of shape (matrix_dim, matrix_dim), where `N == matrix_dim^2`, the dimension of the density-matrix space. (Exception: when max_weight is not None, the returned list may have fewer than N elements.) Notes ----- Matrices are ordered with first qubit being most significant, - e.g., for 2 qubits: II, IX, IY, IZ, XI, XX, XY, XZ, YI, ... ZZ + e.g., for 2 qubits: `II, IX, IY, IZ, XI, XX, XY, XZ, YI, ... ZZ` """ _check_dim(matrix_dim) sigmaVec = (id2x2, sigmax, sigmay, sigmaz) diff --git a/pygsti/baseobjs/errorgenbasis.py b/pygsti/baseobjs/errorgenbasis.py index 2f4f92bf8..8f254198e 100644 --- a/pygsti/baseobjs/errorgenbasis.py +++ b/pygsti/baseobjs/errorgenbasis.py @@ -67,6 +67,11 @@ def elemgen_supports_and_matrices(self): def label_index(self, label, ok_if_missing=False): """ TODO: docstring + + Parameters + ---------- + label + ok_if_missing : bool If True, then returns `None` instead of an integer when the given label is not present. """ @@ -194,7 +199,7 @@ def _count_uptriangle_labels_for_support(cls, support, left_support, type_str, t right_offsets = [(i + 1 if ii < ifirst_trivial else 0) for ii, i in enumerate(left_inds)] if n1 == n: right_offsets[-1] += 1 # advance past diagonal element start_at = _np.dot(right_offsets, placevals) - cnt += _np.product(right_lengths) - start_at + cnt += _np.prod(right_lengths) - start_at return cnt @@ -447,6 +452,11 @@ def elemgen_supports_and_matrices(self): def label_index(self, elemgen_label, ok_if_missing=False): """ TODO: docstring + + Parameters + ---------- + elemgen_label + ok_if_missing : bool If True, then returns `None` instead of an integer when the given label is not present. """ diff --git a/pygsti/baseobjs/label.py b/pygsti/baseobjs/label.py index 29929bca2..8a081a882 100644 --- a/pygsti/baseobjs/label.py +++ b/pygsti/baseobjs/label.py @@ -187,7 +187,7 @@ def expand_subcircuits(self): ------- tuple A tuple of component Labels (none of which should be - :class:`CircuitLabel`s). + :class:`CircuitLabel` objects). """ return (self,) # most labels just expand to themselves @@ -1157,7 +1157,7 @@ def expand_subcircuits(self): ------- tuple A tuple of component Labels (none of which should be - :class:`CircuitLabel`s). + :class:`CircuitLabel` objects). """ ret = [] expanded_comps = [x.expand_subcircuits() for x in self.components] @@ -1426,7 +1426,7 @@ def expand_subcircuits(self): ------- tuple A tuple of component Labels (none of which should be - :class:`CircuitLabel`s). + :class:`CircuitLabel` objects). """ ret = [] expanded_comps = [x.expand_subcircuits() for x in self.components] @@ -1721,7 +1721,7 @@ def expand_subcircuits(self): ------- tuple A tuple of component Labels (none of which should be - :class:`CircuitLabel`s). + :class:`CircuitLabel` objects). 
""" return tuple(_itertools.chain(*[x.expand_subcircuits() for x in self.components])) * self.reps diff --git a/pygsti/baseobjs/mongoserializable.py b/pygsti/baseobjs/mongoserializable.py index 3cd3169c4..fe1a8bdb7 100644 --- a/pygsti/baseobjs/mongoserializable.py +++ b/pygsti/baseobjs/mongoserializable.py @@ -40,7 +40,7 @@ def _create_obj_from_doc_and_mongodb(cls, doc, mongodb, **kwargs): def _add_auxiliary_write_ops_and_update_doc(self, doc, write_ops, mongodb, collection_name, overwrite_existing, **kwargs): - """ Add to `write_ops` and update `doc` so that all of `self`'s data is serialized """ + """ Add to `write_ops` and update `doc` so that all of `self` 's data is serialized """ raise NotImplementedError("Subclasses must implement this!") @classmethod @@ -67,7 +67,7 @@ def from_mongodb(cls, mongodb, doc_id, **kwargs): document is loaded from the collection given by the `collection_name` attribute of this class. - **kwargs : dict + `**kwargs` : dict Additional keyword arguments poentially used by subclass implementations. Any arguments allowed by a subclass's `_create_obj_from_doc_and_mongodb` method is allowed here. @@ -103,7 +103,7 @@ def from_mongodb_doc(cls, mongodb, collection_name, doc, **kwargs): The already-retrieved main document for the object being loaded. This takes the place of giving an identifier for this object. - **kwargs : dict + `**kwargs` : dict Additional keyword arguments poentially used by subclass implementations. Any arguments allowed by a subclass's `_create_obj_from_doc_and_mongodb` method is allowed here. @@ -145,7 +145,7 @@ def write_to_mongodb(self, mongodb, session=None, overwrite_existing=False, **kw a ValueError to be raised if a document with the given `_id` already exists and is different from what is being written. - **kwargs : dict + `**kwargs` : dict Additional keyword arguments poentially used by subclass implementations. Any arguments allowed by a subclass's `_add_auxiliary_write_ops_and_update_doc` method is allowed here. @@ -163,12 +163,12 @@ def add_mongodb_write_ops(self, write_ops, mongodb, overwrite_existing=False, ** """ Accumulate write and update operations for writing this object to a MongoDB database. - Similar to :method:`write_to_mongodb` but collects write operations instead of actually + Similar to :meth:`write_to_mongodb` but collects write operations instead of actually executing any write operations on the database. This function may be preferred to - :method:`write_to_mongodb` when this object is being written as a part of a larger entity + :meth:`write_to_mongodb` when this object is being written as a part of a larger entity and executing write operations is saved until the end. - As in :method:`write_to_mongodb`, `self.collection_name` is the collection name and `_id` is either: + As in :meth:`write_to_mongodb`, `self.collection_name` is the collection name and `_id` is either: 1) the ID used by a previous write or initial read-in, if one exists, OR 2) a new random `bson.objectid.ObjectId` @@ -187,7 +187,7 @@ def add_mongodb_write_ops(self, write_ops, mongodb, overwrite_existing=False, ** a ValueError to be raised if a document with the given `_id` already exists and is different from what is being written. - **kwargs : dict + `**kwargs` : dict Additional keyword arguments poentially used by subclass implementations. Any arguments allowed by a subclass's `_add_auxiliary_write_ops_and_update_doc` method is allowed here. 
@@ -414,7 +414,7 @@ def add_gridfs_put_op(self, collection_name, doc_id, binary_data, overwrite_exis Add a GridFS put operation to this dictionary of write operations. This is a special type of operation for placing large chunks of binary data into a MongoDB. - Arguments are similar to :method:`add_one_op`. + Arguments are similar to :meth:`add_one_op`. """ import gridfs as _gridfs fs = _gridfs.GridFS(mongodb, collection=collection_name) @@ -432,8 +432,8 @@ def execute(self, mongodb): """ Execute all of the "queued" operations within this dictionary on a MongoDB instance. - Note that `mongodb` should be the same as the `mongodb` given to any :method:`add_one_op` and - :method:`add_gridfs_put_op` method calls. The session given at the initialization of + Note that `mongodb` should be the same as the `mongodb` given to any :meth:`add_one_op` and + :meth:`add_gridfs_put_op` method calls. The session given at the initialization of this object is used for these write operations. On exit, this dictionary is empty, indicating there are no more queued operations. @@ -487,13 +487,13 @@ def prepare_doc_for_existing_doc_check(doc, existing_doc, set_id=True, convert_t Prepares a to-be-inserted document for comparison with an existing document. Optionally (see parameters): - 1) sets _id of `doc` to that of `existing_doc`. This is useful in cases where the _id + 1. sets _id of `doc` to that of `existing_doc` . This is useful in cases where the _id field is redundant with other uniquely identifying fields in the document, and so inserted documents don't need to match this field. - 2) converts all of `doc`'s tuples to lists, as the existing_doc is typically read from a MongoDB + 2. converts all of `doc` 's tuples to lists, as the existing_doc is typically read from a MongoDB which only stores lists and doesn't distinguish between lists and tuples. - 3) converts numpy datatypes to native python types - 4) rounds floating point values + 3. converts numpy datatypes to native python types + 4. rounds floating point values Parameters ---------- diff --git a/pygsti/baseobjs/opcalc/slowopcalc.py b/pygsti/baseobjs/opcalc/slowopcalc.py index 81f55a418..b887ba591 100644 --- a/pygsti/baseobjs/opcalc/slowopcalc.py +++ b/pygsti/baseobjs/opcalc/slowopcalc.py @@ -25,7 +25,7 @@ def _typed_bulk_eval_compact_polynomials(vtape, ctape, paramvec, dest_shape, dty vtape, ctape : numpy.ndarray Specifies "variable" and "coefficient" 1D numpy arrays to evaluate. These "tapes" can be generated by concatenating the tapes of individual - complact-polynomial tuples returned by :method:`Polynomial.compact`. + compact-polynomial tuples returned by :meth:`Polynomial.compact`. paramvec : array-like An object that can be indexed so that `paramvec[i]` gives the @@ -174,7 +174,7 @@ def compact_deriv(vtape, ctape, wrt_params): vtape, ctape : numpy.ndarray Specifies "variable" and "coefficient" 1D numpy arrays to differentiate. These "tapes" can be generated by concatenating the tapes of individual - complact-polynomial tuples returned by :method:`Polynomial.compact`. + compact-polynomial tuples returned by :meth:`Polynomial.compact`. wrt_params : list The variable indices to differentiate with respect to.
They @@ -259,4 +259,4 @@ def compact_deriv(vtape, ctape, wrt_params): def float_product(ar): - return _np.product(ar) + return _np.prod(ar) diff --git a/pygsti/baseobjs/polynomial.py b/pygsti/baseobjs/polynomial.py index eb77c2e16..eed6e88c0 100644 --- a/pygsti/baseobjs/polynomial.py +++ b/pygsti/baseobjs/polynomial.py @@ -278,7 +278,7 @@ def evaluate(self, variable_values): #FUTURE: make this function smarter (Russian peasant) ret = 0 for ivar, coeff in self.coeffs.items(): - ret += coeff * _np.product([variable_values[i] for i in ivar]) + ret += coeff * _np.prod([variable_values[i] for i in ivar]) return ret def compact(self, complex_coeff_tape=True): @@ -286,7 +286,7 @@ def compact(self, complex_coeff_tape=True): Generate a compact form of this polynomial designed for fast evaluation. The resulting "tapes" can be evaluated using - :function:`opcalc.bulk_eval_compact_polynomials`. + :func:`opcalc.bulk_eval_compact_polynomials`. Parameters ---------- @@ -365,7 +365,7 @@ def mapvec_indices(self, mapvec): """ Performs a bulk find & replace on this polynomial's variable indices. - This function is similar to :method:`map_indices` but uses a *vector* + This function is similar to :meth:`map_indices` but uses a *vector* to describe *individual* index updates instead of a function for increased performance. @@ -374,7 +374,7 @@ def mapvec_indices(self, mapvec): mapvec : numpy.ndarray An array whose i-th element gives the updated "new" index for the i-th variable. Note that this vector maps *individual* - variable indices old->new, whereas `mapfn` in :method:`map_indices` + variable indices old->new, whereas `mapfn` in :meth:`map_indices` maps between *tuples* of indices. Returns @@ -389,7 +389,7 @@ def mapvec_indices_inplace(self, mapvec): """ Performs an in-place bulk find & replace on this polynomial's variable indices. - This function is similar to :method:`map_indices_inplace` but uses a *vector* + This function is similar to :meth:`map_indices_inplace` but uses a *vector* to describe *individual* index updates instead of a function for increased performance. @@ -399,7 +399,7 @@ def mapvec_indices_inplace(self, mapvec): An array whose i-th element gives the updated "new" index for the i-th variable. Note that this vector maps *individual* variable indices old->new, whereas `mapfn` in - :method:`map_indices_inplace` maps between *tuples* of indices. + :meth:`map_indices_inplace` maps between *tuples* of indices. Returns ------- @@ -768,7 +768,7 @@ def to_rep(self): # , max_num_vars=None not needed anymore -- given at __init__ # #FUTURE: make this function smarter (Russian peasant) # ret = 0 # for ivar, coeff in self.items(): -# ret += coeff * _np.product([variable_values[i] for i in ivar]) +# ret += coeff * _np.prod([variable_values[i] for i in ivar]) # assert(_np.isclose(ret, self.fastpoly.evaluate(variable_values))) # self._check_fast_polynomial() # return ret @@ -778,7 +778,7 @@ def to_rep(self): # , max_num_vars=None not needed anymore -- given at __init__ # Generate a compact form of this polynomial designed for fast evaluation. # # The resulting "tapes" can be evaluated using -# :function:`opcalc.bulk_eval_compact_polynomials`. +# :func:`opcalc.bulk_eval_compact_polynomials`. # # Parameters # ---------- @@ -1138,7 +1138,7 @@ def bulk_load_compact_polynomials(vtape, ctape, keep_compact=False, max_num_vars ctape : numpy.ndarray A 1D array of coefficients that, together with `vtape`, specify an - efficient means for evaluating a set of polynoials. 
+ efficient means for evaluating a set of polynomials. keep_compact : bool, optional If True the returned list has elements which are (vtape,ctape) tuples diff --git a/pygsti/baseobjs/qubitgraph.py b/pygsti/baseobjs/qubitgraph.py index 294d9b2b0..79315c419 100644 --- a/pygsti/baseobjs/qubitgraph.py +++ b/pygsti/baseobjs/qubitgraph.py @@ -334,7 +334,7 @@ def node_names(self): All the node labels of this graph. These correspond to integer indices where appropriate, - e.g. for :method:`shortest_path_distance_matrix`. + e.g. for :meth:`shortest_path_distance_matrix`. Returns ------- @@ -755,7 +755,7 @@ def shortest_path(self, node1, node2): def shortest_path_edges(self, node1, node2): """ - Like :method:`shortest_path`, but returns a list of (nodeA,nodeB) tuples. + Like :meth:`shortest_path`, but returns a list of (nodeA,nodeB) tuples. These tuples define a path from `node1` to `node2`, so the first tuple's nodeA == `node1` and the final tuple's nodeB == `node2`. @@ -825,7 +825,7 @@ def shortest_path_distance_matrix(self): This matrix is indexed by the integer-index of each node label (as specified to __init__). The list of index-ordered node labels is given - by :method:`node_names`. + by :meth:`node_names`. Returns ------- @@ -842,7 +842,7 @@ def shortest_path_predecessor_matrix(self): This matrix is indexed by the integer-index of each node label (as specified to __init__). The list of index-ordered node labels is given - by :method:`node_names`. + by :meth:`node_names`. Returns ------- diff --git a/pygsti/baseobjs/resourceallocation.py b/pygsti/baseobjs/resourceallocation.py index 028e4d66f..e0d27a8f8 100644 --- a/pygsti/baseobjs/resourceallocation.py +++ b/pygsti/baseobjs/resourceallocation.py @@ -321,7 +321,7 @@ def gather_base(self, result, local, slice_of_global, unit_ralloc=None, all_gath #OLD: gathered_data = gather_comm.allgather(local) # could change this to Allgatherv (?) slices = gather_comm.allgather(slice_of_global if participating else None) shapes = gather_comm.allgather(local.shape if participating else (0,)) - sizes = [_np.product(shape) for shape in shapes] + sizes = [_np.prod(shape) for shape in shapes] gathered_data = _np.empty(sum(sizes), dtype=local.dtype) gather_comm.Allgatherv(local.flatten() if participating else _np.empty(0, dtype=local.dtype), (gathered_data, sizes)) @@ -331,7 +331,7 @@ def gather_base(self, result, local, slice_of_global, unit_ralloc=None, all_gath slices = gather_comm.gather(slice_of_global if participating else None, root=0) if gather_comm.rank == 0: - sizes = [_np.product(shape) for shape in shapes] + sizes = [_np.prod(shape) for shape in shapes] gathered_data = _np.empty(sum(sizes), dtype=local.dtype) recvbuf = (gathered_data, sizes) else: @@ -510,7 +510,7 @@ def allreduce_sum_simple(self, local, unit_ralloc=None): """ A simplified sum over quantities on different processors that doesn't use shared memory. - The shared memory usage of :method:`allreduce_sum` can be overkill when just summing a single + The shared memory usage of :meth:`allreduce_sum` can be overkill when just summing a single scalar quantity. This method provides a way to easily sum a quantity across all the processors in this :class:`ResourceAllocation` object using a unit resource allocation.
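(The `_np.product` -> `_np.prod` changes in this hunk and throughout the patch track NumPy's deprecation of the `product` alias in 1.25, later removed in 2.0; `prod` is the stable spelling. A trivial before/after sketch:)

import numpy as np

shape = (2, 3, 4)
size = int(np.prod(shape))  # 24; formerly int(np.product(shape)), now removed
assert size == 24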
diff --git a/pygsti/baseobjs/smartcache.py b/pygsti/baseobjs/smartcache.py index e0e462b1c..078666649 100644 --- a/pygsti/baseobjs/smartcache.py +++ b/pygsti/baseobjs/smartcache.py @@ -221,7 +221,7 @@ def add_digest(self, custom): def low_overhead_cached_compute(self, fn, arg_vals, kwargs=None): """ - Cached compute with less profiling. See :method:`cached_compute` docstring. + Cached compute with less profiling. See :meth:`cached_compute` docstring. Parameters ---------- diff --git a/pygsti/baseobjs/statespace.py b/pygsti/baseobjs/statespace.py index 91f325331..b97797695 100644 --- a/pygsti/baseobjs/statespace.py +++ b/pygsti/baseobjs/statespace.py @@ -37,7 +37,7 @@ def cast(cls, obj): ---------- obj : StateSpace or int or list Either an already-built state space object or an integer specifying the number of qubits, - or a list of labels as would be provided to the first argument of :method:`ExplicitStateSpace.__init__`. + or a list of labels as would be provided to the first argument of :meth:`ExplicitStateSpace.__init__`. Returns ------- @@ -610,7 +610,7 @@ def udim(self): """ Integer Hilbert (unitary operator) space dimension of this quantum state space. """ - return _np.product(self.qudit_udims) + return _np.prod(self.qudit_udims) @property def dim(self): @@ -622,7 +622,7 @@ def num_qudits(self): # may raise ValueError if the state space doesn't consist """ The number of qubits in this quantum state space. """ - return len(self.qubit_labels) + return len(self.qudit_labels) @property def num_tensor_product_blocks(self): @@ -1060,17 +1060,17 @@ def is_label(x): self.tpb_dims = [] self.tpb_udims = [] for iTPB, tpbLabels in enumerate(self.labels): - float_prod = _np.product(_np.array([self.label_dims[lbl] for lbl in tpbLabels], 'd')) + float_prod = _np.prod(_np.array([self.label_dims[lbl] for lbl in tpbLabels], 'd')) if float_prod >= float(_sys.maxsize): # too many qubits to hold dimension in an integer self.tpb_dims.append(_np.inf) else: - self.tpb_dims.append(int(_np.product([self.label_dims[lbl] for lbl in tpbLabels]))) + self.tpb_dims.append(int(_np.prod([self.label_dims[lbl] for lbl in tpbLabels]))) - float_prod = _np.product(_np.array([self.label_udims[lbl] for lbl in tpbLabels], 'd')) + float_prod = _np.prod(_np.array([self.label_udims[lbl] for lbl in tpbLabels], 'd')) if float_prod >= float(_sys.maxsize): # too many qubits to hold dimension in an integer self.tpb_udims.append(_np.inf) else: - self.tpb_udims.append(int(_np.product([self.label_udims[lbl] for lbl in tpbLabels]))) + self.tpb_udims.append(int(_np.prod([self.label_udims[lbl] for lbl in tpbLabels]))) self.tpb_index.update({lbl: iTPB for lbl in tpbLabels}) diff --git a/pygsti/baseobjs/verbosityprinter.py b/pygsti/baseobjs/verbosityprinter.py index c17c1caf2..1cbc4abca 100644 --- a/pygsti/baseobjs/verbosityprinter.py +++ b/pygsti/baseobjs/verbosityprinter.py @@ -132,7 +132,7 @@ class VerbosityPrinter(object): comm : mpi4py.MPI.Comm or ResourceAllocation, optional Restricts output if the program is running in parallel (By default, - if the rank is 0, output is sent to screen, and otherwise sent to commfiles 1, 2, ... 
+ if the rank is 0, output is sent to screen, and otherwise sent to commfiles `1, 2, ...` warnings : bool, optional Whether or not to print warnings @@ -191,7 +191,7 @@ def __init__(self, verbosity=1, filename=None, comm=None, warnings=True, split=F comm : mpi4py.MPI.Comm or ResourceAllocation, optional Restricts output if the program is running in parallel (By default, - if the rank is 0, output is sent to screen, and otherwise sent to commfiles 1, 2, ... + if the rank is 0, output is sent to screen, and otherwise sent to commfiles `1, 2, ...` warnings : bool, optional Whether or not to print warnings @@ -580,7 +580,7 @@ def start_recording(self): Begins recording the output (to memory). Begins recording (in memory) a list of `(type, verbosityLevel, message)` - tuples that is returned by the next call to :method:`stop_recording`. + tuples that is returned by the next call to :meth:`stop_recording`. Returns ------- @@ -602,7 +602,7 @@ def stop_recording(self): """ Stops recording and returns recorded output. - Stops a "recording" started by :method:`start_recording` and returns the + Stops a "recording" started by :meth:`start_recording` and returns the list of `(type, verbosityLevel, message)` tuples that have been recorded since then. diff --git a/pygsti/circuits/circuit.py b/pygsti/circuits/circuit.py index 6034a29fe..b8867e33d 100644 --- a/pygsti/circuits/circuit.py +++ b/pygsti/circuits/circuit.py @@ -212,7 +212,7 @@ class Circuit(object): to a Label objects. For example, any of the following are allowed: - `['Gx','Gx']` : X gate on each of 2 layers - - `[Label('Gx'),Label('Gx')] : same as above + - `[Label('Gx'),Label('Gx')]` : same as above - `[('Gx',0),('Gy',0)]` : X then Y on qubit 0 (2 layers) - `[[('Gx',0),('Gx',1)],[('Gy',0),('Gy',1)]]` : parallel X then Y on qubits 0 & 1 @@ -362,7 +362,7 @@ def __init__(self, layer_labels=(), line_labels='auto', num_lines=None, editable to a Label objects. For example, any of the following are allowed: - `['Gx','Gx']` : X gate on each of 2 layers - - `[Label('Gx'),Label('Gx')] : same as above + - `[Label('Gx'),Label('Gx')]` : same as above - `[('Gx',0),('Gy',0)]` : X then Y on qubit 0 (2 layers) - `[[('Gx',0),('Gx',1)],[('Gy',0),('Gy',1)]]` : parallel X then Y on qubits 0 & 1 @@ -665,7 +665,7 @@ def name(self): The name of this circuit. Note: the name is *not* a part of the hashed value. - The name is used to name the :class:`CircuitLabel` returned from :method:`to_label`. + The name is used to name the :class:`CircuitLabel` returned from :meth:`to_label`. """ return self._name @@ -1257,7 +1257,7 @@ def insert_idling_layers(self, insert_before, num_to_insert, lines=None): lines : str/int, slice, or list/tuple of strs/ints, optional Which lines should have new layers (blank circuit space) inserted into them. A single or multiple line-labels can be - specified, similarly as in :method:`extract_labels`. The default + specified, similarly as in :meth:`extract_labels`. The default value `None` stands for *all* lines. Returns @@ -1290,7 +1290,7 @@ def insert_idling_layers_inplace(self, insert_before, num_to_insert, lines=None) lines : str/int, slice, or list/tuple of strs/ints, optional Which lines should have new layers (blank circuit space) inserted into them. A single or multiple line-labels can be - specified, similarly as in :method:`extract_labels`. The default + specified, similarly as in :meth:`extract_labels`. The default value `None` stands for *all* lines.
Returns @@ -1345,7 +1345,7 @@ def _append_idling_layers_inplace(self, num_to_insert, lines=None): lines : str/int, slice, or list/tuple of strs/ints, optional Which lines should have new layers (blank circuit space) inserted into them. A single or multiple line-labels can be - specified, similarly as in :method:`extract_labels`. The default + specified, similarly as in :meth:`extract_labels`. The default value `None` stands for *all* lines. Returns @@ -1647,11 +1647,11 @@ def clear_labels(self, layers=None, lines=None, clear_straddlers=False): ---------- layers : int, slice, or list/tuple of ints Defines the horizontal dimension of the region to clear. See - :method:`extract_labels` for details. + :meth:`extract_labels` for details. lines : str/int, slice, or list/tuple of strs/ints Defines the vertical dimension of the region to clear. See - :method:`extract_labels` for details. + :meth:`extract_labels` for details. clear_straddlers : bool, optional Whether or not gates which straddle cleared and non-cleared lines @@ -1673,7 +1673,7 @@ def delete_layers(self, layers=None): Parameters ---------- layers : int, slice, or list/tuple of ints - The layer index or indices to delete. See :method:`extract_labels` + The layer index or indices to delete. See :meth:`extract_labels` for details. Returns @@ -1700,7 +1700,7 @@ def delete_lines(self, lines, delete_straddlers=False): Parameters ---------- lines : str/int, slice, or list/tuple of strs/ints - The line label(s) to delete. See :method:`extract_labels` for details. + The line label(s) to delete. See :meth:`extract_labels` for details. delete_straddlers : bool, optional Whether or not gates which straddle deleted and non-deleted lines @@ -1781,7 +1781,7 @@ def from_pythonstr(cls, python_string, op_labels): Create a Circuit from a python string where each operation label is represented as a **single** character, starting with 'A' and continuing - down the alphabet. This performs the inverse of :method:`to_pythonstr`. + down the alphabet. This performs the inverse of :meth:`to_pythonstr`. Parameters ---------- @@ -1973,7 +1973,7 @@ def factorize_repetitions_inplace(self): """ Attempt to replace repeated sub-circuits with :class:`CircuitLabel` objects. - More or less the reverse of :method:`expand_subcircuits`, this method + More or less the reverse of :meth:`expand_subcircuits`, this method attempts to collapse repetitions of the same labels into single :class:`CircuitLabel` labels within this circuit. @@ -2135,7 +2135,7 @@ def append_circuit(self, circuit): Append a circuit to the end of this circuit, returning a copy. This circuit must satisfy the requirements of - :method:`insert_circuit()`. See that method for more details. + :meth:`insert_circuit()`. See that method for more details. Parameters ---------- @@ -2153,7 +2153,7 @@ def append_circuit_inplace(self, circuit): Append a circuit to the end of this circuit. This circuit must satisfy the requirements of - :method:`insert_circuit()`. See that method for more details. + :meth:`insert_circuit()`. See that method for more details. Parameters ---------- @@ -2171,7 +2171,7 @@ def prefix_circuit(self, circuit): Prefix a circuit to the beginning of this circuit, returning a copy. This circuit must satisfy the requirements of the - :method:`insert_circuit()`. See that method for more details. + :meth:`insert_circuit()`. See that method for more details. Parameters ---------- @@ -2189,7 +2189,7 @@ def prefix_circuit_inplace(self, circuit): Prefix a circuit to the beginning of this circuit. 
This circuit must satisfy the requirements of the - :method:`insert_circuit()`. See that method for more details. + :meth:`insert_circuit()`. See that method for more details. Parameters ---------- @@ -2938,8 +2938,8 @@ def _combine_one_q_gates_inplace(self, one_q_gate_relations): one_q_gate_relations[name1,name2] = name3, name1 -> name3 and name2 -> self.identity, the identity name in the circuit. Moreover, this is still implemented when there are self.identity gates between these 1-qubit gates, and it is implemented iteratively in the sense that if there - is a sequence of 1-qubit gates with names name1, name2, name3, ... and there are relations - for all of (name1,name2) -> name12, (name12,name3) -> name123 etc then the entire sequence of + is a sequence of 1-qubit gates with names `name1, name2, name3, ...` and there are relations + for all of `(name1,name2) -> name12`, `(name12,name3) -> name123` etc then the entire sequence of 1-qubit gates will be compressed into a single possibly non-idle 1-qubit gate followed by idle gates in place of the previous 1-qubit gates. Note that `None` can be used as `name3` to signify that the result is the identity (no gate labels). @@ -3023,7 +3023,7 @@ def _shift_gates_forward_inplace(self): Shift all gates forward (left) as far as is possible. This operation is performed without any knowledge of what any of the - gates are. One of the steps of :method:`depth_compression()`. + gates are. One of the steps of :meth:`depth_compression()`. Returns ------- @@ -3110,7 +3110,7 @@ def compress_depth_inplace(self, one_q_gate_relations=None, verbosity=0): one_q_gate_relations[name1,name2] = name3, name1 -> name3 and name2 -> self.identity, the identity name in the circuit. Moreover, this is still implemented when there are self.identity gates between these 1-qubit gates, and it is implemented iteratively in the sense that if there - is a sequence of 1-qubit gates with names name1, name2, name3, ... and there are relations + is a sequence of 1-qubit gates with names `name1, name2, name3, ...` and there are relations for all of (name1,name2) -> name12, (name12,name3) -> name123 etc then the entire sequence of 1-qubit gates will be compressed into a single possibly non-idle 1-qubit gate followed by idle gates in place of the previous 1-qubit gates. @@ -3244,7 +3244,7 @@ def num_layers(self): """ The number of circuit layers. - In simple circuits, this is the same as the depth (given by :method:`depth`). + In simple circuits, this is the same as the depth (given by :meth:`depth`). For circuits containing sub-circuit blocks, this gives the number of top-level layers in this circuit. @@ -3261,7 +3261,7 @@ def depth(self): This is the number of layers in simple circuits. For circuits containing sub-circuit blocks, this includes the full depth of these blocks. If you - just want the number of top-level layers, use :method:`num_layers`. + just want the number of top-level layers, use :meth:`num_layers`. Returns ------- @@ -3720,7 +3720,7 @@ def convert_to_quil(self, gatename_conversion : dict, optional A dictionary mapping gate names contained in this circuit to the corresponding gate names used in the rendered quil. If None, a standard set of conversions - is used (see :function:`standard_gatenames_quil_conversions`). + is used (see :func:`standard_gatenames_quil_conversions`). 
qubit_conversion : dict, optional If not None, a dictionary converting the qubit labels in the circuit to the @@ -4380,7 +4380,7 @@ def compress_op_label_tuple(circuit, min_len_to_compress=20, max_period_to_look_ @staticmethod def expand_op_label_tuple(compressed_op_labels): """ - Expand a compressed tuple (created with :method:`compress_op_label_tuple`) into a tuple of operation labels. + Expand a compressed tuple (created with :meth:`compress_op_label_tuple`) into a tuple of operation labels. Parameters ---------- diff --git a/pygsti/circuits/circuitconstruction.py b/pygsti/circuits/circuitconstruction.py index e961f90ba..35b8acd39 100644 --- a/pygsti/circuits/circuitconstruction.py +++ b/pygsti/circuits/circuitconstruction.py @@ -218,7 +218,7 @@ def repeat_and_truncate(x, n, assert_at_least_one_rep=False): def _repeat_remainder_for_truncation(x, n, assert_at_least_one_rep=False): """ - Returns the portion truncated by :function:`repeat_and_truncate`. + Returns the portion truncated by :func:`repeat_and_truncate`. Repeat the operation sequence x the fewest number of times such that the repeated sequence has length greater than or equal to n. Return the portion of this @@ -301,7 +301,7 @@ def list_all_circuits(op_labels, minlength, maxlength): def iter_all_circuits(op_labels, minlength, maxlength): """ - Iterative version of :function:`list_all_circuits` + Iterative version of :func:`list_all_circuits` Parameters ---------- @@ -343,7 +343,7 @@ def list_all_circuits_onelen(op_labels, length): def iter_all_circuits_onelen(op_labels, length): """ - Iterative version of :function:`list_all_circuits_onelen` + Iterative version of :func:`list_all_circuits_onelen` Parameters ---------- @@ -626,7 +626,7 @@ def translate_circuit(circuit, alias_dict): def translate_circuits(circuits, alias_dict): """ - Applies :function:`translate_circuit` to each element of `circuits`. + Applies :func:`translate_circuit` to each element of `circuits`. Creates a new list of Circuit objects from an existing one by replacing operation labels in `circuits` by (possibly multiple) new labels according @@ -780,7 +780,7 @@ def manipulate_circuit(circuit, rules, line_labels="auto"): def manipulate_circuits(circuits, rules, line_labels="auto"): """ - Applies :function:`manipulate_circuit` to each element of `circuits`. + Applies :func:`manipulate_circuit` to each element of `circuits`. This creates a new list of Circuit objects from an existing one by performing replacements according to `rules` (see :func:`manipulate_circuit`). @@ -814,7 +814,7 @@ def manipulate_circuits(circuits, rules, line_labels="auto"): def filter_circuits(circuits, sslbls_to_keep, new_sslbls=None, drop=False, idle=()): """ - Applies :function:`filter_circuit` to each element of `circuits`. + Applies :func:`filter_circuit` to each element of `circuits`. Removes any labels from `circuits` whose state-space labels are not entirely in `sslbls_to_keep`. If a gates label's state-space labels diff --git a/pygsti/circuits/circuitlist.py b/pygsti/circuits/circuitlist.py index 7a3f188ab..92aa3a56b 100644 --- a/pygsti/circuits/circuitlist.py +++ b/pygsti/circuits/circuitlist.py @@ -138,7 +138,7 @@ def apply_aliases(self): Returns ------- list - A list of :class:`Circuit`s. + A list of :class:`Circuit` objects. 
""" return _lt.apply_aliases_to_circuits(self._circuits, self.op_label_aliases) diff --git a/pygsti/circuits/circuitparser/__init__.py b/pygsti/circuits/circuitparser/__init__.py index 840c94dc9..72c87bfe4 100644 --- a/pygsti/circuits/circuitparser/__init__.py +++ b/pygsti/circuits/circuitparser/__init__.py @@ -10,7 +10,7 @@ """ Encapsulates a text parser for reading GST input files. -** Grammar ** +**Grammar** expop :: '^' multop :: '*' @@ -116,7 +116,10 @@ def make_label(s): @staticmethod def t_GATE(t): # noqa - r'G[a-z0-9_]+(;[a-zQ0-9_\./]+)*(:[a-zQ0-9_]+)*(![0-9\.]+)?' + """ + ``'G[a-z0-9_]+(;[a-zQ0-9_\./]+)*(:[a-zQ0-9_]+)*(![0-9\.]+)?'`` + """ + #Note: Q is only capital letter allowed in qubit label #Note: don't need to convert parts[1],etc, to integers (if possible) as Label automatically does this lbl = CircuitLexer.make_label(t.value) @@ -124,8 +127,10 @@ def t_GATE(t): return t @staticmethod - def t_INSTRMT(t): # noqa - r'I[a-z0-9_]+(![0-9\.]+)?' + def t_INSTRMT(t): # noqa + """ + ``'I[a-z0-9_]+(![0-9\.]+)?'`` + """ #Note: don't need to convert parts[1],etc, to integers (if possible) as Label automatically does this lbl = CircuitLexer.make_label(t.value) t.value = lbl, # make it a tuple @@ -133,7 +138,9 @@ def t_INSTRMT(t): @staticmethod def t_PREP(t): # noqa - r'rho[a-z0-9_]+(![0-9\.]+)?' + """ + ``'rho[a-z0-9_]+(![0-9\.]+)?'`` + """ #Note: don't need to convert parts[1],etc, to integers (if possible) as Label automatically does this lbl = CircuitLexer.make_label(t.value) t.value = lbl, # make it a tuple @@ -141,7 +148,9 @@ def t_PREP(t): @staticmethod def t_POVM(t): # noqa - r'M[a-z0-9_]+(![0-9\.]+)?' + """ + ``'M[a-z0-9_]+(![0-9\.]+)?'`` + """ #Note: don't need to convert parts[1],etc, to integers (if possible) as Label automatically does this lbl = CircuitLexer.make_label(t.value) t.value = lbl, # make it a tuple @@ -149,12 +158,16 @@ def t_POVM(t): @staticmethod def t_STRINGIND(t): # noqa - r'S(?=\s*\<)' + """ + ``'S(?=\s*\<)'`` + """ return t @staticmethod def t_REFLBL(t): # noqa - r'<\s*[a-zA-Z0-9_]+\s*>' + """ + ``'<\s*[a-zA-Z0-9_]+\s*>'`` + """ t.value = t.value[1:-1].strip() return t @@ -171,13 +184,17 @@ def t_REFLBL(t): @staticmethod def t_NOP(t): # noqa - r'\{\}' + """ + ``'\{\}'`` + """ t.value = tuple() return t @staticmethod def t_INTEGER(t): # noqa - r'\d+' + """ + ``'\d+'`` + """ t.value = int(t.value) return t diff --git a/pygsti/circuits/circuitstructure.py b/pygsti/circuits/circuitstructure.py index 6e8729109..6b3916b09 100644 --- a/pygsti/circuits/circuitstructure.py +++ b/pygsti/circuits/circuitstructure.py @@ -626,7 +626,7 @@ def cast(cls, circuits_or_structure): circuits_or_structure : list or CircuitList The object to convert. If a :class:`PlaquetteGridCircuitStructure`, then the object is simply returned. Lists of circuits (including - :class:`CircuitList`s are converted to structures having no + :class:`CircuitList` objects are converted to structures having no plaquettes. Returns diff --git a/pygsti/circuits/cloudcircuitconstruction.py b/pygsti/circuits/cloudcircuitconstruction.py index f7eb2e4b2..999259361 100644 --- a/pygsti/circuits/cloudcircuitconstruction.py +++ b/pygsti/circuits/cloudcircuitconstruction.py @@ -449,9 +449,9 @@ def _find_amped_polynomials_for_clifford_syntheticidle(qubit_filter, core_filter prep_lbl=None, effect_lbls=None, init_j=None, init_j_rank=None, wrt_params=None, verbosity=0): """ - A specialized version of :function:`_find_amped_polynomials_for_syntheticidle`. 
+ A specialized version of :func:`_find_amped_polynomials_for_syntheticidle`. - Similar to :function:`_find_amped_polynomials_for_syntheticidle` but + Similar to :func:`_find_amped_polynomials_for_syntheticidle` but specialized to "qubit cloud" processing case used in higher-level functions and assumes that `idle_str` is composed of Clifford gates only which act on a "core" of qubits (given by `core_filter`). @@ -466,7 +466,7 @@ def _find_amped_polynomials_for_clifford_syntheticidle(qubit_filter, core_filter Because of these assumptions and pre-computed information, this function often takes considerably less time to run than - :function:`_find_amped_polynomials_for_syntheticidle`. + :func:`_find_amped_polynomials_for_syntheticidle`. Parameters ---------- @@ -564,7 +564,7 @@ def _find_amped_polynomials_for_clifford_syntheticidle(qubit_filter, core_filter parameters (at most the number requested). fidpair_lists : list The selected fiducial pairs, each in "gatename-fidpair-list" format. - See :function:`_find_amped_polynomials_for_syntheticidle` for details. + See :func:`_find_amped_polynomials_for_syntheticidle` for details. """ #Assert that model uses termorder:1, as doing L1-L0 to extract the "amplified" part @@ -816,7 +816,7 @@ def _get_fidpairs_needed_to_access_amped_polynomials(qubit_filter, core_filter, ------- fidpair_lists : list The selected fiducial pairs, each in "gatename-fidpair-list" format. - See :function:`_find_amped_polynomials_for_syntheticidle` for details. + See :func:`_find_amped_polynomials_for_syntheticidle` for details. """ printer = _VerbosityPrinter.create_printer(verbosity) polynomial_vindices_per_int = _Polynomial._vindices_per_int(model.num_params) @@ -1060,7 +1060,7 @@ def _tile_cloud_fidpairs(template_gatename_fidpair_lists, template_germpower, ma on qubits labeled 0 to `cloudsize-1`, and map those fiducial pairs into fiducial pairs for all the qubits by placing in parallel the pairs for as many non-overlapping clouds as possible. This function performs a - function analogous to :function:`_tile_idle_fidpairs` except here we tile + function analogous to :func:`_tile_idle_fidpairs` except here we tile fiducial pairs for non-idle operations. Parameters @@ -1405,7 +1405,7 @@ def _create_xycnot_cloudnoise_circuits(num_qubits, max_lengths, geometry, cnot_e algorithm : {"greedy","sequential"} The algorithm is used internall by - :function:`_find_amped_polynomials_for_syntheticidle`. You should leave this + :func:`_find_amped_polynomials_for_syntheticidle`. You should leave this as the default unless you know what you're doing. comm : mpi4py.MPI.Comm, optional @@ -1534,7 +1534,7 @@ def create_cloudnoise_circuits(processor_spec, max_lengths, single_q_fiducials, algorithm : {"greedy","sequential"} The algorithm is used internall by - :function:`_find_amped_polynomials_for_syntheticidle`. You should leave this + :func:`_find_amped_polynomials_for_syntheticidle`. You should leave this as the default unless you know what you're doing. idle_op_str : Circuit or tuple, optional @@ -2368,7 +2368,7 @@ def _check_kcoverage_template(rows, n, k, verbosity=0): ---------- rows : list A list of k-coverage words. The same as whas is returned by - :function:`create_kcoverage_template`. + :func:`create_kcoverage_template`. n : int The sequences length. @@ -2410,7 +2410,7 @@ def _filter_nqubit_circuittuple(sequence_tuples, sectors_to_keep, More specifically, this function removes any operation labels which act specifically on sectors not in `sectors_to_keep` (e.g. 
an idle gate acting on *all* sectors because it's `.sslbls` is None will *not* be removed -- - see :function:`filter_circuit` for details). Non-empty sequences for + see :func:`filter_circuit` for details). Non-empty sequences for which all labels are removed in the *germ* are not included in the output (as these correspond to an irrelevant germ). @@ -2508,12 +2508,12 @@ def _gatename_fidpair_list_to_fidpairs(gatename_fidpair_list): def _fidpairs_to_gatename_fidpair_list(fidpairs, num_qubits): """ - The inverse of :function:`_gatename_fidpair_list_to_fidpairs`. + The inverse of :func:`_gatename_fidpair_list_to_fidpairs`. Converts a list of `(prep,meas)` pairs of fiducial circuits (containing only single-qubit gates!) to the "gatename fiducial pair list" format, consisting of per-qubit lists of gate names (see docstring for - :function:`_gatename_fidpair_list_to_fidpairs` for mor details). + :func:`_gatename_fidpair_list_to_fidpairs` for mor details). Parameters ---------- diff --git a/pygsti/data/datacomparator.py b/pygsti/data/datacomparator.py index fbe0a56ac..83b70481a 100644 --- a/pygsti/data/datacomparator.py +++ b/pygsti/data/datacomparator.py @@ -296,11 +296,11 @@ class DataComparator(): This object stores the p-values and log-_likelihood ratio values from a consistency comparison between two or more data, and provides methods to: - - Perform a hypothesis test to decide which sequences contain statistically significant variation. - - Plot p-value histograms and log-_likelihood ratio box plots. - - Extract (1) the "statistically significant total variation distance" for a circuit, - (2) various other quantifications of the "amount" of context dependence, and (3) - the level of statistical significance at which any context dependence is detected. + * Perform a hypothesis test to decide which sequences contain statistically significant variation. + * Plot p-value histograms and log-_likelihood ratio box plots. + * Extract (1) the "statistically significant total variation distance" for a circuit, + (2) various other quantifications of the "amount" of context dependence, and (3) + the level of statistical significance at which any context dependence is detected. Parameters ---------- @@ -529,38 +529,33 @@ def run(self, significance=0.05, per_circuit_correction='Hochberg', the details of what the per-circuit comparison is). This can be any string that is an allowed value for the `localcorrections` input parameter of the HypothesisTest object. This includes: - - 'Hochberg'. This implements the Hochberg multi-test compensation technique. This - is strictly the best method available in the code, if you wish to control the FWER, - and it is the method described in "Probing context-dependent errors in quantum processors", - by Rudinger et al. - - - 'Holms'. This implements the Holms multi-test compensation technique. This - controls the FWER, and it results in a strictly less powerful test than the Hochberg - correction. - - - 'Bonferroni'. This implements the well-known Bonferroni multi-test compensation - technique. This controls the FWER, and it results in a strictly less powerful test than - the Hochberg correction. - - - 'none'. This implements no multi-test compensation for the per-sequence comparsions, - so they are all implemented at a "local" signifincance level that is altered from `significance` - only by the (inbuilt) Bonferroni-like correction between the "aggregate" test and the per-sequence - tests. 
This option does *not* control the FWER, and many sequences may be flagged up as context - dependent even if none are. - - -'Benjamini-Hochberg'. This implements the Benjamini-Hockberg multi-test compensation - technique. This does *not* control the FWER, and instead controls the "False Detection Rate" - (FDR); see, for example, https://en.wikipedia.org/wiki/False_discovery_rate. That means that - the global significance is maintained for the test of "Is there any context dependence?". I.e., - one or more tests will trigger when there is no context - dependence with at most a probability of `significance`. But, if one or more per-sequence tests - trigger then we are only guaranteed that (in expectation) no more than a fraction of - "local-signifiance" of the circuits that have been flagged up as context dependent actually aren't. - Here, "local-significance" is the significance at which the per-sequence tests are, together, - implemented, which is `significance`*(1 - `aggregate_test_weighting`) if the aggregate test doesn't - detect context dependence and `significance` if it does (as long as `pass_alpha` is True). This - method is strictly more powerful than the Hochberg correction, but it controls a different, weaker - quantity. + * 'Hochberg'. This implements the Hochberg multi-test compensation technique. This + is strictly the best method available in the code, if you wish to control the FWER, + and it is the method described in "Probing context-dependent errors in quantum processors", + by Rudinger et al. + * 'Holms'. This implements the Holms multi-test compensation technique. This + controls the FWER, and it results in a strictly less powerful test than the Hochberg + correction. + * 'Bonferroni'. This implements the well-known Bonferroni multi-test compensation + technique. This controls the FWER, and it results in a strictly less powerful test than + the Hochberg correction. + * 'none'. This implements no multi-test compensation for the per-sequence comparsions, + so they are all implemented at a "local" signifincance level that is altered from `significance` + only by the (inbuilt) Bonferroni-like correction between the "aggregate" test and the per-sequence + tests. This option does *not* control the FWER, and many sequences may be flagged up as context + dependent even if none are. + * 'Benjamini-Hochberg'. This implements the Benjamini-Hockberg multi-test compensation + technique. This does *not* control the FWER, and instead controls the "False Detection Rate" + (FDR); see, for example, https://en.wikipedia.org/wiki/False_discovery_rate. That means that + the global significance is maintained for the test of "Is there any context dependence?". I.e., + one or more tests will trigger when there is no context dependence with at most a probability of `significance`. + But, if one or more per-sequence tests trigger then we are only guaranteed that (in expectation) no + more than a fraction of "local-signifiance" of the circuits that have been flagged up as context dependent actually aren't. + Here, "local-significance" is the significance at which the per-sequence tests are, together, + implemented, which is `significance`*(1 - `aggregate_test_weighting`) if the aggregate test doesn't + detect context dependence and `significance` if it does (as long as `pass_alpha` is True). This + method is strictly more powerful than the Hochberg correction, but it controls a different, weaker + quantity. 
aggregate_test_weighting : float in [0,1], optional (default is 0.5) The weighting, in a generalized Bonferroni correction, to put on the "aggregate test", that jointly @@ -581,7 +576,7 @@ def run(self, significance=0.05, per_circuit_correction='Hochberg', verbosity : int, optional (default is 1) If > 0 then a summary of the results of the tests is printed to screen. Otherwise, the - various .get_...() methods need to be queried to obtain the results of the + various `.get_...()` methods need to be queried to obtain the results of the hypothesis tests. Returns diff --git a/pygsti/data/dataset.py b/pygsti/data/dataset.py index c5865c2fa..8d8658ec8 100644 --- a/pygsti/data/dataset.py +++ b/pygsti/data/dataset.py @@ -151,7 +151,7 @@ class _DataSetRow(object): ---------- outcomes : list Returns this row's sequence of outcome labels, one per "bin" of repetition - counts (returned by :method:`get_counts`). + counts (returned by :meth:`get_counts`). counts : dict a dictionary of per-outcome counts. @@ -890,7 +890,7 @@ class DataSet(_MongoSerializable): file_to_load_from : string or file object Specify this argument and no others to create a static DataSet by loading - from a file (just like using the load(...) function). + from a file (just like using the `load(...)` function). collision_action : {"aggregate","overwrite","keepseparate"} Specifies how duplicate circuits should be handled. "aggregate" @@ -1762,7 +1762,7 @@ def update_ol(self): """ Updates the internal outcome-label list in this dataset. - Call this after calling add_count_dict(...) or add_raw_series_data(...) + Call this after calling `add_count_dict(...)` or `add_raw_series_data(...)` with `update_olIndex=False`. Returns @@ -1839,7 +1839,7 @@ def aggregate_outcomes(self, label_merge_dict, record_zero_counts=True): if a two-qubit DataSet has outcome labels "00", "01", "10", and "11", and we want to ''aggregate out'' the second qubit, we could use label_merge_dict = {'0':['00','01'],'1':['10','11']}. When doing this, however, it may be better - to use :function:`filter_qubits` which also updates the circuits. + to use :func:`filter_qubits` which also updates the circuits. record_zero_counts : bool, optional Whether zero-counts are actually recorded (stored) in the returned @@ -2390,7 +2390,7 @@ def split_by_time(self, aggregate_to_time=None): aggregate_to_time : float, optional If not None, a single timestamp to give all the data in each returned data set, resulting in time-independent - `DataSet`s. If None, then the original timestamps are + `DataSet` objects. If None, then the original timestamps are preserved. Returns @@ -2469,12 +2469,10 @@ def process_times(self, process_times_array_fn): Manipulate this DataSet's timestamps according to `processor_fn`. For example, using, the folloing `process_times_array_fn` would change - the timestamps for each circuit to sequential integers. - - ``` - def process_times_array_fn(times): - return list(range(len(times))) - ``` + the timestamps for each circuit to sequential integers. :: + + def process_times_array_fn(times): + return list(range(len(times))) Parameters ---------- @@ -2936,7 +2934,7 @@ def read_binary(self, file_or_filename): """ Read a DataSet from a binary file, clearing any data is contained previously. 
- The file should have been created with :method:`DataSet.write_binary` + The file should have been created with :meth:`DataSet.write_binary` Parameters ---------- @@ -3067,7 +3065,7 @@ def add_std_nqubit_outcome_labels(self, nqubits): ---------- nqubits : int The number of qubits. For example, if equal to 3 the outcome labels - '000', '001', ... '111' are added. + '000', '001', `...` '111' are added. Returns ------- diff --git a/pygsti/data/datasetconstruction.py b/pygsti/data/datasetconstruction.py index 4dfabcea2..fa361eedf 100644 --- a/pygsti/data/datasetconstruction.py +++ b/pygsti/data/datasetconstruction.py @@ -298,7 +298,7 @@ def aggregate_dataset_outcomes(dataset, label_merge_dict, record_zero_counts=Tru if a two-qubit DataSet has outcome labels "00", "01", "10", and "11", and we want to ''aggregate out'' the second qubit, we could use label_merge_dict = {'0':['00','01'],'1':['10','11']}. When doing this, however, it may be better - to use :function:`filter_dataset` which also updates the circuits. + to use :func:`filter_dataset` which also updates the circuits. record_zero_counts : bool, optional Whether zero-counts are actually recorded (stored) in the returned @@ -360,7 +360,7 @@ def aggregate_dataset_outcomes(dataset, label_merge_dict, record_zero_counts=Tru def _create_qubit_merge_dict(num_qubits, qubits_to_keep): """ - Creates a dictionary appropriate for use with :function:`aggregate_dataset_outcomes`. + Creates a dictionary appropriate for use with :func:`aggregate_dataset_outcomes`. The returned dictionary instructs `aggregate_dataset_outcomes` to aggregate all but the specified `qubits_to_keep` when the outcome labels are those of @@ -386,7 +386,7 @@ def _create_qubit_merge_dict(num_qubits, qubits_to_keep): def _create_merge_dict(indices_to_keep, outcome_labels): """ - Creates a dictionary appropriate for use with :function:`aggregate_dataset_outcomes`. + Creates a dictionary appropriate for use with :func:`aggregate_dataset_outcomes`. Each element of `outcome_labels` should be a n-character string (or a 1-tuple of such a string). The returned dictionary's keys will be all the diff --git a/pygsti/data/hypothesistest.py b/pygsti/data/hypothesistest.py index b0880238b..87046853a 100644 --- a/pygsti/data/hypothesistest.py +++ b/pygsti/data/hypothesistest.py @@ -27,9 +27,9 @@ class HypothesisTest(object): Specifies the set of null hypotheses. This should be a list containing elements that are either - - A "label" for a hypothesis, which is just some hashable object such + * A "label" for a hypothesis, which is just some hashable object such as a string. - - A tuple of "nested hypotheses", which are also just labels for some + * A tuple of "nested hypotheses", which are also just labels for some null hypotheses. The elements of this list are then subject to multi-test correction of the "closed test @@ -74,35 +74,31 @@ class HypothesisTest(object): the "local" significance is for each of the "nested hypotheses" is multi-test corrected using this procedure. Must be one of: - - 'Holms'. This implements the Holms multi-test compensation technique. This - controls the FWER for each set of nested hypotheses (and so controls the global FWER, in - combination with the "top level" corrections). This requires no assumptions about the - null hypotheses. - - - 'Bonferroni'. This implements the well-known Bonferroni multi-test compensation - technique. This is strictly less powerful test than the Hochberg correction. 
- Note that neither 'Holms' nor 'Bonferronni' gained any advantage from being implemented - using "nesting", as if all the hypotheses were put into the "top level" the same corrections - could be achieved. - - - 'Hochberg'. This implements the Hockberg multi-test compensation technique. It is - not a "closed test procedure", so it is not something that can be implemented in the - top level. To be provably valid, it is necessary for the p-values of the nested - hypotheses to be non-negatively dependent. When that is true, this is strictly better - than the Holms and Bonferroni corrections whilst still controlling the FWER. - - - 'none'. This implements no multi-test compensation. This option does *not* control the - FWER of the nested hypotheses. So it will generally not control the global FWER as specified. - - -'Benjamini-Hochberg'. This implements the Benjamini-Hockberg multi-test compensation - technique. This does *not* control the FWER of the nested hypotheses, and instead controls - the "False Detection Rate" (FDR); see wikipedia. That means that the global significance is - maintained in the sense that the probability of one or more tests triggering is at most `significance`. - But, if one or more tests are triggered in a particular nested hypothesis test we are only guaranteed - that (in expectation) no more than a fraction of "local signifiance" of tests are false alarms.This - method is strictly more powerful than the Hochberg correction, but it controls a different, weaker - quantity. + * 'Holms'. This implements the Holms multi-test compensation technique. This + controls the FWER for each set of nested hypotheses (and so controls the global FWER, in + combination with the "top level" corrections). This requires no assumptions about the + null hypotheses. + * 'Bonferroni'. This implements the well-known Bonferroni multi-test compensation technique. + This is a strictly less powerful test than the Hochberg correction. Note that neither + 'Holms' nor 'Bonferroni' gained any advantage from being implemented + using "nesting", as if all the hypotheses were put into the "top level" the same corrections + could be achieved. + * 'Hochberg'. This implements the Hochberg multi-test compensation technique. It is + not a "closed test procedure", so it is not something that can be implemented in the + top level. To be provably valid, it is necessary for the p-values of the nested + hypotheses to be non-negatively dependent. When that is true, this is strictly better + than the Holms and Bonferroni corrections whilst still controlling the FWER. + * 'none'. This implements no multi-test compensation. This option does *not* control the + FWER of the nested hypotheses. So it will generally not control the global FWER as specified. + * 'Benjamini-Hochberg'. This implements the Benjamini-Hochberg multi-test compensation + technique. This does *not* control the FWER of the nested hypotheses, and instead controls + the "False Detection Rate" (FDR); see wikipedia. That means that the global significance is + maintained in the sense that the probability of one or more tests triggering is at most `significance`. + But, if one or more tests are triggered in a particular nested hypothesis test we are only guaranteed + that (in expectation) no more than a fraction of "local significance" of tests are false alarms. This + method is strictly more powerful than the Hochberg correction, but it controls a different, weaker + quantity.
+ """ def __init__(self, hypotheses, significance=0.05, weighting='equal', @@ -118,9 +114,9 @@ def __init__(self, hypotheses, significance=0.05, weighting='equal', Specifies the set of null hypotheses. This should be a list containing elements that are either - - A "label" for a hypothesis, which is just some hashable object such + * A "label" for a hypothesis, which is just some hashable object such as a string. - - A tuple of "nested hypotheses", which are also just labels for some + * A tuple of "nested hypotheses", which are also just labels for some null hypotheses. The elements of this list are then subject to multi-test correction of the "closed test @@ -165,35 +161,30 @@ def __init__(self, hypotheses, significance=0.05, weighting='equal', the "local" significance is for each of the "nested hypotheses" is multi-test corrected using this procedure. Must be one of: - - 'Holms'. This implements the Holms multi-test compensation technique. This - controls the FWER for each set of nested hypotheses (and so controls the global FWER, in - combination with the "top level" corrections). This requires no assumptions about the - null hypotheses. - - - 'Bonferroni'. This implements the well-known Bonferroni multi-test compensation - technique. This is strictly less powerful test than the Hochberg correction. - - Note that neither 'Holms' nor 'Bonferronni' gained any advantage from being implemented - using "nesting", as if all the hypotheses were put into the "top level" the same corrections - could be achieved. - - - 'Hochberg'. This implements the Hockberg multi-test compensation technique. It is - not a "closed test procedure", so it is not something that can be implemented in the - top level. To be provably valid, it is necessary for the p-values of the nested - hypotheses to be non-negatively dependent. When that is true, this is strictly better - than the Holms and Bonferroni corrections whilst still controlling the FWER. - - - 'none'. This implements no multi-test compensation. This option does *not* control the - FWER of the nested hypotheses. So it will generally not control the global FWER as specified. - - -'Benjamini-Hochberg'. This implements the Benjamini-Hockberg multi-test compensation - technique. This does *not* control the FWER of the nested hypotheses, and instead controls - the "False Detection Rate" (FDR); see wikipedia. That means that the global significance is - maintained in the sense that the probability of one or more tests triggering is at most `significance`. - But, if one or more tests are triggered in a particular nested hypothesis test we are only guaranteed - that (in expectation) no more than a fraction of "local signifiance" of tests are false alarms.This - method is strictly more powerful than the Hochberg correction, but it controls a different, weaker - quantity. + * 'Holms'. This implements the Holms multi-test compensation technique. This + controls the FWER for each set of nested hypotheses (and so controls the global FWER, in + combination with the "top level" corrections). This requires no assumptions about the + null hypotheses. + * 'Bonferroni'. This implements the well-known Bonferroni multi-test compensation + technique. This is strictly less powerful test than the Hochberg correction. + Note that neither 'Holms' nor 'Bonferronni' gained any advantage from being implemented + using "nesting", as if all the hypotheses were put into the "top level" the same corrections + could be achieved. + * 'Hochberg'. 
This implements the Hockberg multi-test compensation technique. It is + not a "closed test procedure", so it is not something that can be implemented in the + top level. To be provably valid, it is necessary for the p-values of the nested + hypotheses to be non-negatively dependent. When that is true, this is strictly better + than the Holms and Bonferroni corrections whilst still controlling the FWER. + * 'none'. This implements no multi-test compensation. This option does *not* control the + FWER of the nested hypotheses. So it will generally not control the global FWER as specified. + * 'Benjamini-Hochberg'. This implements the Benjamini-Hockberg multi-test compensation + technique. This does *not* control the FWER of the nested hypotheses, and instead controls + the "False Detection Rate" (FDR); see wikipedia. That means that the global significance is + maintained in the sense that the probability of one or more tests triggering is at most `significance`. + But, if one or more tests are triggered in a particular nested hypothesis test we are only guaranteed + that (in expectation) no more than a fraction of "local signifiance" of tests are false alarms.This + method is strictly more powerful than the Hochberg correction, but it controls a different, weaker + quantity. Returns ------- diff --git a/pygsti/data/multidataset.py b/pygsti/data/multidataset.py index 91efdea85..418dcde4c 100644 --- a/pygsti/data/multidataset.py +++ b/pygsti/data/multidataset.py @@ -732,7 +732,7 @@ def read_binary(self, file_or_filename): """ Read a MultiDataSet from a file, clearing any data is contained previously. - The file should have been created with :method:`MultiDataSet.write_binary` + The file should have been created with :meth:`MultiDataSet.write_binary` Parameters ---------- diff --git a/pygsti/drivers/bootstrap.py b/pygsti/drivers/bootstrap.py index 978d99200..ceb71eca0 100644 --- a/pygsti/drivers/bootstrap.py +++ b/pygsti/drivers/bootstrap.py @@ -117,7 +117,7 @@ def create_bootstrap_models(num_models, input_data_set, generation_method, Models are created from a single DataSet (and possibly Model) and are typically used for generating bootstrapped error bars. The resulting Models are obtained by performing MLGST on data generated by repeatedly calling - :function:`create_bootstrap_dataset` with consecutive integer seed values. + :func:`create_bootstrap_dataset` with consecutive integer seed values. Parameters ---------- diff --git a/pygsti/drivers/longsequence.py b/pygsti/drivers/longsequence.py index b0af7c0d3..423d08061 100644 --- a/pygsti/drivers/longsequence.py +++ b/pygsti/drivers/longsequence.py @@ -179,12 +179,12 @@ def run_linear_gst(data_filename_or_set, target_model_filename_or_object, """ Perform Linear Gate Set Tomography (LGST). - This function differs from the lower level :function:`run_lgst` function + This function differs from the lower level :func:`run_lgst` function in that it may perform a post-LGST gauge optimization and this routine returns a :class:`Results` object containing the LGST estimate. Overall, this is a high-level driver routine which can be used similarly - to :function:`run_long_sequence_gst` whereas `run_lgst` is a low-level + to :func:`run_long_sequence_gst` whereas `run_lgst` is a low-level routine used when building your own algorithms. 
Parameters @@ -220,7 +220,7 @@ def run_linear_gst(data_filename_or_set, target_model_filename_or_object, advanced_options : dict, optional Specifies advanced options most of which deal with numerical details of the objective function or expert-level functionality. See - :function:`run_long_sequence_gst`. + :func:`run_long_sequence_gst`. comm : mpi4py.MPI.Comm, optional When not ``None``, an MPI communicator for distributing the computation @@ -378,8 +378,7 @@ def run_long_sequence_gst(data_filename_or_set, target_model_filename_or_object, - XX op_label_aliases = dict (default = None) - always_perform_mle = bool (default = False) - only_perform_mle = bool (default = False) - - XX truncScheme = "whole germ powers" (default) or "truncated germ powers" - or "length as exponent" + - XX truncScheme = "whole germ powers" (default) or "truncated germ powers" or "length as exponent" - appendTo = Results (default = None) - estimateLabel = str (default = "default") - XX missingDataAction = {'drop','raise'} (default = 'drop') @@ -408,8 +407,7 @@ def run_long_sequence_gst(data_filename_or_set, target_model_filename_or_object, - 2 -- show summary details about each individual iteration - 3 -- also shows outer iterations of LM algorithm - 4 -- also shows inner iterations of LM algorithm - - 5 -- also shows detailed info from within jacobian - and objective function calls. + - 5 -- also shows detailed info from within jacobian and objective function calls. Returns ------- @@ -530,8 +528,7 @@ def run_long_sequence_gst_base(data_filename_or_set, target_model_filename_or_ob - 2 -- show summary details about each individual iteration - 3 -- also shows outer iterations of LM algorithm - 4 -- also shows inner iterations of LM algorithm - - 5 -- also shows detailed info from within jacobian - and objective function calls. + - 5 -- also shows detailed info from within jacobian and objective function calls. Returns ------- diff --git a/pygsti/evotypes/densitymx_slow/effectreps.py b/pygsti/evotypes/densitymx_slow/effectreps.py index 7109e886d..37d0d6599 100644 --- a/pygsti/evotypes/densitymx_slow/effectreps.py +++ b/pygsti/evotypes/densitymx_slow/effectreps.py @@ -96,7 +96,7 @@ def __init__(self, povm_factors, effect_labels, state_space): self.factor_dims = factordims self.max_factor_dim = max_factor_dim # Unused state_space = _StateSpace.cast(state_space) - assert(_np.product(factordims) == state_space.dim) + assert(_np.prod(factordims) == state_space.dim) super(EffectRepTensorProduct, self).__init__(state_space) self.factor_effects_have_changed() diff --git a/pygsti/evotypes/densitymx_slow/opreps.py b/pygsti/evotypes/densitymx_slow/opreps.py index 08d79e825..8feb14d95 100644 --- a/pygsti/evotypes/densitymx_slow/opreps.py +++ b/pygsti/evotypes/densitymx_slow/opreps.py @@ -329,7 +329,7 @@ def __init__(self, state_space, target_labels, embedded_rep): # final map just acts as identity w.r.t. 
labelIndices = [tensorProdBlkLabels.index(label) for label in target_labels] actionInds = _np.array(labelIndices, _np.int64) - assert(_np.product([numBasisEls[i] for i in actionInds]) == embedded_rep.dim), \ + assert(_np.prod([numBasisEls[i] for i in actionInds]) == embedded_rep.dim), \ "Embedded operation has dimension (%d) inconsistent with the given target labels (%s)" % ( embedded_rep.dim, str(target_labels)) @@ -337,7 +337,7 @@ def __init__(self, state_space, target_labels, embedded_rep): iActiveBlock = iTensorProdBlk nComponents = len(state_space.tensor_product_block_labels(iActiveBlock)) #embeddedDim = embedded_rep.dim - blocksizes = _np.array([_np.product(state_space.tensor_product_block_dimensions(k)) + blocksizes = _np.array([_np.prod(state_space.tensor_product_block_dimensions(k)) for k in range(nBlocks)], _np.int64) self.embedded_rep = embedded_rep diff --git a/pygsti/evotypes/densitymx_slow/statereps.py b/pygsti/evotypes/densitymx_slow/statereps.py index 6bc230a2a..b5d3e4e8a 100644 --- a/pygsti/evotypes/densitymx_slow/statereps.py +++ b/pygsti/evotypes/densitymx_slow/statereps.py @@ -143,7 +143,7 @@ def __reduce__(self): class StateRepTensorProduct(StateRep): def __init__(self, factor_state_reps, state_space): self.factor_reps = factor_state_reps - dim = _np.product([fct.dim for fct in self.factor_reps]) + dim = _np.prod([fct.dim for fct in self.factor_reps]) super(StateRepTensorProduct, self).__init__(_np.zeros(dim, 'd'), state_space) self.reps_have_changed() diff --git a/pygsti/evotypes/stabilizer_slow/stabilizer.py b/pygsti/evotypes/stabilizer_slow/stabilizer.py index 596172809..75f89ad38 100644 --- a/pygsti/evotypes/stabilizer_slow/stabilizer.py +++ b/pygsti/evotypes/stabilizer_slow/stabilizer.py @@ -231,7 +231,7 @@ def push_view(self, qubit_filter): def pop_view(self): """ - Removes the last-applied (via :method:`push_view`) view filter. + Removes the last-applied (via :meth:`push_view`) view filter. Returns ------- @@ -655,7 +655,7 @@ def clifford_update(self, smatrix, svector, u_mx, qubit_filter=None): Update this stabilizer frame by the action of a Clifford operation. The Clifford operation is given in the usual symplectic representation. - If there are any active views (from calling :method:`push_view`) and/or + If there are any active views (from calling :meth:`push_view`) and/or if `qubit_filter` is not None, then `smatrix`, `svector`, and `u_mx` should be sized for just the number of qubits in the current view. @@ -676,7 +676,7 @@ qubit_filter : list, optional An additional view filter to apply just for this function call (i.e. - it is not stored on a stack as it is for :method:`push_view`. + it is not stored on a stack as it is for :meth:`push_view`).
Returns ------- diff --git a/pygsti/evotypes/stabilizer_slow/statereps.py b/pygsti/evotypes/stabilizer_slow/statereps.py index bb070fccd..b47417f08 100644 --- a/pygsti/evotypes/stabilizer_slow/statereps.py +++ b/pygsti/evotypes/stabilizer_slow/statereps.py @@ -104,7 +104,7 @@ class StateRepTensorProduct(StateRep): def __init__(self, factor_state_reps, state_space): self.factor_reps = factor_state_reps n = sum([sf.nqubits for sf in self.factor_reps]) # total number of qubits - np = int(_np.product([len(sf.pvectors) for sf in self.factor_reps])) + np = int(_np.prod([len(sf.pvectors) for sf in self.factor_reps])) super(StateRepTensorProduct, self).__init__(_np.zeros((2 * n, 2 * n), _np.int64), _np.zeros((np, 2 * n), _np.int64), diff --git a/pygsti/evotypes/statevec_slow/effectreps.py b/pygsti/evotypes/statevec_slow/effectreps.py index 8c391cc7b..d1a14ebc6 100644 --- a/pygsti/evotypes/statevec_slow/effectreps.py +++ b/pygsti/evotypes/statevec_slow/effectreps.py @@ -98,7 +98,7 @@ def __init__(self, povm_factors, effect_labels, state_space): factordims = _np.ascontiguousarray( _np.array([fct.state_space.udim for fct in povm_factors], _np.int64)) - #dim = _np.product(factordims) + #dim = _np.prod(factordims) self.povm_factors = povm_factors self.effect_labels = effect_labels self.kron_array = kron_array @@ -106,7 +106,7 @@ def __init__(self, povm_factors, effect_labels, state_space): self.nfactors = len(self.povm_factors) self.max_factor_dim = max_factor_dim # Unused state_space = _StateSpace.cast(state_space) - assert(_np.product(factordims) == state_space.udim) + assert(_np.prod(factordims) == state_space.udim) super(EffectRepTensorProduct, self).__init__(state_space) self.factor_effects_have_changed() diff --git a/pygsti/evotypes/statevec_slow/opreps.py b/pygsti/evotypes/statevec_slow/opreps.py index c66a66aa8..b60fadd51 100644 --- a/pygsti/evotypes/statevec_slow/opreps.py +++ b/pygsti/evotypes/statevec_slow/opreps.py @@ -203,7 +203,7 @@ def __init__(self, state_space, target_labels, embedded_rep): # final map just acts as identity w.r.t. 
labelIndices = [tensorProdBlkLabels.index(label) for label in target_labels] actionInds = _np.array(labelIndices, _np.int64) - assert(_np.product([numBasisEls[i] for i in actionInds]) == embedded_rep.dim), \ + assert(_np.prod([numBasisEls[i] for i in actionInds]) == embedded_rep.dim), \ "Embedded operation has dimension (%d) inconsistent with the given target labels (%s)" % ( embedded_rep.dim, str(target_labels)) @@ -212,7 +212,7 @@ def __init__(self, state_space, target_labels, embedded_rep): iActiveBlock = iTensorProdBlk nComponents = len(state_space.tensor_product_block_labels(iActiveBlock)) embeddedDim = embedded_rep.dim # a *unitary* dim - see .dim property above - blocksizes = _np.array([_np.product(state_space.tensor_product_block_udimensions(k)) + blocksizes = _np.array([_np.prod(state_space.tensor_product_block_udimensions(k)) for k in range(nBlocks)], _np.int64) self.target_labels = target_labels diff --git a/pygsti/evotypes/statevec_slow/statereps.py b/pygsti/evotypes/statevec_slow/statereps.py index b1b29bdf1..7b5682733 100644 --- a/pygsti/evotypes/statevec_slow/statereps.py +++ b/pygsti/evotypes/statevec_slow/statereps.py @@ -123,7 +123,7 @@ def actionable_staterep(self): class StateRepTensorProduct(StateRep): def __init__(self, factor_state_reps, state_space): self.factor_reps = factor_state_reps - dim = _np.product([fct.dim for fct in self.factor_reps]) + dim = _np.prod([fct.dim for fct in self.factor_reps]) # FUTURE TODO: below compute a tensorprod basis instead of punting and passing `None` super(StateRepTensorProduct, self).__init__(_np.zeros(dim, complex), state_space, None) self.reps_have_changed() diff --git a/pygsti/extras/drift/probtrajectory.py b/pygsti/extras/drift/probtrajectory.py index 84afd3ce7..76ac4fcb1 100644 --- a/pygsti/extras/drift/probtrajectory.py +++ b/pygsti/extras/drift/probtrajectory.py @@ -66,9 +66,9 @@ def copy(self): def basisfunction(self, i, times): """ - The ith basis function of the model, evaluated at the times in `times. + The ith basis function of the model, evaluated at the times in `times`. - *** Defined in a derived class *** + **Defined in a derived class** Parameters ---------- @@ -266,7 +266,7 @@ def __init__(self, outcomes, hyperparameters, parameters, starttime, timestep, n numtimes : int The number of data collection times defining the DCT basis functions (defines the total number - of DCT basis functions: the hyperparameters list is then a subset of this [0,1,2,...,numtimes-1]). + of DCT basis functions: the hyperparameters list is then a subset of this `[0,1,2,...,numtimes-1]`). This is typically set to the number of data collection times for the circuit that this probability trajectory is being defined for. diff --git a/pygsti/extras/drift/signal.py b/pygsti/extras/drift/signal.py index d0f59e069..961f0a5f8 100644 --- a/pygsti/extras/drift/signal.py +++ b/pygsti/extras/drift/signal.py @@ -33,7 +33,7 @@ def spectrum(x, times=None, null_hypothesis=None, counts=1, frequencies='auto', where the arithmetic is element-wise, and `null_hypothesis` is a vector in (0,1). If `null_hypothesis` is None it is set to the mean of x. If that mean is 0 or 1 then - the power spectrum returned is (0,1,1,1,...). + the power spectrum returned is `(0,1,1,1,...)`. Parameters ---------- @@ -295,7 +295,7 @@ def lsp(x, times, frequencies='auto', null_hypothesis=None, counts=1): Performs a Lomb-Scargle periodogram (lsp) on the input data, returning powers and frequencies. 
- *** This function uses astropy, which is not a required dependency for pyGSTi *** + **This function uses astropy, which is not a required dependency for pyGSTi** Parameters ---------- @@ -588,9 +588,9 @@ def sparsity(p): """ Returns the Hoyer sparsity index of the input vector p. This is defined to be: - HoyerIndex = (sqrt(l) - (|p|_1 / |p|_2)) / (sqrt(l) - 1) + `HoyerIndex = (sqrt(l) - (|p|_1 / |p|_2)) / (sqrt(l) - 1)` - where l is the length of the vector and |.|_1 and |.|_2 are the 1-norm and 2-norm of the vector, resp. + where l is the length of the vector and `|.|_1` and `|.|_2` are the 1-norm and 2-norm of the vector, resp. """ n = len(p) @@ -717,7 +717,7 @@ def generate_flat_signal(power, nummodes, n, candidatefreqs=None, base=0.5, meth The number of sample times that the probability trajectory is being created for. candidatefreqs : list, optional - A list containing a subset of 1,2,...,n-1. If not None, then all frequencies are included. + A list containing a subset of `1,2,...,n-1`. If None, then all frequencies are included. base : float in (0,1), optional diff --git a/pygsti/extras/drift/stabilityanalyzer.py b/pygsti/extras/drift/stabilityanalyzer.py index 21bf0ffc5..d856c8157 100644 --- a/pygsti/extras/drift/stabilityanalyzer.py +++ b/pygsti/extras/drift/stabilityanalyzer.py @@ -247,9 +247,9 @@ def __init__(self, ds, transform='auto', marginalize='auto', mergeoutcomes=None, Initialize a StabilityAnalyzer, by inputting time-series data and some information on how it should be processed. - *** Some of the nominally allowed values for the inputs are not yet functional. For + Some of the nominally allowed values for the inputs are not yet functional. For entirely non-functional code an assert() will flag up the input as not yet allowed, and for untested - and perhaps unreliable code a warning will be flagged but the code will still run *** + and perhaps unreliable code a warning will be flagged but the code will still run. Parameters ---------- @@ -259,24 +259,24 @@ def __init__(self, ds, transform='auto', marginalize='auto', mergeoutcomes=None, transform : str, optional The type of transform to use in the spectral analysis. Options are: - - 'auto': An attempt is made to choose the best transform given the "meta-data" of the data, - e.g., the variability in the time-step between data points. For beginners, - 'auto' is the best option. If you are familiar with the underlying methods, the - meta-data of the input, and the relative merits of the different transform, then - it is probably better to choose this yourself -- as the auto-selection is not hugely - sophisticated. + * 'auto': An attempt is made to choose the best transform given the "meta-data" of the data, + e.g., the variability in the time-step between data points. For beginners, + 'auto' is the best option. If you are familiar with the underlying methods, the + meta-data of the input, and the relative merits of the different transforms, then + it is probably better to choose this yourself -- as the auto-selection is not hugely + sophisticated. - - 'dct' : The Type-II Discrete Cosine Transform (with an orthogonal normalization). This is - the only tested option, and it is our recommended option when the data is - approximately equally-spaced, i.e., the time-step between each "click" for each - circuit is almost a constant. (the DCT transform implicitly assumes that this - time-step is exactly constant) + * 'dct' : The Type-II Discrete Cosine Transform (with an orthogonal normalization).
This is + the only tested option, and it is our recommended option when the data is + approximately equally-spaced, i.e., the time-step between each "click" for each + circuit is almost a constant. (the DCT transform implicitly assumes that this + time-step is exactly constant) - - 'dft' : The discrete Fourier transform (with an orthogonal normalization). *** This is an - experimental feature, and the results are unreliable with this transform *** + * 'dft' : The discrete Fourier transform (with an orthogonal normalization). **This is an** + **experimental feature, and the results are unreliable with this transform** - - 'lsp' : The Lomb-Scargle periodogram. *** This is an experimental feature, and the code is - untested with this transform *** + * 'lsp' : The Lomb-Scargle periodogram. **This is an experimental feature, and the code is** + **untested with this transform** marginalize : str or bool, optional True, False or 'auto'. Whether or not to marginalize multi-qubit data, to look for instability diff --git a/pygsti/extras/drift/trmodel.py b/pygsti/extras/drift/trmodel.py index a5deb9f3c..604a7a05c 100644 --- a/pygsti/extras/drift/trmodel.py +++ b/pygsti/extras/drift/trmodel.py @@ -67,7 +67,7 @@ def parameters_copy(self): def probabilities(self, circuit, times): """ - *** Specified in each derive class *** + **Specified in each derived class** Specifying this method is the core of building a time-resolved model. This method should return the probabilities for each outcome, for the input circuit at the specified times. diff --git a/pygsti/extras/ibmq/ibmqcore.py b/pygsti/extras/ibmq/ibmqcore.py index d31459034..4a783d01a 100644 --- a/pygsti/extras/ibmq/ibmqcore.py +++ b/pygsti/extras/ibmq/ibmqcore.py @@ -87,8 +87,8 @@ def __init__(self, edesign, pspec, remove_duplicates=True, randomized_order=True A QubitProcessorSpec that represents the IBM Q device being used. This can be created using the extras.devices.create_processor_spec(). The ProcessorSpec's qubit ordering *must* correspond to that of the IBM device (which will be the case if you create it using that function). - I.e., pspecs qubits should be labelled Q0 through Qn-1 and the labelling of the qubits - should agree with IBM's labelling. + I.e., pspec's qubits should be labelled Q0 through Qn-1 and the labelling of the qubits + should agree with IBM's labelling. remove_duplicates: bool, optional If true, each distinct circuit in `edesign` is run only once. If false, if a circuit is diff --git a/pygsti/extras/idletomography/idtcore.py b/pygsti/extras/idletomography/idtcore.py index 5ff1340c0..41358fab8 100644 --- a/pygsti/extras/idletomography/idtcore.py +++ b/pygsti/extras/idletomography/idtcore.py @@ -428,7 +428,7 @@ def preferred_signs_from_paulidict(pauli_basis_dict): tuple A 3-tuple of elements in {"+", "-"}, exactly the format expected by `preferred_*_basis_signs` arguments of - :function:`idle_tomography_fidpairs`. + :func:`idle_tomography_fidpairs`. """ preferred_signs = () for let in ('X', 'Y', 'Z'): @@ -469,7 +469,7 @@ def fidpairs_to_pauli_fidpairs(fidpairs_list, pauli_basis_dicts, nqubits): pauli_basis_dicts : tuple A `(prepPauliBasisDict,measPauliBasisDict)` tuple of dictionaries specifying the way to prepare and measure in Pauli bases. See - :function:`preferred_signs_from_paulidict` for details on each + :func:`preferred_signs_from_paulidict` for details on each dictionary's format.
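Editorial aside: the Hoyer index formula quoted in signal.py's `sparsity` docstring above is simple enough to sanity-check directly; a minimal sketch (hypothetical helper, not pyGSTi's implementation):

```python
import numpy as np

def hoyer_sparsity(p):
    """HoyerIndex = (sqrt(l) - |p|_1/|p|_2) / (sqrt(l) - 1), with l = len(p)."""
    l = len(p)
    ratio = np.linalg.norm(p, 1) / np.linalg.norm(p, 2)
    return (np.sqrt(l) - ratio) / (np.sqrt(l) - 1)

print(hoyer_sparsity(np.array([1.0, 0.0, 0.0, 0.0])))  # 1.0: maximally sparse
print(round(hoyer_sparsity(np.ones(4) / 4), 3))        # 0.0: maximally flat
```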
nqubits : int @@ -679,7 +679,7 @@ def make_idle_tomography_list(nqubits, max_lengths, pauli_basis_dicts, maxweight pauli_basis_dicts : tuple A `(prepPauliBasisDict,measPauliBasisDict)` tuple of dictionaries specifying the way to prepare and measure in Pauli bases. See - :function:`preferred_signs_from_paulidict` for details on each + :func:`preferred_signs_from_paulidict` for details on each dictionary's format. maxweight : int, optional @@ -706,7 +706,7 @@ def make_idle_tomography_list(nqubits, max_lengths, pauli_basis_dicts, maxweight or measuring in the X, Y, and Z bases is preferable. Usually one orientation is preferred because it's easier to achieve using the native model. Additionally, the special (and default) value "auto" - may be used, in which case :function:`preferred_signs_from_paulidict` + may be used, in which case :func:`preferred_signs_from_paulidict` is used to choose preferred signs based on `pauli_basis_dicts`. Returns @@ -764,7 +764,7 @@ def make_idle_tomography_lists(nqubits, max_lengths, pauli_basis_dicts, maxweigh pauli_basis_dicts : tuple A `(prepPauliBasisDict,measPauliBasisDict)` tuple of dictionaries specifying the way to prepare and measure in Pauli bases. See - :function:`preferred_signs_from_paulidict` for details on each + :func:`preferred_signs_from_paulidict` for details on each dictionary's format. maxweight : int, optional @@ -791,7 +791,7 @@ def make_idle_tomography_lists(nqubits, max_lengths, pauli_basis_dicts, maxweigh or measuring in the X, Y, and Z bases is preferable. Usually one orientation is preferred because it's easier to achieve using the native model. Additionally, the special (and default) value "auto" - may be used, in which case :function:`preferred_signs_from_paulidict` + may be used, in which case :func:`preferred_signs_from_paulidict` is used to choose preferred signs based on `pauli_basis_dicts`. Returns @@ -848,7 +848,7 @@ def compute_observed_samebasis_err_rate(dataset, pauli_fidpair, pauli_basis_dict pauli_basis_dicts : tuple A `(prepPauliBasisDict,measPauliBasisDict)` tuple of dictionaries specifying the way to prepare and measure in Pauli bases. See - :function:`preferred_signs_from_paulidict` for details on each + :func:`preferred_signs_from_paulidict` for details on each dictionary's format. idle_string : Circuit @@ -937,7 +937,7 @@ def compute_observed_diffbasis_err_rate(dataset, pauli_fidpair, pauli_basis_dict pauli_basis_dicts : tuple A `(prepPauliBasisDict,measPauliBasisDict)` tuple of dictionaries specifying the way to prepare and measure in Pauli bases. See - :function:`preferred_signs_from_paulidict` for details on each + :func:`preferred_signs_from_paulidict` for details on each dictionary's format. idle_string : Circuit @@ -1062,7 +1062,7 @@ def do_idle_tomography(nqubits, dataset, max_lengths, pauli_basis_dicts, maxweig pauli_basis_dicts : tuple A `(prepPauliBasisDict,measPauliBasisDict)` tuple of dictionaries specifying the way to prepare and measure in Pauli bases. See - :function:`preferred_signs_from_paulidict` for details on each + :func:`preferred_signs_from_paulidict` for details on each dictionary's format.
maxweight : int, optional @@ -1088,7 +1088,7 @@ def do_idle_tomography(nqubits, dataset, max_lengths, pauli_basis_dicts, maxweig - "preferred_meas_basis_signs" : 3-tuple of "+"/"-" or default="auto" - "pauli_fidpairs": alternate list of pauli fiducial pairs to use - "fit order" : integer order for polynomial fits to data - - "ham_tmpl" : see :function:`make_idle_tomography_list` + - "ham_tmpl" : see :func:`make_idle_tomography_list` verbosity : int, optional How much detail to send to stdout. diff --git a/pygsti/extras/idletomography/pauliobjs.py b/pygsti/extras/idletomography/pauliobjs.py index 9accc3bc0..84721dacf 100644 --- a/pygsti/extras/idletomography/pauliobjs.py +++ b/pygsti/extras/idletomography/pauliobjs.py @@ -140,7 +140,7 @@ def to_circuit(self, pauli_basis_dict): """ Convert this Pauli basis state or measurement to a fiducial operation sequence. - When the returned operation sequence follows a preparation in the |0...0> + When the returned operation sequence follows a preparation in the `|0...0>` Z-basis state or is followed by a Z-basis measurement (with all "+" signs), then the Pauli state preparation or measurement described by this object will be performed. @@ -405,7 +405,7 @@ def ri_sign(pauli1, pauli2, parity): sign = (-1)**((num_i + 1) / 2) * _np.prod([ri_sign(pauli1, pauli2, p) for pauli1, pauli2, p in zip(s1, s2, parities)]) if isinstance(other, NQPauliOp): other_sign = other.sign - elif isinstance(other, NQPauliState): other_sign = _np.product(other.signs) + elif isinstance(other, NQPauliState): other_sign = _np.prod(other.signs) else: raise ValueError("Can't take commutator with %s type" % str(type(other))) return NQPauliOp(op, sign * self.sign * other_sign) diff --git a/pygsti/extras/interpygate/core.py b/pygsti/extras/interpygate/core.py index cd0a7c232..d7b205146 100644 --- a/pygsti/extras/interpygate/core.py +++ b/pygsti/extras/interpygate/core.py @@ -565,7 +565,7 @@ def compute_data(self, comm=None, mpi_workers_per_process=1, verbosity=0): if rank in root_ranks: #Only root ranks store data (fn_to_interpolate only needs to return results on root proc) - flat_data = _np.empty(len(my_points) * int(_np.product(expected_fn_output_shape)), dtype='d') + flat_data = _np.empty(len(my_points) * int(_np.prod(expected_fn_output_shape)), dtype='d') data = flat_data.view(); data.shape = (len(my_points),) + expected_fn_output_shape if (comm is not None): printer.log("Group %d processing %d points on %d processors." % (color, len(my_points), diff --git a/pygsti/extras/rpe/rpetools.py b/pygsti/extras/rpe/rpetools.py index 0893d0ad8..2a64e0e0e 100644 --- a/pygsti/extras/rpe/rpetools.py +++ b/pygsti/extras/rpe/rpetools.py @@ -375,20 +375,19 @@ def analyze_rpe_data(input_dataset, true_or_target_model, string_list_d, rpeconf Returns ------- resultsD : dict - A dictionary of the results - The keys of the dictionary are: + A dictionary of the results. The keys of the dictionary are: - -'alphaHatList' : List (ordered by k) of alpha estimates. - -'epsilonHatList' : List (ordered by k) of epsilon estimates. - -'thetaHatList' : List (ordered by k) of theta estimates. - -'alphaErrorList' : List (ordered by k) of difference between true + * 'alphaHatList' : List (ordered by k) of alpha estimates. + * 'epsilonHatList' : List (ordered by k) of epsilon estimates. + * 'thetaHatList' : List (ordered by k) of theta estimates. + * 'alphaErrorList' : List (ordered by k) of difference between true alpha and RPE estimate of alpha. 
- -'epsilonErrorList' : List (ordered by k) of difference between true + * 'epsilonErrorList' : List (ordered by k) of difference between true epsilon and RPE estimate of epsilon. - -'thetaErrorList' : List (ordered by k) of difference between true + * 'thetaErrorList' : List (ordered by k) of difference between true theta and RPE estimate of theta. - -'PhiFunErrorList' : List (ordered by k) of _sin_phi2 values. - + * 'PhiFunErrorList' : List (ordered by k) of _sin_phi2 values. + """ alphaCosStrList = string_list_d['alpha', 'cos'] alphaSinStrList = string_list_d['alpha', 'sin'] diff --git a/pygsti/forwardsims/distforwardsim.py b/pygsti/forwardsims/distforwardsim.py index e7c1db727..b6a864411 100644 --- a/pygsti/forwardsims/distforwardsim.py +++ b/pygsti/forwardsims/distforwardsim.py @@ -39,7 +39,7 @@ class DistributableForwardSimulator(_ForwardSimulator): but it will need to be set (by assigning `self.model`) before using this simulator. num_atoms : int, optional - The number of atoms to use when creating a layout (i.e. when calling :method:`create_layout`). + The number of atoms to use when creating a layout (i.e. when calling :meth:`create_layout`). This determines how many units the element (circuit outcome probability) dimension is divided into, and doesn't have to correlate with the number of processors. When multiple processors are used, if `num_atoms` is less than the number of processors it should divide the number of @@ -456,7 +456,7 @@ def _compute_processor_distribution(self, array_types, nprocs, num_params, num_c else self._pblk_sizes[0:len(param_dimensions)] # automatically set these? if self._processor_grid is not None: - assert(_np.product(self._processor_grid) <= nprocs), "`processor_grid` must multiply to # of procs!" + assert(_np.prod(self._processor_grid) <= nprocs), "`processor_grid` must multiply to # of procs!" na = self._processor_grid[0] natoms = max(na, self._num_atoms) if (self._num_atoms is not None) else na npp = () diff --git a/pygsti/forwardsims/forwardsim.py b/pygsti/forwardsims/forwardsim.py index bd6249194..d5af0937d 100644 --- a/pygsti/forwardsims/forwardsim.py +++ b/pygsti/forwardsims/forwardsim.py @@ -324,7 +324,7 @@ def create_layout(self, circuits, dataset=None, resource_alloc=None, array_types : tuple, optional A tuple of string-valued array types, as given by - :method:`CircuitOutcomeProbabilityArrayLayout.allocate_local_array`. These types determine + :meth:`CircuitOutcomeProbabilityArrayLayout.allocate_local_array`. These types determine what types of arrays we anticipate computing using this layout (and forward simulator). These are used to check available memory against the limit (if it exists) within `resource_alloc`. The array types also determine the number of derivatives that this layout is able to compute. @@ -548,7 +548,7 @@ def bulk_fill_probs(self, array_to_fill, layout): layout : CircuitOutcomeProbabilityArrayLayout A layout for `array_to_fill`, describing what circuit outcome each - element corresponds to. Usually given by a prior call to :method:`create_layout`. + element corresponds to. Usually given by a prior call to :meth:`create_layout`. Returns ------- @@ -592,11 +592,11 @@ def bulk_fill_dprobs(self, array_to_fill, layout, pr_array_to_fill=None): layout : CircuitOutcomeProbabilityArrayLayout A layout for `array_to_fill`, describing what circuit outcome each - element corresponds to. Usually given by a prior call to :method:`create_layout`. + element corresponds to. Usually given by a prior call to :meth:`create_layout`.
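Editorial aside: the most common fix in this patch swaps `:method:` and `:function:` (not actual Sphinx roles, so they rendered as literal text) for the Python-domain roles `:meth:` and `:func:`. A before/after sketch in a throwaway docstring (hypothetical function, not pyGSTi API):

```python
def example():
    """
    Broken cross-references (no such Sphinx roles; they render literally):
        :method:`create_layout`, :function:`preferred_signs_from_paulidict`
    Working Python-domain roles, as used in the replacement lines:
        :meth:`create_layout`, :func:`preferred_signs_from_paulidict`
    """
```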
pr_mx_to_fill : numpy array, optional when not None, an already-allocated length-`len(layout)` numpy array that is - filled with probabilities, just as in :method:`bulk_fill_probs`. + filled with probabilities, just as in :meth:`bulk_fill_probs`. Returns ------- @@ -663,21 +663,21 @@ def bulk_fill_hprobs(self, array_to_fill, layout, layout : CircuitOutcomeProbabilityArrayLayout A layout for `array_to_fill`, describing what circuit outcome each - element corresponds to. Usually given by a prior call to :method:`create_layout`. + element corresponds to. Usually given by a prior call to :meth:`create_layout`. pr_mx_to_fill : numpy array, optional when not None, an already-allocated length-`len(layout)` numpy array that is - filled with probabilities, just as in :method:`bulk_fill_probs`. + filled with probabilities, just as in :meth:`bulk_fill_probs`. deriv1_array_to_fill : numpy array, optional when not None, an already-allocated numpy array of shape `(len(layout),M1)` that is filled with probability derivatives, similar to - :method:`bulk_fill_dprobs` (see `array_to_fill` for a definition of `M1`). + :meth:`bulk_fill_dprobs` (see `array_to_fill` for a definition of `M1`). deriv2_array_to_fill : numpy array, optional when not None, an already-allocated numpy array of shape `(len(layout),M2)` that is filled with probability derivatives, similar to - :method:`bulk_fill_dprobs` (see `array_to_fill` for a definition of `M2`). + :meth:`bulk_fill_dprobs` (see `array_to_fill` for a definition of `M2`). Returns ------- @@ -747,7 +747,7 @@ def iter_hprobs_by_rectangle(self, layout, wrt_slices_list, ---------- layout : CircuitOutcomeProbabilityArrayLayout A layout for generated arrays, describing what circuit outcome each - element corresponds to. Usually given by a prior call to :method:`create_layout`. + element corresponds to. Usually given by a prior call to :meth:`create_layout`. wrt_slices_list : list A list of `(rowSlice,colSlice)` 2-tuples, each of which specify @@ -857,7 +857,7 @@ def create_layout(self, circuits, dataset=None, resource_alloc=None, the layout (evaluation strategy) is constructed. array_types : tuple, optional - A tuple of string-valued array types. See :method:`ForwardSimulator.create_layout`. + A tuple of string-valued array types. See :meth:`ForwardSimulator.create_layout`. derivative_dimensions : tuple, optional A tuple containing, optionally, the parameter-space dimension used when taking first @@ -877,7 +877,7 @@ def create_layout(self, circuits, dataset=None, resource_alloc=None, cache = None # Derived classes should override this function and create a cache here. # A dictionary whose keys are the elements of `circuits` and values can be # whatever the user wants. These values are provided when calling - # :method:`iter_unique_circuits_with_cache`. + # :meth:`iter_unique_circuits_with_cache`. return _CachedCOPALayout.create_from(circuits, self.model, dataset, derivative_dimensions, cache) # Override these two functions to plumb `cache` down to _compute* methods diff --git a/pygsti/forwardsims/mapforwardsim.py b/pygsti/forwardsims/mapforwardsim.py index 189762b49..fd38b7722 100644 --- a/pygsti/forwardsims/mapforwardsim.py +++ b/pygsti/forwardsims/mapforwardsim.py @@ -122,7 +122,7 @@ class MapForwardSimulator(_DistributableForwardSimulator, SimpleMapForwardSimula num_atoms : int, optional The number of atoms (sub-prefix-tables) to use when creating the layout (i.e. when calling - :method:`create_layout`). 
This determines how many units the element (circuit outcome + :meth:`create_layout`). This determines how many units the element (circuit outcome probability) dimension is divided into, and doesn't have to correclate with the number of processors. When multiple processors are used, if `num_atoms` is less than the number of processors then `num_atoms` should divide the number of processors evenly, so that @@ -211,7 +211,7 @@ def create_layout(self, circuits, dataset=None, resource_alloc=None, array_types the layout (evaluation strategy) is constructed. array_types : tuple, optional - A tuple of string-valued array types. See :method:`ForwardSimulator.create_layout`. + A tuple of string-valued array types. See :meth:`ForwardSimulator.create_layout`. derivative_dimension : int, optional Optionally, the parameter-space dimension used when taking first @@ -252,9 +252,9 @@ def create_layout(self, circuits, dataset=None, resource_alloc=None, array_types printer.log(f'Num Param Processors {npp}') printer.log("MapLayout: %d processors divided into %s (= %d) grid along circuit and parameter directions." % - (nprocs, ' x '.join(map(str, (na,) + npp)), _np.product((na,) + npp))) + (nprocs, ' x '.join(map(str, (na,) + npp)), _np.prod((na,) + npp))) printer.log(" %d atoms, parameter block size limits %s" % (natoms, str(param_blk_sizes))) - assert(_np.product((na,) + npp) <= nprocs), "Processor grid size exceeds available processors!" + assert(_np.prod((na,) + npp) <= nprocs), "Processor grid size exceeds available processors!" layout = _MapCOPALayout(circuits, self.model, dataset, self._max_cache_size, natoms, na, npp, param_dimensions, param_blk_sizes, resource_alloc, verbosity) @@ -387,7 +387,7 @@ def bulk_fill_timedep_chi2(self, array_to_fill, layout, ds_circuits, num_total_o layout : CircuitOutcomeProbabilityArrayLayout A layout for `array_to_fill`, describing what circuit outcome each - element corresponds to. Usually given by a prior call to :method:`create_layout`. + element corresponds to. Usually given by a prior call to :meth:`create_layout`. ds_circuits : list of Circuits the circuits to use as they should be queried from `dataset` (see @@ -439,7 +439,7 @@ def bulk_fill_timedep_dchi2(self, array_to_fill, layout, ds_circuits, num_total_ """ Compute the chi2 jacobian contributions for an entire tree of circuits, allowing for time dependent operations. - Similar to :method:`bulk_fill_timedep_chi2` but compute the *jacobian* + Similar to :meth:`bulk_fill_timedep_chi2` but compute the *jacobian* of the summed chi2 contributions for each circuit with respect to the model's parameters. @@ -452,7 +452,7 @@ def bulk_fill_timedep_dchi2(self, array_to_fill, layout, ds_circuits, num_total_ layout : CircuitOutcomeProbabilityArrayLayout A layout for `array_to_fill`, describing what circuit outcome each - element corresponds to. Usually given by a prior call to :method:`create_layout`. + element corresponds to. Usually given by a prior call to :meth:`create_layout`. ds_circuits : list of Circuits the circuits to use as they should be queried from `dataset` (see @@ -470,7 +470,7 @@ def bulk_fill_timedep_dchi2(self, array_to_fill, layout, ds_circuits, num_total_ min_prob_clip_for_weighting : float, optional Sets the minimum and maximum probability p allowed in the chi^2 - weights: N/(p*(1-p)) by clipping probability p values to lie within + weights: `N/(p*(1-p))` by clipping probability p values to lie within the interval [ min_prob_clip_for_weighting, 1-min_prob_clip_for_weighting ]. 
prob_clip_interval : 2-tuple or None, optional @@ -480,7 +480,7 @@ def bulk_fill_timedep_dchi2(self, array_to_fill, layout, ds_circuits, num_total_ chi2_array_to_fill : numpy array, optional when not None, an already-allocated length-E numpy array that is filled with the per-circuit chi2 contributions, just like in - bulk_fill_timedep_chi2(...). + `bulk_fill_timedep_chi2(...)`. Returns ------- @@ -516,9 +516,9 @@ def bulk_fill_timedep_loglpp(self, array_to_fill, layout, ds_circuits, num_total an already-allocated 1D numpy array of length equal to the total number of computed elements (i.e. layout.num_elements) - layout : CircuitOutcomeProbabilityArrayLayout + layout : CircuitOutcomeProbabilityArrayLayout A layout for `array_to_fill`, describing what circuit outcome each - element corresponds to. Usually given by a prior call to :method:`create_layout`. + element corresponds to. Usually given by a prior call to :meth:`create_layout`. ds_circuits : list of Circuits the circuits to use as they should be queried from `dataset` (see @@ -576,7 +576,7 @@ def bulk_fill_timedep_dloglpp(self, array_to_fill, layout, ds_circuits, num_tota """ Compute the ("poisson picture") log-likelihood jacobian contributions for an entire tree of circuits. - Similar to :method:`bulk_fill_timedep_loglpp` but compute the *jacobian* + Similar to :meth:`bulk_fill_timedep_loglpp` but computes the *jacobian* of the summed logl (in poisson picture) contributions for each circuit with respect to the model's parameters. @@ -589,7 +589,7 @@ def bulk_fill_timedep_dloglpp(self, array_to_fill, layout, ds_circuits, num_tota layout : CircuitOutcomeProbabilityArrayLayout A layout for `array_to_fill`, describing what circuit outcome each - element corresponds to. Usually given by a prior call to :method:`create_layout`. + element corresponds to. Usually given by a prior call to :meth:`create_layout`. ds_circuits : list of Circuits the circuits to use as they should be queried from `dataset` (see @@ -618,7 +618,7 @@ def bulk_fill_timedep_dloglpp(self, array_to_fill, layout, ds_circuits, num_tota logl_array_to_fill : numpy array, optional when not None, an already-allocated length-E numpy array that is filled with the per-circuit logl contributions, just like in - bulk_fill_timedep_loglpp(...). + `bulk_fill_timedep_loglpp(...)`. Returns ------- diff --git a/pygsti/forwardsims/matrixforwardsim.py b/pygsti/forwardsims/matrixforwardsim.py index 21267a493..fda58668b 100644 --- a/pygsti/forwardsims/matrixforwardsim.py +++ b/pygsti/forwardsims/matrixforwardsim.py @@ -648,7 +648,7 @@ class MatrixForwardSimulator(_DistributableForwardSimulator, SimpleMatrixForward num_atoms : int, optional The number of atoms (sub-evaluation-trees) to use when creating the layout (i.e. when calling - :method:`create_layout`). This determines how many units the element (circuit outcome + :meth:`create_layout`). This determines how many units the element (circuit outcome probability) dimension is divided into, and doesn't have to correlate with the number of processors. When multiple processors are used, if `num_atoms` is less than the number of processors then `num_atoms` should divide the number of processors evenly, so that @@ -1044,7 +1044,7 @@ def create_layout(self, circuits, dataset=None, resource_alloc=None, array_types the layout (evaluation strategy) is constructed. array_types : tuple, optional - A tuple of string-valued array types. See :method:`ForwardSimulator.create_layout`. + A tuple of string-valued array types.
See :meth:`ForwardSimulator.create_layout`. derivative_dimension : int, optional Optionally, the parameter-space dimension used when taking first @@ -1093,9 +1093,9 @@ def create_layout(self, circuits, dataset=None, resource_alloc=None, array_types na, npp = 1, (1, 1) # save all processor division for within the (single) atom, for different timestamps printer.log("MatrixLayout: %d processors divided into %s (= %d) grid along circuit and parameter directions." % - (nprocs, ' x '.join(map(str, (na,) + npp)), _np.product((na,) + npp))) + (nprocs, ' x '.join(map(str, (na,) + npp)), _np.prod((na,) + npp))) printer.log(" %d atoms, parameter block size limits %s" % (natoms, str(param_blk_sizes))) - assert(_np.product((na,) + npp) <= nprocs), "Processor grid size exceeds available processors!" + assert(_np.prod((na,) + npp) <= nprocs), "Processor grid size exceeds available processors!" layout = _MatrixCOPALayout(circuits, self.model, dataset, natoms, na, npp, param_dimensions, param_blk_sizes, resource_alloc, verbosity) @@ -1839,7 +1839,7 @@ def bulk_fill_timedep_chi2(self, array_to_fill, layout, ds_circuits, num_total_o layout : CircuitOutcomeProbabilityArrayLayout A layout for `array_to_fill`, describing what circuit outcome each - element corresponds to. Usually given by a prior call to :method:`create_layout`. + element corresponds to. Usually given by a prior call to :meth:`create_layout`. ds_circuits : list of Circuits the circuits to use as they should be queried from `dataset` (see @@ -1880,7 +1880,7 @@ def bulk_fill_timedep_dchi2(self, array_to_fill, layout, ds_circuits, num_total_ """ Compute the chi2 jacobian contributions for an entire tree of circuits, allowing for time dependent operations. - Similar to :method:`bulk_fill_timedep_chi2` but compute the *jacobian* + Similar to :meth:`bulk_fill_timedep_chi2` but computes the *jacobian* of the summed chi2 contributions for each circuit with respect to the model's parameters. @@ -1893,7 +1893,7 @@ def bulk_fill_timedep_dchi2(self, array_to_fill, layout, ds_circuits, num_total_ layout : CircuitOutcomeProbabilityArrayLayout A layout for `array_to_fill`, describing what circuit outcome each - element corresponds to. Usually given by a prior call to :method:`create_layout`. + element corresponds to. Usually given by a prior call to :meth:`create_layout`. ds_circuits : list of Circuits the circuits to use as they should be queried from `dataset` (see @@ -1947,9 +1947,9 @@ def bulk_fill_timedep_loglpp(self, array_to_fill, layout, ds_circuits, num_total an already-allocated 1D numpy array of length equal to the total number of computed elements (i.e. layout.num_elements) - layout : CircuitOutcomeProbabilityArrayLayout + layout : CircuitOutcomeProbabilityArrayLayout A layout for `array_to_fill`, describing what circuit outcome each - element corresponds to. Usually given by a prior call to :method:`create_layout`. + element corresponds to. Usually given by a prior call to :meth:`create_layout`. ds_circuits : list of Circuits the circuits to use as they should be queried from `dataset` (see @@ -1995,7 +1995,7 @@ def bulk_fill_timedep_dloglpp(self, array_to_fill, layout, ds_circuits, num_tota """ Compute the ("poisson picture") log-likelihood jacobian contributions for an entire tree of circuits.
- Similar to :method:`bulk_fill_timedep_loglpp` but compute the *jacobian* + Similar to :meth:`bulk_fill_timedep_loglpp` but computes the *jacobian* of the summed logl (in poisson picture) contributions for each circuit with respect to the model's parameters. @@ -2008,7 +2008,7 @@ def bulk_fill_timedep_dloglpp(self, array_to_fill, layout, ds_circuits, num_tota layout : CircuitOutcomeProbabilityArrayLayout A layout for `array_to_fill`, describing what circuit outcome each - element corresponds to. Usually given by a prior call to :method:`create_layout`. + element corresponds to. Usually given by a prior call to :meth:`create_layout`. ds_circuits : list of Circuits the circuits to use as they should be queried from `dataset` (see diff --git a/pygsti/forwardsims/successfailfwdsim.py b/pygsti/forwardsims/successfailfwdsim.py index e1f3c0b84..83336324a 100644 --- a/pygsti/forwardsims/successfailfwdsim.py +++ b/pygsti/forwardsims/successfailfwdsim.py @@ -36,7 +36,7 @@ def create_layout(self, circuits, dataset=None, resource_alloc=None, the layout (evaluation strategy) is constructed. array_types : tuple, optional - A tuple of string-valued array types. See :method:`ForwardSimulator.create_layout`. + A tuple of string-valued array types. See :meth:`ForwardSimulator.create_layout`. derivative_dimension : int, optional Optionally, the parameter-space dimension used when taking first diff --git a/pygsti/forwardsims/termforwardsim.py b/pygsti/forwardsims/termforwardsim.py index ea07fe1b2..1dd7c92b4 100644 --- a/pygsti/forwardsims/termforwardsim.py +++ b/pygsti/forwardsims/termforwardsim.py @@ -132,7 +132,7 @@ class TermForwardSimulator(_DistributableForwardSimulator): num_atoms : int, optional The number of atoms (sub-tables) to use when creating the layout (i.e. when calling - :method:`create_layout`). This determines how many units the element (circuit outcome + :meth:`create_layout`). This determines how many units the element (circuit outcome probability) dimension is divided into, and doesn't have to correlate with the number of processors. When multiple processors are used, if `num_atoms` is less than the number of processors then `num_atoms` should divide the number of processors evenly, so that @@ -286,7 +286,7 @@ def create_layout(self, circuits, dataset=None, resource_alloc=None, array_types the layout (evaluation strategy) is constructed. array_types : tuple, optional - A tuple of string-valued array types. See :method:`ForwardSimulator.create_layout`. + A tuple of string-valued array types. See :meth:`ForwardSimulator.create_layout`. derivative_dimension : int, optional Optionally, the parameter-space dimension used when taking first @@ -326,9 +326,9 @@ def create_layout(self, circuits, dataset=None, resource_alloc=None, array_types array_types, nprocs, num_params, len(circuits), default_natoms=nprocs) printer.log("TermLayout: %d processors divided into %s (= %d) grid along circuit and parameter directions." % - (nprocs, ' x '.join(map(str, (na,) + npp)), _np.product((na,) + npp))) + (nprocs, ' x '.join(map(str, (na,) + npp)), _np.prod((na,) + npp))) printer.log(" %d atoms, parameter block size limits %s" % (natoms, str(param_blk_sizes))) - assert(_np.product((na,) + npp) <= nprocs), "Processor grid size exceeds available processors!" + assert(_np.prod((na,) + npp) <= nprocs), "Processor grid size exceeds available processors!"
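```python
# Editorial aside: a toy, self-contained version of the grid-size check the
# asserts above perform (na = atom-direction processors, npp = the parameter-
# direction grid; the values below are hypothetical, not from pyGSTi).
import numpy as np

nprocs, na, npp = 8, 2, (2, 2)
grid = (na,) + npp
assert int(np.prod(grid)) <= nprocs, "Processor grid size exceeds available processors!"
print(' x '.join(map(str, grid)), '=', int(np.prod(grid)))  # 2 x 2 x 2 = 8
```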
layout = _TermCOPALayout(circuits, self.model, dataset, natoms, na, npp, param_dimensions, param_blk_sizes, resource_alloc, printer) @@ -950,7 +950,7 @@ def _achieved_and_max_sopm_jacobian_atom(self, layout_atom): Eops = [self.model.circuit_layer_operator(elbl, 'povm') for elbl in elabels] partial_op_maxmag_values = [op.total_term_magnitude() for op in partial_ops] Eop_maxmag_values = [Eop.total_term_magnitude() for Eop in Eops] - maxmag_partial_product = _np.product(partial_op_maxmag_values) + maxmag_partial_product = _np.prod(partial_op_maxmag_values) maxmag_products = [maxmag_partial_product * Eop_val for Eop_val in Eop_maxmag_values] deriv = _np.zeros((len(elabels), Np), 'd') @@ -995,7 +995,7 @@ def _sopm_gaps_jacobian_atom(self, layout_atom): def bulk_sopm_gaps_jacobian(self, layout): """ - Compute the jacobian of the the output of :method:`bulk_sopm_gaps`. + Compute the jacobian of the output of :meth:`bulk_sopm_gaps`. Parameters ---------- @@ -1051,7 +1051,7 @@ def _prs_as_pruned_polynomial_reps(self, In particular, the circuit-outcomes under consideration share the same state preparation and differ only in their POVM effects. Employs a truncated or pruned path-integral approach, as opposed to just including everything up to some Taylor - order as in :method:`_prs_as_polynomials`. + order as in :meth:`_prs_as_polynomials`. Parameters ---------- diff --git a/pygsti/forwardsims/termforwardsim_calc_generic.py b/pygsti/forwardsims/termforwardsim_calc_generic.py index 792bf2606..c23b35d30 100644 --- a/pygsti/forwardsims/termforwardsim_calc_generic.py +++ b/pygsti/forwardsims/termforwardsim_calc_generic.py @@ -72,7 +72,7 @@ def prs_as_polynomials(fwdsim, rholabel, elabels, circuit, polynomial_vindices_p # use get_direct_order_terms(order, order_base) w/order_base=0.1(?) instead of taylor_order_terms?? # below: replace prps with: prs = _np.zeros(len(elabels),complex) # an array in "bulk" mode # use *= or * instead of .mult( and .scale( - # e.g. res = _np.product([f.coeff for f in factors]) + # e.g. res = _np.prod([f.coeff for f in factors]) # res *= (pLeft * pRight) # - add assert(_np.linalg.norm(_np.imag(prs)) < 1e-6) at end and return _np.real(prs) @@ -227,7 +227,7 @@ def prs_as_polynomials(fwdsim, rholabel, elabels, circuit, polynomial_vindices_p # #DEBUG!!!
# db_nfactors = [len(l) for l in factor_lists] - # db_totfactors = _np.product(db_nfactors) + # db_totfactors = _np.prod(db_nfactors) # db_factor_cnt += db_totfactors # DEBUG_FCOUNT += db_totfactors # db_part_cnt += 1 @@ -347,7 +347,7 @@ def circuit_achieved_and_max_sopm(fwdsim, rholabel, elabels, circuit, repcache, ops = [fwdsim.model._circuit_layer_operator(rholabel, 'prep')] + \ [fwdsim.model._circuit_layer_operator(glbl, 'op') for glbl in circuit] - max_sum_of_pathmags = _np.product([op.total_term_magnitude for op in ops]) + max_sum_of_pathmags = _np.prod([op.total_term_magnitude for op in ops]) max_sum_of_pathmags = _np.array( [max_sum_of_pathmags * fwdsim.model._circuit_layer_operator(elbl, 'povm').total_term_magnitude for elbl in elabels], 'd') @@ -459,7 +459,7 @@ def find_best_pathmagnitude_threshold(fwdsim, rholabel, elabels, circuit, polyno ops = [fwdsim.model._circuit_layer_operator(rholabel, 'prep')] + \ [fwdsim.model._circuit_layer_operator(glbl, 'op') for glbl in circuit] - max_sum_of_pathmags = _np.product([op.total_term_magnitude for op in ops]) + max_sum_of_pathmags = _np.prod([op.total_term_magnitude for op in ops]) max_sum_of_pathmags = _np.array( [max_sum_of_pathmags * fwdsim.model._circuit_layer_operator(elbl, 'povm').total_term_magnitude for elbl in elabels], 'd') @@ -838,7 +838,7 @@ def _prs_as_pruned_polys(fwdsim, rholabel, elabels, circuit, repcache, comm=None ops = [fwdsim.model._circuit_layer_operator(rholabel, 'prep')] + \ [fwdsim.model._circuit_layer_operator(glbl, 'op') for glbl in circuit] - max_sum_of_pathmags = _np.product([op.total_term_magnitude for op in ops]) + max_sum_of_pathmags = _np.prod([op.total_term_magnitude for op in ops]) max_sum_of_pathmags = _np.array( [max_sum_of_pathmags * fwdsim.model._circuit_layer_operator(elbl, 'povm').total_term_magnitude for elbl in elabels], 'd') diff --git a/pygsti/io/metadir.py b/pygsti/io/metadir.py index 4456672ea..849da9729 100644 --- a/pygsti/io/metadir.py +++ b/pygsti/io/metadir.py @@ -484,7 +484,7 @@ def _obj_to_meta_json(obj, dirname): Create a meta.json file within `dirname` that contains (only) the type of `obj` in its 'type' field. This is used to save an object that contains essentially no other data - to a directory, in lieu of :function:`write_obj_to_meta_based_dir`. + to a directory, in lieu of :func:`write_obj_to_meta_based_dir`. Parameters ---------- @@ -509,9 +509,9 @@ def write_obj_to_meta_based_dir(obj, dirname, auxfile_types_member, omit_attribu """ Write the contents of `obj` to `dirname` using a 'meta.json' file and an auxfile-types dictionary. - This is similar to :function:`write_meta_based_dir`, except it takes an object (`obj`) + This is similar to :func:`write_meta_based_dir`, except it takes an object (`obj`) whose `.__dict__`, minus omitted attributes, is used as the dictionary to write and whose - auxfile-types comes from another object attribute.. + auxfile-types comes from another object attribute. Parameters ---------- @@ -611,7 +611,7 @@ def write_dict_to_json_or_pkl_files(d, dirname): If the element is json-able, it is JSON-serialized and the ".json" extension is used. If not, pickle is used to serialize the element, and the ".pkl" extension is used. This is the reverse of - :function:`_read_json_or_pkl_files_to_dict`. + :func:`_read_json_or_pkl_files_to_dict`. 
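Editorial aside: metadir.py's "JSON if possible, else pickle" convention described above is easy to mirror; a minimal sketch (hypothetical helper, not pyGSTi's implementation):

```python
import json
import pathlib
import pickle

def write_json_or_pkl(value, basename):
    """Serialize `value` to basename.json if JSON-able, else to basename.pkl."""
    base = pathlib.Path(basename)
    try:
        text = json.dumps(value)                       # TypeError if not JSON-able
        base.with_suffix('.json').write_text(text)
    except TypeError:
        base.with_suffix('.pkl').write_bytes(pickle.dumps(value))
```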
Parameters ---------- diff --git a/pygsti/io/mongodb.py b/pygsti/io/mongodb.py index 0a4b51456..b1dae3492 100644 --- a/pygsti/io/mongodb.py +++ b/pygsti/io/mongodb.py @@ -864,7 +864,7 @@ def _remove_auxdoc_member(mongodb, member_name, typ, metadata, session, recursiv def read_dict_from_mongodb(mongodb, collection_name, identifying_metadata): """ - Read a dictionary serialized via :function:`write_dict_to_mongodb` into a dictionary. + Read a dictionary serialized via :func:`write_dict_to_mongodb` into a dictionary. The elements of the constructed dictionary are stored as separate documents in the specified MongoDB collection. diff --git a/pygsti/io/readers.py b/pygsti/io/readers.py index 048b1c5cd..204c6dee2 100644 --- a/pygsti/io/readers.py +++ b/pygsti/io/readers.py @@ -351,7 +351,7 @@ def read_circuit_list(filename, read_raw_strings=False, line_labels='auto', num_ def convert_strings_to_circuits(obj): """ - Converts an object resulting from :function:`convert_circuits_to_strings` back to its original. + Converts an object resulting from :func:`convert_circuits_to_strings` back to its original. Parameters ---------- diff --git a/pygsti/io/writers.py b/pygsti/io/writers.py index ff1641dab..97f701816 100644 --- a/pygsti/io/writers.py +++ b/pygsti/io/writers.py @@ -476,7 +476,7 @@ def write_empty_protocol_data(dirname, edesign, sparse="auto", clobber_ok=False) Write to a directory an experimental design (`edesign`) and the dataset template files needed to load in a :class:`ProtocolData` object, e.g. - using the :function:`read_data_from_dir` function, after the template + using the :func:`read_data_from_dir` function, after the template files are filled in. Parameters @@ -618,7 +618,7 @@ def fill_in_empty_dataset_with_fake_data(dataset_filename, model, num_samples, s value as its *start time*. fixed_column_mode : bool or 'auto', optional - How the underlying data set file is written - see :function:`write_dataset`. + How the underlying data set file is written - see :func:`write_dataset`. Returns ------- diff --git a/pygsti/layouts/cachedlayout.py b/pygsti/layouts/cachedlayout.py index d1e39afca..4e4e2ce02 100644 --- a/pygsti/layouts/cachedlayout.py +++ b/pygsti/layouts/cachedlayout.py @@ -31,7 +31,7 @@ class CachedCOPALayout(_CircuitOutcomeProbabilityArrayLayout): unique_circuits : list of Circuits The same as `circuits`, except duplicates are removed. Often this value is obtained - by a derived class calling the class method :method:`_compute_unique_circuits`. + by a derived class calling the class method :meth:`_compute_unique_circuits`. to_unique : dict A mapping that translates an index into `circuits` to one into `unique_circuits`. @@ -61,7 +61,7 @@ class CachedCOPALayout(_CircuitOutcomeProbabilityArrayLayout): cache : dict The cache dictionary for this layout. Its keys are the elements of `circuits` and its values can be whatever the user wants. These values are provided when calling - :method:`iter_unique_circuits_with_cache`, so that a forward simulator using this + :meth:`iter_unique_circuits_with_cache`, so that a forward simulator using this layout can cache arbitrary precomputed information within the layout. """ @@ -99,7 +99,7 @@ def create_from(cls, circuits, model, dataset=None, param_dimensions=(), resourc cache : dict A dictionary whose keys are the elements of `circuits` and values can be whatever the user wants. These values are provided when calling - :method:`iter_unique_circuits_with_cache`. + :meth:`iter_unique_circuits_with_cache`.
""" if cache is None: cache = {} ret = super().create_from(circuits, model, dataset, param_dimensions) @@ -119,9 +119,9 @@ def iter_unique_circuits_with_cache(self): A generator used to iterate over a `(element_indices, circuit, outcomes, cache)` tuple for each *unique* circuit held by this layout, where `element_indices` and `outcomes` - are the values that would be retrieved by the :method:`indices` and :method:`outcomes` + are the values that would be retrieved by the :meth:`indices` and :meth:`outcomes` methods, `circuit` is the unique circuit itself, and `cache` is the user-defined value - of the cache-dictionary entry for this circuit.. + of the cache-dictionary entry for this circuit. Returns ------- diff --git a/pygsti/layouts/copalayout.py b/pygsti/layouts/copalayout.py index f24326f07..430fb6734 100644 --- a/pygsti/layouts/copalayout.py +++ b/pygsti/layouts/copalayout.py @@ -54,7 +54,7 @@ class CircuitOutcomeProbabilityArrayLayout(_NicelySerializable): unique_circuits : list of Circuits The same as `circuits`, except duplicates are removed. Often this value is obtained - by a derived class calling the class method :method:`_compute_unique_circuits`. + by a derived class calling the class method :meth:`_compute_unique_circuits`. to_unique : dict A mapping that translates an index into `circuits` to one into `unique_circuits`. @@ -324,7 +324,7 @@ def allocate_local_array(self, array_type, dtype, zero_out=False, memory_tracker memory_tracker : ResourceAllocation, optional If not None, the amount of memory being allocated is added, using - :method:`add_tracked_memory` to this resource allocation object. + :meth:`add_tracked_memory` to this resource allocation object. extra_elements : int, optional The number of additional "extra" elements to append to the element @@ -363,10 +363,10 @@ def allocate_local_array(self, array_type, dtype, zero_out=False, memory_tracker def free_local_array(self, local_array): """ - Frees an array allocated by :method:`allocate_local_array`. + Frees an array allocated by :meth:`allocate_local_array`. This method should always be paired with a call to - :method:`allocate_local_array`, since the allocated array + :meth:`allocate_local_array`, since the allocated array may utilize shared memory, which must be explicitly de-allocated. Parameters @@ -383,11 +383,11 @@ def free_local_array(self, local_array): def gather_local_array_base(self, array_type, array_portion, extra_elements=0, all_gather=False, return_shared=False): """ - Gathers an array onto the root processor or all the processors.. + Gathers an array onto the root processor or all the processors. Gathers the portions of an array that was distributed using this layout (i.e. according to the host_element_slice, etc. slices in - this layout). This could be an array allocated by :method:`allocate_local_array` + this layout). This could be an array allocated by :meth:`allocate_local_array` but need not be, as this routine does not require that `array_portion` be shared. Arrays can be 1, 2, or 3-dimensional. The dimensions are understood to be along the "element", "parameter", and @@ -397,7 +397,7 @@ def gather_local_array_base(self, array_type, array_portion, extra_elements=0, ---------- array_type : ("e", "ep", "ep2", "epp", "p", "jtj", "jtf", "c", "cp", "cp2", "cpp") The type of array to allocate, often corresponding to the array shape. See - :method:`allocate_local_array` for a more detailed description. + :meth:`allocate_local_array` for a more detailed description. 
array_portion : numpy.ndarray The portion of the final array that is local to the calling @@ -407,7 +407,7 @@ def gather_local_array_base(self, array_type, array_portion, extra_elements=0, extra_elements : int, optional The number of additional "extra" elements to append to the element dimension, beyond those called for by this layout. Should match - usage in :method:`allocate_local_array`. + usage in :meth:`allocate_local_array`. all_gather : bool, optional Whether the result should be returned on all the processors (when `all_gather=True`) @@ -418,7 +418,7 @@ in a small performance gain because the array used internally to gather the results can be returned directly. When `True` a shared memory handle is also returned, and the caller assumes responsibility for freeing the memory via - :function:`pygsti.tools.sharedmemtools.cleanup_shared_ndarray`. + :func:`pygsti.tools.sharedmemtools.cleanup_shared_ndarray`. Returns ------- @@ -441,7 +441,7 @@ def gather_local_array(self, array_type, array_portion, extra_elements=0, return Gathers the portions of an array that was distributed using this layout (i.e. according to the host_element_slice, etc. slices in - this layout). This could be an array allocated by :method:`allocate_local_array` + this layout). This could be an array allocated by :meth:`allocate_local_array` but need not be, as this routine does not require that `array_portion` be shared. Arrays can be 1, 2, or 3-dimensional. The dimensions are understood to be along the "element", "parameter", and @@ -457,7 +457,7 @@ def gather_local_array(self, array_type, array_portion, extra_elements=0, return extra_elements : int, optional The number of additional "extra" elements to append to the element dimension, beyond those called for by this layout. Should match - usage in :method:`allocate_local_array`. + usage in :meth:`allocate_local_array`. return_shared : bool, optional If `True` then, when shared memory is being used, the shared array used @@ -483,7 +483,7 @@ def allgather_local_array(self, array_type, array_portion, extra_elements=0, ret Gathers the portions of an array that was distributed using this layout (i.e. according to the host_element_slice, etc. slices in - this layout). This could be an array allocated by :method:`allocate_local_array` + this layout). This could be an array allocated by :meth:`allocate_local_array` but need not be, as this routine does not require that `array_portion` be shared. Arrays can be 1, 2, or 3-dimensional. The dimensions are understood to be along the "element", "parameter", and @@ -499,7 +499,7 @@ def allgather_local_array(self, array_type, array_portion, extra_elements=0, ret extra_elements : int, optional The number of additional "extra" elements to append to the element dimension, beyond those called for by this layout. Should match - usage in :method:`allocate_local_array`. + usage in :meth:`allocate_local_array`. return_shared : bool, optional If `True` then, when shared memory is being used, the shared array used @@ -552,7 +552,7 @@ def fill_jtf(self, j, f, jtf): Calculate the matrix-vector product `j.T @ f`. Here `j` is often a jacobian matrix, and `f` a vector of objective function term - values. `j` and `f` must be local arrays, created with :method:`allocate_local_array`. + values. `j` and `f` must be local arrays, created with :meth:`allocate_local_array`.
This function performs any necessary MPI/shared-memory communication when the arrays are distributed over multiple processors. @@ -611,7 +611,7 @@ def memory_estimate(self, array_type, dtype='d'): array_type : {'e', 'ep', 'epp'} The type of array. This string specifies the shape of the array, with `'e'` indicating dimension holding the layout's elements and - `'p'`s indicating parameter dimensions. + `'p'` indicating parameter dimensions. dtype : numpy.dtype The NumPy data type for the array. @@ -633,10 +633,10 @@ def indices(self, circuit): The element indices corresponding to a circuit in this layout. This is a slice into the element-dimension of arrays allocated using this layout, - e.g. an `'e'`-type array allocated by :method:`allocate_local_array`. The + e.g. an `'e'`-type array allocated by :meth:`allocate_local_array`. The entries of such an array correspond to different outcomes of this circuit, which - are separately given by :method:`outcomes` or alongside the indices in - :method:`indices_and_outcomes`. + are separately given by :meth:`outcomes` or alongside the indices in + :meth:`indices_and_outcomes`. Parameters ---------- @@ -670,7 +670,7 @@ def indices_and_outcomes(self, circuit): Returns both the element indices and outcome labels corresponding to a circuit in this layout. These quantities can be separately obtained - using the :method:`indices` and :method:`outcomes` methods, respectively. + using the :meth:`indices` and :meth:`outcomes` methods, respectively. Parameters ---------- @@ -689,7 +689,7 @@ def indices_for_index(self, index): """ Lookup the element indices corresponding to a given circuit by the circuit's index. - Similar to :method:`indices` but uses a circuit's index within this layout directly, + Similar to :meth:`indices` but uses a circuit's index within this layout directly, thus avoiding having to hash a :class:`Circuit` object and gaining a modicum of performance. @@ -708,7 +708,7 @@ def outcomes_for_index(self, index): """ Lookup the outcomes of a given circuit by the circuit's index. - Similar to :method:`outcomes` but uses a circuit's index within this layout directly, + Similar to :meth:`outcomes` but uses a circuit's index within this layout directly, thus avoiding having to hash a :class:`Circuit` object and gaining a modicum of performance. @@ -727,7 +727,7 @@ def indices_and_outcomes_for_index(self, index): """ Lookup the element indices and outcomes corresponding to a given circuit by the circuit's index. - Similar to :method:`indices_and_outcomes` but uses a circuit's index within this layout + Similar to :meth:`indices_and_outcomes` but uses a circuit's index within this layout directly, thus avoiding having to hash a :class:`Circuit` object and gaining a modicum of performance. @@ -755,7 +755,7 @@ def iter_unique_circuits(self): A generator used to iterate over a `(element_indices, circuit, outcomes)` tuple for each *unique* circuit held by this layout, where `element_indices` and `outcomes` - are the values that would be retrieved by the :method:`indices` and :method:`outcomes` + are the values that would be retrieved by the :meth:`indices` and :meth:`outcomes` methods, and `circuit` is the unique circuit itself. 
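Editorial aside: the `fill_jtf` method documented in the preceding hunks reduces to a plain `j.T @ f` in the single-process case; a toy illustration (the real method also handles MPI/shared-memory distribution):

```python
import numpy as np

j = np.arange(12.0).reshape(4, 3)  # hypothetical jacobian: 4 terms x 3 params
f = np.ones(4)                     # hypothetical objective-function term values
jtf = j.T @ f                      # the product fill_jtf writes into `jtf`
print(jtf)                         # [18. 22. 26.]
```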
Returns diff --git a/pygsti/layouts/distlayout.py b/pygsti/layouts/distlayout.py index 516415fd3..7a7184529 100644 --- a/pygsti/layouts/distlayout.py +++ b/pygsti/layouts/distlayout.py @@ -165,6 +165,7 @@ class DistributableCOPALayout(_CircuitOutcomeProbabilityArrayLayout): shared-memory structure to the physical processors, where the total number of cores is divided into node-groups that are able to share memory. The total number of cores is divided like this: + - first, we divide the cores into atom-processing groups, i.e. "atom-processors". An atom-processor is most accurately seen as a comm (group of processors). If shared memory is being used, either the entire atom-processor must be contained @@ -197,7 +198,7 @@ class DistributableCOPALayout(_CircuitOutcomeProbabilityArrayLayout): unique_circuits : list of Circuits The same as `circuits`, except duplicates are removed. Often this value is obtained - by a derived class calling the class method :method:`_compute_unique_circuits`. + by a derived class calling the class method :meth:`_compute_unique_circuits`. to_unique : dict A mapping that translates an index into `circuits` to one into `unique_circuits`. @@ -935,7 +936,7 @@ def allocate_local_array(self, array_type, dtype, zero_out=False, memory_tracker memory_tracker : ResourceAllocation, optional If not None, the amount of memory being allocated is added, using - :method:`add_tracked_memory` to this resource allocation object. + :meth:`add_tracked_memory` to this resource allocation object. extra_elements : int, optional The number of additional "extra" elements to append to the element @@ -1023,10 +1024,10 @@ def allocate_local_array(self, array_type, dtype, zero_out=False, memory_tracker def free_local_array(self, local_array): """ - Frees an array allocated by :method:`allocate_local_array`. + Frees an array allocated by :meth:`allocate_local_array`. This method should always be paired with a call to - :method:`allocate_local_array`, since the allocated array + :meth:`allocate_local_array`, since the allocated array may utilize shared memory, which must be explicitly de-allocated. Parameters @@ -1045,11 +1046,11 @@ def free_local_array(self, local_array): def gather_local_array_base(self, array_type, array_portion, extra_elements=0, all_gather=False, return_shared=False): """ - Gathers an array onto the root processor or all the processors.. + Gathers an array onto the root processor or all the processors. Gathers the portions of an array that was distributed using this layout (i.e. according to the host_element_slice, etc. slices in - this layout). This could be an array allocated by :method:`allocate_local_array` + this layout). This could be an array allocated by :meth:`allocate_local_array` but need not be, as this routine does not require that `array_portion` be shared. Arrays can be 1, 2, or 3-dimensional. The dimensions are understood to be along the "element", "parameter", and @@ -1059,7 +1060,7 @@ def gather_local_array_base(self, array_type, array_portion, extra_elements=0, a ---------- array_type : ("e", "ep", "ep2", "epp", "p", "jtj", "jtf", "c", "cp", "cp2", "cpp") The type of array to allocate, often corresponding to the array shape. See - :method:`allocate_local_array` for a more detailed description. + :meth:`allocate_local_array` for a more detailed description. 
array_portion : numpy.ndarray The portion of the final array that is local to the calling @@ -1070,7 +1071,7 @@ def gather_local_array_base(self, array_type, array_portion, extra_elements=0, a extra_elements : int, optional The number of additional "extra" elements to append to the element dimension, beyond those called for by this layout. Should match - usage in :method:`allocate_local_array`. + usage in :meth:`allocate_local_array`. all_gather : bool, optional Whether the result should be returned on all the processors (when `all_gather=True`) @@ -1081,7 +1082,7 @@ def gather_local_array_base(self, array_type, array_portion, extra_elements=0, a in a small performance gain because the array used internally to gather the results can be returned directly. When `True` a shared memory handle is also returned, and the caller assumes responsibilty for freeing the memory via - :function:`pygsti.tools.sharedmemtools.cleanup_shared_ndarray`. + :func:`pygsti.tools.sharedmemtools.cleanup_shared_ndarray`. Returns ------- @@ -1196,7 +1197,7 @@ def allsum_local_quantity(self, typ, value, use_shared_mem="auto"): Gathers the portions of an array that was distributed using this layout (i.e. according to the host_element_slice, etc. slices in - this layout). This could be an array allocated by :method:`allocate_local_array` + this layout). This could be an array allocated by :meth:`allocate_local_array` but need not be, as this routine does not require that `array_portion` be shared. Arrays can be 1, 2, or 3-dimensional. The dimensions are understood to be along the "element", "parameter", and @@ -1212,7 +1213,7 @@ def allsum_local_quantity(self, typ, value, use_shared_mem="auto"): extra_elements : int, optional The number of additional "extra" elements to append to the element dimension, beyond those called for by this layout. Should match - usage in :method:`allocate_local_array`. + usage in :meth:`allocate_local_array`. return_shared : bool, optional If `True` then, when shared memory is being used, the shared array used @@ -1257,7 +1258,7 @@ def fill_jtf(self, j, f, jtf): Calculate the matrix-vector product `j.T @ f`. Here `j` is often a jacobian matrix, and `f` a vector of objective function term - values. `j` and `f` must be local arrays, created with :method:`allocate_local_array`. + values. `j` and `f` must be local arrays, created with :meth:`allocate_local_array`. This function performs any necessary MPI/shared-memory communication when the arrays are distributed over multiple processors. diff --git a/pygsti/modelmembers/__init__.py b/pygsti/modelmembers/__init__.py index 209e4fc8c..532ab0033 100644 --- a/pygsti/modelmembers/__init__.py +++ b/pygsti/modelmembers/__init__.py @@ -25,7 +25,7 @@ def from_memoized_dict(mm_dict, serial_memo): mm_dict: dict A dict representation of this ModelMember ready for deserialization This must have at least the following fields: - module, class, submembers, params, state_space, evotype + module, class, submembers, params, state_space, evotype serial_memo: dict Keys are serialize_ids and values are ModelMembers. 
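What the fill_jtf contract above amounts to in code (the layout is assumed; the 'ep', 'e', and 'jtf' array types come from the allocate_local_array docstring):

    j = layout.allocate_local_array('ep', 'd')     # local jacobian block
    f = layout.allocate_local_array('e', 'd')      # local objective-term values
    jtf = layout.allocate_local_array('jtf', 'd')  # receives the product
    # ... fill j and f ...
    layout.fill_jtf(j, f, jtf)  # distributed equivalent of jtf[:] = j.T @ f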
This is NOT the same as diff --git a/pygsti/modelmembers/errorgencontainer.py b/pygsti/modelmembers/errorgencontainer.py index c4d697a71..331708fd3 100644 --- a/pygsti/modelmembers/errorgencontainer.py +++ b/pygsti/modelmembers/errorgencontainer.py @@ -44,7 +44,7 @@ def errorgen_coefficients(self, return_basis=False, logscale_nonham=False): essentially converts the coefficient into a rate that is the contribution this term would have within a depolarizing channel where all stochastic generators had this same coefficient. - This is the value returned by :method:`error_rates`. + This is the value returned by :meth:`error_rates`. Returns ------- @@ -65,7 +65,7 @@ def errorgen_coefficients(self, return_basis=False, logscale_nonham=False): def errorgen_coefficient_labels(self): """ - The elementary error-generator labels corresponding to the elements of :method:`errorgen_coefficients_array`. + The elementary error-generator labels corresponding to the elements of :meth:`errorgen_coefficients_array`. Returns ------- @@ -79,7 +79,7 @@ def errorgen_coefficients_array(self): """ The weighted coefficients of this operation's error generator in terms of "standard" error generators. - Constructs a 1D array of all the coefficients returned by :method:`errorgen_coefficients`, + Constructs a 1D array of all the coefficients returned by :meth:`errorgen_coefficients`, weighted so that different error generators can be weighted differently when a `errorgen_penalty_factor` is used in an objective function. @@ -93,7 +93,7 @@ def errorgen_coefficients_array(self): def errorgen_coefficients_array_deriv_wrt_params(self): """ - The jacobian of :method:`errogen_coefficients_array` with respect to this operation's parameters. + The jacobian of :meth:`errogen_coefficients_array` with respect to this operation's parameters. Returns ------- @@ -167,11 +167,11 @@ def set_errorgen_coefficients(self, lindblad_term_dict, action="update", logscal logscale_nonham : bool, optional Whether or not the values in `lindblad_term_dict` for non-hamiltonian error generators should be interpreted as error *rates* (of an - "equivalent" depolarizing channel, see :method:`errorgen_coefficients`) + "equivalent" depolarizing channel, see :meth:`errorgen_coefficients`) instead of raw coefficients. If True, then the non-hamiltonian coefficients are set to `-log(1 - d^2*rate)/d^2`, where `rate` is the corresponding value given in `lindblad_term_dict`. This is what is - performed by the function :method:`set_error_rates`. + performed by the function :meth:`set_error_rates`. truncate : bool, optional Whether to allow adjustment of the errogen coefficients in @@ -193,7 +193,7 @@ def set_error_rates(self, lindblad_term_dict, action="update"): Values are set so that the contributions of the resulting channel's error rate are given by the values in `lindblad_term_dict`. See - :method:`error_rates` for more details. + :meth:`error_rates` for more details. Parameters ---------- @@ -247,7 +247,7 @@ def errorgen_coefficients(self, return_basis=False, logscale_nonham=False): essentially converts the coefficient into a rate that is the contribution this term would have within a depolarizing channel where all stochastic generators had this same coefficient. - This is the value returned by :method:`error_rates`. + This is the value returned by :meth:`error_rates`. 
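The logscale_nonham conversion quoted above, written out for one qubit (d = 2, so d**2 = 4); a pure-NumPy illustration, not code from this diff:

    import numpy as np

    d2 = 4
    rate = 0.01
    coeff = -np.log(1 - d2 * rate) / d2         # error rate -> raw coefficient
    recovered = (1 - np.exp(-d2 * coeff)) / d2  # the inverse map used by error_rates
    assert np.isclose(recovered, rate)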
Returns ------- @@ -268,7 +268,7 @@ def errorgen_coefficients(self, return_basis=False, logscale_nonham=False): def errorgen_coefficient_labels(self): """ - The elementary error-generator labels corresponding to the elements of :method:`errorgen_coefficients_array`. + The elementary error-generator labels corresponding to the elements of :meth:`errorgen_coefficients_array`. Returns ------- @@ -282,7 +282,7 @@ def errorgen_coefficients_array(self): """ The weighted coefficients of this operation's error generator in terms of "standard" error generators. - Constructs a 1D array of all the coefficients returned by :method:`errorgen_coefficients`, + Constructs a 1D array of all the coefficients returned by :meth:`errorgen_coefficients`, weighted so that different error generators can be weighted differently when a `errorgen_penalty_factor` is used in an objective function. @@ -296,7 +296,7 @@ def errorgen_coefficients_array(self): def errorgen_coefficients_array_deriv_wrt_params(self): """ - The jacobian of :method:`errogen_coefficients_array` with respect to this operation's parameters. + The jacobian of :meth:`errogen_coefficients_array` with respect to this operation's parameters. Returns ------- @@ -371,7 +371,7 @@ def errorgen_coefficients(self, return_basis=False, logscale_nonham=False): essentially converts the coefficient into a rate that is the contribution this term would have within a depolarizing channel where all stochastic generators had this same coefficient. - This is the value returned by :method:`error_rates`. + This is the value returned by :meth:`error_rates`. Returns ------- @@ -419,11 +419,11 @@ def set_errorgen_coefficients(self, lindblad_term_dict, action="update", logscal logscale_nonham : bool, optional Whether or not the values in `lindblad_term_dict` for non-hamiltonian error generators should be interpreted as error *rates* (of an - "equivalent" depolarizing channel, see :method:`errorgen_coefficients`) + "equivalent" depolarizing channel, see :meth:`errorgen_coefficients`) instead of raw coefficients. If True, then the non-hamiltonian coefficients are set to `-log(1 - d^2*rate)/d^2`, where `rate` is the corresponding value given in `lindblad_term_dict`. This is what is - performed by the function :method:`set_error_rates`. + performed by the function :meth:`set_error_rates`. truncate : bool, optional Whether to allow adjustment of the errogen coefficients in @@ -440,7 +440,7 @@ def set_errorgen_coefficients(self, lindblad_term_dict, action="update", logscal def errorgen_coefficient_labels(self): """ - The elementary error-generator labels corresponding to the elements of :method:`errorgen_coefficients_array`. + The elementary error-generator labels corresponding to the elements of :meth:`errorgen_coefficients_array`. Returns ------- @@ -454,7 +454,7 @@ def errorgen_coefficients_array(self): """ The weighted coefficients of this operation's error generator in terms of "standard" error generators. - Constructs a 1D array of all the coefficients returned by :method:`errorgen_coefficients`, + Constructs a 1D array of all the coefficients returned by :meth:`errorgen_coefficients`, weighted so that different error generators can be weighted differently when a `errorgen_penalty_factor` is used in an objective function. @@ -468,7 +468,7 @@ def errorgen_coefficients_array(self): def errorgen_coefficients_array_deriv_wrt_params(self): """ - The jacobian of :method:`errogen_coefficients_array` with respect to this operation's parameters. 
+ The jacobian of :meth:`errogen_coefficients_array` with respect to this operation's parameters. Returns ------- diff --git a/pygsti/modelmembers/instruments/__init__.py b/pygsti/modelmembers/instruments/__init__.py index 2cc1a7339..47cbe0de9 100644 --- a/pygsti/modelmembers/instruments/__init__.py +++ b/pygsti/modelmembers/instruments/__init__.py @@ -87,7 +87,7 @@ def convert(instrument, to_type, basis, ideal_instrument=None, flatten_structure to_type : {"full","TP","static","static unitary"} The type of parameterizaton to convert to. See - :method:`Model.set_all_parameterizations` for more details. + :meth:`Model.set_all_parameterizations` for more details. basis : {'std', 'gm', 'pp', 'qt'} or Basis object The basis for `povm`. Allowed values are Matrix-unit (std), diff --git a/pygsti/modelmembers/instruments/instrument.py b/pygsti/modelmembers/instruments/instrument.py index 81e637fce..8457b86cb 100644 --- a/pygsti/modelmembers/instruments/instrument.py +++ b/pygsti/modelmembers/instruments/instrument.py @@ -127,7 +127,7 @@ def to_memoized_dict(self, mmg_memo): mm_dict: dict A dict representation of this ModelMember ready for serialization This must have at least the following fields: - module, class, submembers, params, state_space, evotype + module, class, submembers, params, state_space, evotype Additional fields may be added by derived classes. """ mm_dict = super().to_memoized_dict(mmg_memo) diff --git a/pygsti/modelmembers/instruments/tpinstrument.py b/pygsti/modelmembers/instruments/tpinstrument.py index 1217aa8f4..093f2cd7a 100644 --- a/pygsti/modelmembers/instruments/tpinstrument.py +++ b/pygsti/modelmembers/instruments/tpinstrument.py @@ -32,13 +32,14 @@ class TPInstrument(_mm.ModelMember, _collections.OrderedDict): trace-preserving map. The instrument's elements may or may not have all of the properties associated by a mathematical quantum instrument. - If M1,M2,...Mn are the elements of the instrument, then we parameterize + If `M1,M2,...Mn` are the elements of the instrument, then we parameterize + 1. MT = (M1+M2+...Mn) as a TPParmeterizedGate - 2. Di = Mi - MT for i = 1..(n-1) as FullyParameterizedGates + 2. Di = Mi - MT for `i = 1..(n-1)` as FullyParameterizedGates - So to recover M1...Mn we compute: - Mi = Di + MT for i = 1...(n-1) - = -(n-2)*MT-sum(Di) = -(n-2)*MT-[(MT-Mi)-n*MT] for i == (n-1) + So to recover `M1...Mn` we compute: + Mi = Di + MT for i = `1...(n-1)` + = -(n-2)*MT-sum(Di) = -(n-2)*MT-[(MT-Mi)-n*MT] for i == (n-1) Parameters ---------- @@ -154,7 +155,7 @@ def to_memoized_dict(self, mmg_memo): mm_dict: dict A dict representation of this ModelMember ready for serialization This must have at least the following fields: - module, class, submembers, params, state_space, evotype + module, class, submembers, params, state_space, evotype Additional fields may be added by derived classes. """ mm_dict = super().to_memoized_dict(mmg_memo) diff --git a/pygsti/modelmembers/instruments/tpinstrumentop.py b/pygsti/modelmembers/instruments/tpinstrumentop.py index fa24b21ff..4cf2d5cd4 100644 --- a/pygsti/modelmembers/instruments/tpinstrumentop.py +++ b/pygsti/modelmembers/instruments/tpinstrumentop.py @@ -30,7 +30,7 @@ class TPInstrumentOp(_DenseOperator): param_ops : list of LinearOperator objects A list of the underlying operation objects which constitute a simple parameterization of a :class:`TPInstrument`. 
Namely, this is - the list of [MT,D1,D2,...Dn] operations which parameterize *all* of the + the list of `[MT,D1,D2,...Dn]` operations which parameterize *all* of the `TPInstrument`'s elements. index : int @@ -86,7 +86,7 @@ def to_memoized_dict(self, mmg_memo): mm_dict: dict A dict representation of this ModelMember ready for serialization This must have at least the following fields: - module, class, submembers, params, state_space, evotype + module, class, submembers, params, state_space, evotype Additional fields may be added by derived classes. """ mm_dict = super().to_memoized_dict(mmg_memo) diff --git a/pygsti/modelmembers/modelmember.py b/pygsti/modelmembers/modelmember.py index df4b463a0..9915a31a5 100644 --- a/pygsti/modelmembers/modelmember.py +++ b/pygsti/modelmembers/modelmember.py @@ -283,8 +283,8 @@ def relink_parent(self, parent): This operation is appropriate to do when "re-linking" a parent with its children after the parent and child have been serialized. (the parent is *not* saved in serialization - see - ModelChild.__getstate__ -- and so must be manually re-linked - upon de-serialization). + ModelChild.__getstate__ -- and so must be manually re-linked + upon de-serialization). In addition to setting the parent of this object, this method sets the parent of any objects this object contains (i.e. @@ -685,7 +685,7 @@ def init_gpindices(self, allocated_to_parent=None): 2. The sub-members are all allocated to the *same* parent model. This method computes an "anticipated parent" model as the common parent of all - the submembers (if one exists) or `None`, and calls :method:`allocate_gpindices` + the submembers (if one exists) or `None`, and calls :meth:`allocate_gpindices` using this parent model and a starting index of 0. This has the desired behavior in the two cases above. In case 1, parameter indices are set (allocated) but the parent is set to `None`, so that the to-be parent model will see this member as @@ -818,7 +818,7 @@ def is_similar(self, other, rtol=1e-5, atol=1e-8): LindbladErrorgen) should overload this function to account for that. Parameters - --------- + ---------- other: ModelMember ModelMember to compare to rtol: float @@ -853,7 +853,7 @@ def is_equivalent(self, other, rtol=1e-5, atol=1e-8): are the same. Parameters - --------- + ---------- other: ModelMember ModelMember to compare to rtol: float @@ -895,7 +895,7 @@ def to_memoized_dict(self, mmg_memo): mm_dict: dict A dict representation of this ModelMember ready for serialization This must have at least the following fields: - module, class, submembers, params, state_space, evotype + module, class, submembers, params, state_space, evotype Additional fields may be added by derived classes. """ mm_dict = OrderedDict() @@ -942,7 +942,7 @@ def _from_memoized_dict(cls, mm_dict, serial_memo): """ For subclasses to implement. Submember-existence checks are performed, and the gpindices of the return value is set, by the non-underscored - :method:`from_memoized_dict` implemented in this class. + :meth:`from_memoized_dict` implemented in this class. 
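Numerical check of the TPInstrument bookkeeping above: with MT = M1+...+Mn and Di = Mi - MT for i < n, the last element satisfies Mn = -(n-2)*MT - sum(Di). Random matrices, illustration only:

    import numpy as np

    rng = np.random.default_rng(0)
    Ms = [rng.standard_normal((2, 2)) for _ in range(4)]  # n = 4 instrument elements
    MT = sum(Ms)                       # the TP-parameterized sum
    Ds = [M - MT for M in Ms[:-1]]     # the fully parameterized differences
    assert np.allclose(-(len(Ms) - 2) * MT - sum(Ds), Ms[-1])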
""" #E.g.: # assert len(mm_dict['submembers']) == 0, 'ModelMember base class has no submembers' @@ -958,7 +958,7 @@ def from_memoized_dict(cls, mm_dict, serial_memo, parent_model): mm_dict: dict A dict representation of this ModelMember ready for deserialization This must have at least the following fields: - module, class, submembers, state_space, evotype + module, class, submembers, state_space, evotype serial_memo: dict Keys are serialize_ids and values are ModelMembers. This is NOT the same as diff --git a/pygsti/modelmembers/modelmembergraph.py b/pygsti/modelmembers/modelmembergraph.py index dee469b9a..839c7f642 100644 --- a/pygsti/modelmembers/modelmembergraph.py +++ b/pygsti/modelmembers/modelmembergraph.py @@ -27,7 +27,7 @@ def load_modelmembers_from_serialization_dict(cls, sdict, parent_model): ---------- sdict: dict Flat dict of the ModelMemberGraph that was serialized by a - prior call to :method:`ModelMemberGraph.create_serialization_dict`. + prior call to :meth:`ModelMemberGraph.create_serialization_dict`. Returns ------- diff --git a/pygsti/modelmembers/operations/__init__.py b/pygsti/modelmembers/operations/__init__.py index 6c9a63178..1b479a53f 100644 --- a/pygsti/modelmembers/operations/__init__.py +++ b/pygsti/modelmembers/operations/__init__.py @@ -264,7 +264,7 @@ def convert(operation, to_type, basis, ideal_operation=None, flatten_structure=F to_type : {"full","full TP","static","static unitary","clifford",LINDBLAD} The type of parameterizaton to convert to. "LINDBLAD" is a placeholder for the various Lindblad parameterization types. See - :method:`Model.set_all_parameterizations` for more details. + :meth:`Model.set_all_parameterizations` for more details. basis : {'std', 'gm', 'pp', 'qt'} or Basis object The basis for `operation`. Allowed values are Matrix-unit (std), diff --git a/pygsti/modelmembers/operations/composederrorgen.py b/pygsti/modelmembers/operations/composederrorgen.py index f55ccd702..2a29d82c8 100644 --- a/pygsti/modelmembers/operations/composederrorgen.py +++ b/pygsti/modelmembers/operations/composederrorgen.py @@ -116,7 +116,7 @@ def coefficients(self, return_basis=False, logscale_nonham=False): essentially converts the coefficient into a rate that is the contribution this term would have within a depolarizing channel where all stochastic generators had this same coefficient. - This is the value returned by :method:`error_rates`. + This is the value returned by :meth:`error_rates`. Returns ------- @@ -186,7 +186,7 @@ def coefficients(self, return_basis=False, logscale_nonham=False): def coefficient_labels(self): """ - The elementary error-generator labels corresponding to the elements of :method:`coefficients_array`. + The elementary error-generator labels corresponding to the elements of :meth:`coefficients_array`. Returns ------- @@ -200,7 +200,7 @@ def coefficients_array(self): """ The weighted coefficients of this error generator in terms of "standard" error generators. - Constructs a 1D array of all the coefficients returned by :method:`coefficients`, + Constructs a 1D array of all the coefficients returned by :meth:`coefficients`, weighted so that different error generators can be weighted differently when a `errorgen_penalty_factor` is used in an objective function. @@ -214,7 +214,7 @@ def coefficients_array(self): def coefficients_array_deriv_wrt_params(self): """ - The jacobian of :method:`coefficients_array` with respect to this error generator's parameters. 
+ The jacobian of :meth:`coefficients_array` with respect to this error generator's parameters. Returns ------- @@ -240,10 +240,10 @@ def error_rates(self): contribution that basis element's term would have to the error rate of a depolarization channel. For example, if the rate corresponding to the term ('S','X') is 0.01 this - means that the coefficient of the rho -> X*rho*X-rho error + means that the coefficient of the `rho -> X*rho*X-rho` error generator is set such that if this coefficient were used for all 3 (X,Y, and Z) terms the resulting depolarizing - channel would have error rate 3*0.01 = 0.03. + channel would have error rate `3*0.01 = 0.03`. Note that because error generator terms do not necessarily commute with one another, the sum of the returned error @@ -290,11 +290,11 @@ def set_coefficients(self, lindblad_term_dict, action="update", logscale_nonham= logscale_nonham : bool, optional Whether or not the values in `lindblad_term_dict` for non-hamiltonian error generators should be interpreted as error *rates* (of an - "equivalent" depolarizing channel, see :method:`errorgen_coefficients`) + "equivalent" depolarizing channel, see :meth:`errorgen_coefficients`) instead of raw coefficients. If True, then the non-hamiltonian coefficients are set to `-log(1 - d^2*rate)/d^2`, where `rate` is the corresponding value given in `lindblad_term_dict`. This is what is - performed by the function :method:`set_error_rates`. + performed by the function :meth:`set_error_rates`. truncate : bool, optional Whether to truncate the projections onto the Lindblad terms in @@ -332,7 +332,7 @@ def set_error_rates(self, lindblad_term_dict, action="update"): Cofficients are set so that the contributions of the resulting channel's error rate are given by the values in `lindblad_term_dict`. See - :method:`error_rates` for more details. + :meth:`error_rates` for more details. Parameters ---------- @@ -457,7 +457,7 @@ def append(self, *factors_to_add): Parameters ---------- - *factors_to_add : LinearOperator + `*factors_to_add` : LinearOperator One or multiple factor operators to add on at the *end* (summed last) of this operator. @@ -482,7 +482,7 @@ def insert(self, insert_at, *factors_to_insert): The index at which to insert `factors_to_insert`. The factor at this index and those after it are shifted back by `len(factors_to_insert)`. - *factors_to_insert : LinearOperator + `*factors_to_insert` : LinearOperator One or multiple factor operators to insert within this operator. Returns @@ -502,7 +502,7 @@ def remove(self, *factor_indices): Parameters ---------- - *factorop_indices : int + `*factorop_indices` : int One or multiple factor indices to remove from this operator. Returns @@ -624,7 +624,7 @@ def from_vector(self, v, close=False, dirty_value=True): def taylor_order_terms(self, order, max_polynomial_vars=100, return_coeff_polys=False): """ - Get the `order`-th order Taylor-expansion terms of this error generator.. + Get the `order`-th order Taylor-expansion terms of this error generator. This function either constructs or returns a cached list of the terms at the given order. Each term is "rank-1", meaning that its action on a @@ -658,7 +658,7 @@ def taylor_order_terms(self, order, max_polynomial_vars=100, return_coeff_polys= Only present when `return_coeff_polys == True`. A list of *compact* polynomial objects, meaning that each element is a `(vtape,ctape)` 2-tuple formed by concatenating together the - output of :method:`Polynomial.compact`. + output of :meth:`Polynomial.compact`. 
""" assert(order == 0), \ "Error generators currently treat all terms as 0-th order; nothing else should be requested!" diff --git a/pygsti/modelmembers/operations/composedop.py b/pygsti/modelmembers/operations/composedop.py index 4e2574d74..c0bd68a7a 100644 --- a/pygsti/modelmembers/operations/composedop.py +++ b/pygsti/modelmembers/operations/composedop.py @@ -30,7 +30,7 @@ class ComposedOp(_LinearOperator): """ - An operation that is the composition of a number of map-like factors (possibly other `LinearOperator`s). + An operation that is the composition of a number of map-like factors (possibly other `LinearOperator`). Parameters ---------- @@ -184,7 +184,7 @@ def append(self, *factorops_to_add): Parameters ---------- - *factors_to_add : LinearOperator + `*factors_to_add` : LinearOperator One or multiple factor operators to add on at the *end* (evaluated last) of this operator. @@ -211,7 +211,7 @@ def insert(self, insert_at, *factorops_to_insert): The index at which to insert `factorops_to_insert`. The factor at this index and those after it are shifted back by `len(factorops_to_insert)`. - *factors_to_insert : LinearOperator + `*factors_to_insert` : LinearOperator One or multiple factor operators to insert within this operator. Returns @@ -233,7 +233,7 @@ def remove(self, *factorop_indices): Parameters ---------- - *factorop_indices : int + `*factorop_indices` : int One or multiple factor indices to remove from this operator. Returns @@ -457,7 +457,7 @@ def taylor_order_terms(self, order, max_polynomial_vars=100, return_coeff_polys= Only present when `return_coeff_polys == True`. A list of *compact* polynomial objects, meaning that each element is a `(vtape,ctape)` 2-tuple formed by concatenating together the - output of :method:`Polynomial.compact`. + output of :meth:`Polynomial.compact`. """ if order not in self.terms: self._compute_taylor_order_terms(order, max_polynomial_vars, self.gpindices_as_array()) @@ -509,7 +509,7 @@ def taylor_order_terms_above_mag(self, order, max_polynomial_vars, min_term_mag) This function constructs the terms at the given order which have a magnitude (given by the absolute value of their coefficient) that is greater than or equal to `min_term_mag`. - It calls :method:`taylor_order_terms` internally, so that all the terms at order `order` + It calls :meth:`taylor_order_terms` internally, so that all the terms at order `order` are typically cached for future calls. The coefficients of these terms are typically polynomials of the operation's @@ -543,7 +543,7 @@ def taylor_order_terms_above_mag(self, order, max_polynomial_vars, min_term_mag) # for i, pi in enumerate(p)] factor_lists = [factor_lists_cache[i][pi] for i, pi in enumerate(p)] for factors in _itertools.product(*factor_lists): - mag = _np.product([factor.magnitude for factor in factors]) + mag = _np.prod([factor.magnitude for factor in factors]) if mag >= min_term_mag: terms.append(_term.compose_terms_with_mag(factors, mag)) return terms @@ -585,7 +585,7 @@ def total_term_magnitude(self): # of an errorgen or operator. # In this case, since the taylor expansions are composed (~multiplied), # the total term magnitude is just the product of those of the components. 
- return _np.product([f.total_term_magnitude for f in self.factorops]) + return _np.prod([f.total_term_magnitude for f in self.factorops]) @property def total_term_magnitude_deriv(self): @@ -601,7 +601,7 @@ def total_term_magnitude_deriv(self): An array of length self.num_params """ opmags = [f.total_term_magnitude for f in self.factorops] - product = _np.product(opmags) + product = _np.prod(opmags) ret = _np.zeros(self.num_params, 'd') for opmag, f, f_local_inds in zip(opmags, self.factorops, self._submember_rpindices): #f_local_inds = _modelmember._decompose_gpindices( @@ -679,7 +679,7 @@ def errorgen_coefficients(self, return_basis=False, logscale_nonham=False): essentially converts the coefficient into a rate that is the contribution this term would have within a depolarizing channel where all stochastic generators had this same coefficient. - This is the value returned by :method:`error_rates`. + This is the value returned by :meth:`error_rates`. Returns ------- @@ -753,7 +753,7 @@ def errorgen_coefficients(self, return_basis=False, logscale_nonham=False): def errorgen_coefficient_labels(self): """ - The elementary error-generator labels corresponding to the elements of :method:`errorgen_coefficients_array`. + The elementary error-generator labels corresponding to the elements of :meth:`errorgen_coefficients_array`. Returns ------- @@ -767,7 +767,7 @@ def errorgen_coefficients_array(self): """ The weighted coefficients of this operation's error generator in terms of "standard" error generators. - Constructs a 1D array of all the coefficients returned by :method:`errorgen_coefficients`, + Constructs a 1D array of all the coefficients returned by :meth:`errorgen_coefficients`, weighted so that different error generators can be weighted differently when a `errorgen_penalty_factor` is used in an objective function. @@ -783,7 +783,7 @@ def errorgen_coefficients_array(self): def errorgen_coefficients_array_deriv_wrt_params(self): """ - The jacobian of :method:`errogen_coefficients_array` with respect to this operation's parameters. + The jacobian of :meth:`errogen_coefficients_array` with respect to this operation's parameters. Returns ------- @@ -862,11 +862,11 @@ def set_errorgen_coefficients(self, lindblad_term_dict, action="update", logscal logscale_nonham : bool, optional Whether or not the values in `lindblad_term_dict` for non-hamiltonian error generators should be interpreted as error *rates* (of an - "equivalent" depolarizing channel, see :method:`errorgen_coefficients`) + "equivalent" depolarizing channel, see :meth:`errorgen_coefficients`) instead of raw coefficients. If True, then the non-hamiltonian coefficients are set to `-log(1 - d^2*rate)/d^2`, where `rate` is the corresponding value given in `lindblad_term_dict`. This is what is - performed by the function :method:`set_error_rates`. + performed by the function :meth:`set_error_rates`. truncate : bool, optional Whether to allow adjustment of the errogen coefficients in @@ -906,7 +906,7 @@ def set_error_rates(self, lindblad_term_dict, action="update"): Values are set so that the contributions of the resulting channel's error rate are given by the values in `lindblad_term_dict`. See - :method:`error_rates` for more details. + :meth:`error_rates` for more details. 
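Two things worth pinning down from the composed-op code above: the _np.product -> _np.prod swaps (np.product is a deprecated alias of np.prod, removed in NumPy 2.0, so the change is drop-in), and the product rule behind total_term_magnitude_deriv, namely dM/dtheta = sum_i (M/m_i) * dm_i/dtheta for M = prod_i m_i. Toy check with illustrative numbers:

    import numpy as np

    m = np.array([2.0, 3.0, 4.0])   # factor magnitudes m_i
    dm = np.array([0.1, 0.2, 0.3])  # dm_i/dtheta
    M = np.prod(m)                  # 24.0, same value np.product returned
    dM = np.sum(M / m * dm)         # 0.1*12 + 0.2*8 + 0.3*6 = 4.6
    assert np.isclose(dM, 4.6)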
Parameters ---------- diff --git a/pygsti/modelmembers/operations/denseop.py b/pygsti/modelmembers/operations/denseop.py index b138476ea..bef7c2f65 100644 --- a/pygsti/modelmembers/operations/denseop.py +++ b/pygsti/modelmembers/operations/denseop.py @@ -149,7 +149,7 @@ def to_array(self): Return the array used to identify this operation within its range of possible values. For instance, if the operation is a unitary operation, this returns a - unitary matrix regardless of the evolution type. The related :method:`to_dense` + unitary matrix regardless of the evolution type. The related :meth:`to_dense` method, in contrast, returns the dense representation of the operation, which varies by evolution type. @@ -364,7 +364,7 @@ def to_memoized_dict(self, mmg_memo): mm_dict: dict A dict representation of this ModelMember ready for serialization This must have at least the following fields: - module, class, submembers, params, state_space, evotype + module, class, submembers, params, state_space, evotype Additional fields may be added by derived classes. """ mm_dict = super().to_memoized_dict(mmg_memo) @@ -606,7 +606,7 @@ def to_memoized_dict(self, mmg_memo): mm_dict: dict A dict representation of this ModelMember ready for serialization This must have at least the following fields: - module, class, submembers, params, state_space, evotype + module, class, submembers, params, state_space, evotype Additional fields may be added by derived classes. """ mm_dict = super().to_memoized_dict(mmg_memo) diff --git a/pygsti/modelmembers/operations/depolarizeop.py b/pygsti/modelmembers/operations/depolarizeop.py index e8da9d92e..288cb3c92 100644 --- a/pygsti/modelmembers/operations/depolarizeop.py +++ b/pygsti/modelmembers/operations/depolarizeop.py @@ -109,7 +109,7 @@ def to_memoized_dict(self, mmg_memo): mm_dict: dict A dict representation of this ModelMember ready for serialization This must have at least the following fields: - module, class, submembers, params, state_space, evotype + module, class, submembers, params, state_space, evotype Additional fields may be added by derived classes. """ mm_dict = super().to_memoized_dict(mmg_memo) diff --git a/pygsti/modelmembers/operations/eigpdenseop.py b/pygsti/modelmembers/operations/eigpdenseop.py index ef425d362..8c1e6ae59 100644 --- a/pygsti/modelmembers/operations/eigpdenseop.py +++ b/pygsti/modelmembers/operations/eigpdenseop.py @@ -47,10 +47,10 @@ class EigenvalueParamDenseOp(_DenseOperator): tp_constrained_and_unital : bool If True, assume the top row of the operation matrix is fixed - to [1, 0, ... 0] and should not be parameterized, and verify + to `[1, 0, ... 0]` and should not be parameterized, and verify that the matrix is unital. In this case, "1" is always a fixed (not-paramterized) eigenvalue with eigenvector - [1,0,...0] and if include_off_diags_in_degen_blocks is True + `[1,...0]` and if include_off_diags_in_degen_blocks is True any off diagonal elements lying on the top row are *not* parameterized as implied by the TP constraint. @@ -327,7 +327,7 @@ def to_memoized_dict(self, mmg_memo): mm_dict: dict A dict representation of this ModelMember ready for serialization This must have at least the following fields: - module, class, submembers, params, state_space, evotype + module, class, submembers, params, state_space, evotype Additional fields may be added by derived classes. 
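The to_array / to_dense distinction drawn above, as a sketch (`unitary_op` is an assumed dense unitary operation object):

    U = unitary_op.to_array()    # identifying array: a unitary, independent of evotype
    rep = unitary_op.to_dense()  # dense *representation*, which varies by evotype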
""" mm_dict = super().to_memoized_dict(mmg_memo) # includes 'dense_matrix' from DenseOperator diff --git a/pygsti/modelmembers/operations/embeddederrorgen.py b/pygsti/modelmembers/operations/embeddederrorgen.py index 5f4d29982..531004902 100644 --- a/pygsti/modelmembers/operations/embeddederrorgen.py +++ b/pygsti/modelmembers/operations/embeddederrorgen.py @@ -114,7 +114,7 @@ def coefficients(self, return_basis=False, logscale_nonham=False): essentially converts the coefficient into a rate that is the contribution this term would have within a depolarizing channel where all stochastic generators had this same coefficient. - This is the value returned by :method:`error_rates`. + This is the value returned by :meth:`error_rates`. Returns ------- @@ -135,7 +135,7 @@ def coefficients(self, return_basis=False, logscale_nonham=False): def coefficient_labels(self): """ - The elementary error-generator labels corresponding to the elements of :method:`coefficients_array`. + The elementary error-generator labels corresponding to the elements of :meth:`coefficients_array`. Returns ------- @@ -149,7 +149,7 @@ def coefficients_array(self): """ The weighted coefficients of this error generator in terms of "standard" error generators. - Constructs a 1D array of all the coefficients returned by :method:`coefficients`, + Constructs a 1D array of all the coefficients returned by :meth:`coefficients`, weighted so that different error generators can be weighted differently when a `errorgen_penalty_factor` is used in an objective function. @@ -163,7 +163,7 @@ def coefficients_array(self): def coefficients_array_deriv_wrt_params(self): """ - The jacobian of :method:`coefficients_array` with respect to this error generator's parameters. + The jacobian of :meth:`coefficients_array` with respect to this error generator's parameters. Returns ------- @@ -239,11 +239,11 @@ def set_coefficients(self, lindblad_term_dict, action="update", logscale_nonham= logscale_nonham : bool, optional Whether or not the values in `lindblad_term_dict` for non-hamiltonian error generators should be interpreted as error *rates* (of an - "equivalent" depolarizing channel, see :method:`errorgen_coefficients`) + "equivalent" depolarizing channel, see :meth:`errorgen_coefficients`) instead of raw coefficients. If True, then the non-hamiltonian coefficients are set to `-log(1 - d^2*rate)/d^2`, where `rate` is the corresponding value given in `lindblad_term_dict`. This is what is - performed by the function :method:`set_error_rates`. + performed by the function :meth:`set_error_rates`. truncate : bool, optional Whether to truncate the projections onto the Lindblad terms in @@ -263,7 +263,7 @@ def set_error_rates(self, lindblad_term_dict, action="update"): Coefficients are set so that the contributions of the resulting channel's error rate are given by the values in `lindblad_term_dict`. - See :method:`error_rates` for more details. + See :meth:`error_rates` for more details. 
Parameters ---------- diff --git a/pygsti/modelmembers/operations/embeddedop.py b/pygsti/modelmembers/operations/embeddedop.py index 3e430a94d..1296ea495 100644 --- a/pygsti/modelmembers/operations/embeddedop.py +++ b/pygsti/modelmembers/operations/embeddedop.py @@ -162,9 +162,9 @@ def _iter_matrix_elements_precalc(self, on_space): # number of basis elements preceding our block's elements if on_space == "Hilbert": - blockDims = [_np.product(tpb_dims) for tpb_dims in self.state_space.tensor_product_blocks_udimensions] + blockDims = [_np.prod(tpb_dims) for tpb_dims in self.state_space.tensor_product_blocks_udimensions] else: - blockDims = [_np.product(tpb_dims) for tpb_dims in self.state_space.tensor_product_blocks_dimensions] + blockDims = [_np.prod(tpb_dims) for tpb_dims in self.state_space.tensor_product_blocks_dimensions] offset = sum(blockDims[0:iTensorProdBlk]) return divisors, multipliers, sorted_bili, basisInds_noop, offset @@ -428,7 +428,7 @@ def taylor_order_terms(self, order, max_polynomial_vars=100, return_coeff_polys= Only present when `return_coeff_polys == True`. A list of *compact* polynomial objects, meaning that each element is a `(vtape,ctape)` 2-tuple formed by concatenating together the - output of :method:`Polynomial.compact`. + output of :meth:`Polynomial.compact`. """ #Reduce labeldims b/c now working on *state-space* instead of density mx: sslbls = self.state_space.copy() @@ -446,7 +446,7 @@ def taylor_order_terms_above_mag(self, order, max_polynomial_vars, min_term_mag) This function constructs the terms at the given order which have a magnitude (given by the absolute value of their coefficient) that is greater than or equal to `min_term_mag`. - It calls :method:`taylor_order_terms` internally, so that all the terms at order `order` + It calls :meth:`taylor_order_terms` internally, so that all the terms at order `order` are typically cached for future calls. The coefficients of these terms are typically polynomials of the operation's @@ -567,7 +567,7 @@ def errorgen_coefficients(self, return_basis=False, logscale_nonham=False): essentially converts the coefficient into a rate that is the contribution this term would have within a depolarizing channel where all stochastic generators had this same coefficient. - This is the value returned by :method:`error_rates`. + This is the value returned by :meth:`error_rates`. Returns ------- @@ -594,7 +594,7 @@ def errorgen_coefficients(self, return_basis=False, logscale_nonham=False): def errorgen_coefficient_labels(self): """ - The elementary error-generator labels corresponding to the elements of :method:`errorgen_coefficients_array`. + The elementary error-generator labels corresponding to the elements of :meth:`errorgen_coefficients_array`. Returns ------- @@ -613,7 +613,7 @@ def errorgen_coefficients_array(self): """ The weighted coefficients of this operation's error generator in terms of "standard" error generators. - Constructs a 1D array of all the coefficients returned by :method:`errorgen_coefficients`, + Constructs a 1D array of all the coefficients returned by :meth:`errorgen_coefficients`, weighted so that different error generators can be weighted differently when a `errorgen_penalty_factor` is used in an objective function. @@ -627,7 +627,7 @@ def errorgen_coefficients_array(self): def errorgen_coefficients_array_deriv_wrt_params(self): """ - The jacobian of :method:`errogen_coefficients_array` with respect to this operation's parameters. 
+ The jacobian of :meth:`errogen_coefficients_array` with respect to this operation's parameters. Returns ------- @@ -701,11 +701,11 @@ def set_errorgen_coefficients(self, lindblad_term_dict, action="update", logscal logscale_nonham : bool, optional Whether or not the values in `lindblad_term_dict` for non-hamiltonian error generators should be interpreted as error *rates* (of an - "equivalent" depolarizing channel, see :method:`errorgen_coefficients`) + "equivalent" depolarizing channel, see :meth:`errorgen_coefficients`) instead of raw coefficients. If True, then the non-hamiltonian coefficients are set to `-log(1 - d^2*rate)/d^2`, where `rate` is the corresponding value given in `lindblad_term_dict`. This is what is - performed by the function :method:`set_error_rates`. + performed by the function :meth:`set_error_rates`. truncate : bool, optional Whether to allow adjustment of the errogen coefficients in @@ -734,7 +734,7 @@ def set_error_rates(self, lindblad_term_dict, action="update"): Values are set so that the contributions of the resulting channel's error rate are given by the values in `lindblad_term_dict`. See - :method:`error_rates` for more details. + :meth:`error_rates` for more details. Parameters ---------- @@ -844,7 +844,7 @@ def to_memoized_dict(self, mmg_memo): mm_dict: dict A dict representation of this ModelMember ready for serialization This must have at least the following fields: - module, class, submembers, params, state_space, evotype + module, class, submembers, params, state_space, evotype Additional fields may be added by derived classes. """ mm_dict = super().to_memoized_dict(mmg_memo) diff --git a/pygsti/modelmembers/operations/experrorgenop.py b/pygsti/modelmembers/operations/experrorgenop.py index 15e21c2c1..f260eccd4 100644 --- a/pygsti/modelmembers/operations/experrorgenop.py +++ b/pygsti/modelmembers/operations/experrorgenop.py @@ -436,7 +436,7 @@ def taylor_order_terms(self, order, max_polynomial_vars=100, return_coeff_polys= Only present when `return_coeff_polys == True`. A list of *compact* polynomial objects, meaning that each element is a `(vtape,ctape)` 2-tuple formed by concatenating together the - output of :method:`Polynomial.compact`. + output of :meth:`Polynomial.compact`. """ if order not in self.terms: self._compute_taylor_order_terms(order, max_polynomial_vars) @@ -492,7 +492,7 @@ def taylor_order_terms_above_mag(self, order, max_polynomial_vars, min_term_mag) This function constructs the terms at the given order which have a magnitude (given by the absolute value of their coefficient) that is greater than or equal to `min_term_mag`. - It calls :method:`taylor_order_terms` internally, so that all the terms at order `order` + It calls :meth:`taylor_order_terms` internally, so that all the terms at order `order` are typically cached for future calls. The coefficients of these terms are typically polynomials of the operation's diff --git a/pygsti/modelmembers/operations/identitypluserrorgenop.py b/pygsti/modelmembers/operations/identitypluserrorgenop.py index 1fb597793..c88395194 100644 --- a/pygsti/modelmembers/operations/identitypluserrorgenop.py +++ b/pygsti/modelmembers/operations/identitypluserrorgenop.py @@ -327,7 +327,7 @@ def taylor_order_terms(self, order, max_polynomial_vars=100, return_coeff_polys= Only present when `return_coeff_polys == True`. A list of *compact* polynomial objects, meaning that each element is a `(vtape,ctape)` 2-tuple formed by concatenating together the - output of :method:`Polynomial.compact`. 
+ output of :meth:`Polynomial.compact`. """ if order not in self.terms: self._compute_taylor_order_terms(order, max_polynomial_vars) @@ -387,7 +387,7 @@ def taylor_order_terms_above_mag(self, order, max_polynomial_vars, min_term_mag) This function constructs the terms at the given order which have a magnitude (given by the absolute value of their coefficient) that is greater than or equal to `min_term_mag`. - It calls :method:`taylor_order_terms` internally, so that all the terms at order `order` + It calls :meth:`taylor_order_terms` internally, so that all the terms at order `order` are typically cached for future calls. The coefficients of these terms are typically polynomials of the operation's diff --git a/pygsti/modelmembers/operations/lindbladcoefficients.py b/pygsti/modelmembers/operations/lindbladcoefficients.py index 2e51f9bb7..25ebcaab2 100644 --- a/pygsti/modelmembers/operations/lindbladcoefficients.py +++ b/pygsti/modelmembers/operations/lindbladcoefficients.py @@ -772,7 +772,7 @@ def from_vector(self, v): Construct Lindblad coefficients (for this block) from a set of parameter values. This function essentially performs the inverse of - :method:`coefficients_to_paramvals`. + :meth:`coefficients_to_paramvals`. Parameters ---------- @@ -817,10 +817,10 @@ def from_vector(self, v): # cache_mx[i,i] = params[i,i] # cache_mx[i,j] = params[i,j] + 1j*params[j,i] (i > j) cache_mx = self._cache_mx + iparams = 1j * params for i in range(num_bels): cache_mx[i, i] = params[i, i] - for j in range(i): - cache_mx[i, j] = params[i, j] + 1j * params[j, i] + cache_mx[i, :i] = params[i, :i] + iparams[:i, i] #The matrix of (complex) "other"-coefficients is build by assuming # cache_mx is its Cholesky decomp; means otherCoeffs is pos-def. @@ -839,11 +839,11 @@ def from_vector(self, v): elif self._param_mode == "elements": # params mx stores block_data (hermitian) directly #params holds block_data real and imaginary parts directly + iparams = 1j * params for i in range(num_bels): self.block_data[i, i] = params[i, i] - for j in range(i): - self.block_data[i, j] = params[i, j] + 1j * params[j, i] - self.block_data[j, i] = params[i, j] - 1j * params[j, i] + self.block_data[i, :i] = params[i, :i] + iparams[:i, i] + self.block_data[:i, i] = params[i, :i] - iparams[:i, i] else: raise ValueError("Internal error: invalid parameter mode (%s) for block type %s!" % (self._param_mode, self._block_type)) @@ -856,7 +856,7 @@ def deriv_wrt_params(self, v=None): Construct derivative of Lindblad coefficients (for this block) from a set of parameter values. This function gives the Jacobian of what is returned by - :function:`paramvals_to_coefficients` (as a function of the parameters). + :func:`paramvals_to_coefficients` (as a function of the parameters). Parameters ---------- diff --git a/pygsti/modelmembers/operations/lindbladerrorgen.py b/pygsti/modelmembers/operations/lindbladerrorgen.py index 88656fc19..68097dd82 100644 --- a/pygsti/modelmembers/operations/lindbladerrorgen.py +++ b/pygsti/modelmembers/operations/lindbladerrorgen.py @@ -140,7 +140,7 @@ def from_operation_matrix(cls, op_matrix, parameterization='CPTP', lindblad_basi Here "operation" means the exponentiated error generator, so this method essentially takes the matrix log of `op_matrix` and constructs an error - generator from this using :method:`from_error_generator`. + generator from this using :meth:`from_error_generator`. 
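The positive-semidefiniteness trick in from_vector above, checked numerically: the real `params` matrix encodes a lower-triangular complex L (real diagonal; below-diagonal entries params[i,j] + 1j*params[j,i]), and the coefficient block is recovered as L @ L.conj().T, which is Hermitian and positive semidefinite by construction:

    import numpy as np

    n = 3
    params = np.random.default_rng(1).standard_normal((n, n))
    L = np.zeros((n, n), dtype=complex)
    for i in range(n):
        L[i, i] = params[i, i]
        L[i, :i] = params[i, :i] + 1j * params[:i, i]   # mirrors the vectorized update
    block = L @ L.conj().T
    assert np.allclose(block, block.conj().T)           # Hermitian
    assert np.all(np.linalg.eigvalsh(block) >= -1e-12)  # eigenvalues >= 0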
Parameters ---------- @@ -851,7 +851,7 @@ def taylor_order_terms(self, order, max_polynomial_vars=100, return_coeff_polys= Only present when `return_coeff_polys == True`. A list of *compact* polynomial objects, meaning that each element is a `(vtape,ctape)` 2-tuple formed by concatenating together the - output of :method:`Polynomial.compact`. + output of :meth:`Polynomial.compact`. """ assert(self._rep_type == 'lindblad errorgen'), \ "Only evotypes with native Lindblad errorgen representations can utilize Taylor terms" @@ -1015,7 +1015,7 @@ def coefficients(self, return_basis=False, logscale_nonham=False): essentially converts the coefficient into a rate that is the contribution this term would have within a depolarizing channel where all stochastic generators had this same coefficient. - This is the value returned by :method:`error_rates`. + This is the value returned by :meth:`error_rates`. Returns ------- @@ -1062,7 +1062,7 @@ def coefficients(self, return_basis=False, logscale_nonham=False): def coefficient_labels(self): """ - The elementary error-generator labels corresponding to the elements of :method:`coefficients_array`. + The elementary error-generator labels corresponding to the elements of :meth:`coefficients_array`. Returns ------- @@ -1085,7 +1085,7 @@ def coefficients_array(self): """ The weighted coefficients of this error generator in terms of "standard" error generators. - Constructs a 1D array of all the coefficients returned by :method:`coefficients`, + Constructs a 1D array of all the coefficients returned by :meth:`coefficients`, weighted so that different error generators can be weighted differently when a `errorgen_penalty_factor` is used in an objective function. @@ -1104,7 +1104,7 @@ def coefficients_array(self): def coefficients_array_deriv_wrt_params(self): """ - The jacobian of :method:`coefficients_array` with respect to this error generator's parameters. + The jacobian of :meth:`coefficients_array` with respect to this error generator's parameters. Returns ------- @@ -1194,11 +1194,11 @@ def set_coefficients(self, elementary_errorgens, action="update", logscale_nonha logscale_nonham : bool, optional Whether or not the values in `lindblad_term_dict` for non-hamiltonian error generators should be interpreted as error *rates* (of an - "equivalent" depolarizing channel, see :method:`errorgen_coefficients`) + "equivalent" depolarizing channel, see :meth:`errorgen_coefficients`) instead of raw coefficients. If True, then the non-hamiltonian coefficients are set to `-log(1 - d^2*rate)/d^2`, where `rate` is the corresponding value given in `lindblad_term_dict`. This is what is - performed by the function :method:`set_error_rates`. + performed by the function :meth:`set_error_rates`. truncate : bool, optional Whether to truncate the projections onto the Lindblad terms in @@ -1263,7 +1263,7 @@ def set_error_rates(self, elementary_errorgens, action="update"): TODO: update docstring Coefficients are set so that the contributions of the resulting channel's error rate are given by the values in `lindblad_term_dict`. - See :method:`error_rates` for more details. + See :meth:`error_rates` for more details. Parameters ---------- @@ -1535,7 +1535,7 @@ def to_memoized_dict(self, mmg_memo): mm_dict: dict A dict representation of this ModelMember ready for serialization This must have at least the following fields: - module, class, submembers, params, state_space, evotype + module, class, submembers, params, state_space, evotype Additional fields may be added by derived classes. 
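Hedged usage sketch of the coefficient interface documented above; `errgen` is an assumed object implementing it, and the ('S','X') elementary-errorgen label follows the convention used in these docstrings:

    errgen.set_coefficients({('S', 'X'): 0.01}, action="update",
                            logscale_nonham=True)   # value read as an error *rate*
    assert abs(errgen.error_rates()[('S', 'X')] - 0.01) < 1e-12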
""" mm_dict = super().to_memoized_dict(mmg_memo) diff --git a/pygsti/modelmembers/operations/linearop.py b/pygsti/modelmembers/operations/linearop.py index 580b2e381..b464498a0 100644 --- a/pygsti/modelmembers/operations/linearop.py +++ b/pygsti/modelmembers/operations/linearop.py @@ -238,7 +238,7 @@ def taylor_order_terms(self, order, max_polynomial_vars=100, return_coeff_polys= Only present when `return_coeff_polys == True`. A list of *compact* polynomial objects, meaning that each element is a `(vtape,ctape)` 2-tuple formed by concatenating together the - output of :method:`Polynomial.compact`. + output of :meth:`Polynomial.compact`. """ raise NotImplementedError("taylor_order_terms(...) not implemented for %s objects!" % self.__class__.__name__) @@ -363,7 +363,7 @@ def taylor_order_terms_above_mag(self, order, max_polynomial_vars, min_term_mag) This function constructs the terms at the given order which have a magnitude (given by the absolute value of their coefficient) that is greater than or equal to `min_term_mag`. - It calls :method:`taylor_order_terms` internally, so that all the terms at order `order` + It calls :meth:`taylor_order_terms` internally, so that all the terms at order `order` are typically cached for future calls. The coefficients of these terms are typically polynomials of the operation's diff --git a/pygsti/modelmembers/operations/lpdenseop.py b/pygsti/modelmembers/operations/lpdenseop.py index ce8ef60ec..4a375affd 100644 --- a/pygsti/modelmembers/operations/lpdenseop.py +++ b/pygsti/modelmembers/operations/lpdenseop.py @@ -65,7 +65,7 @@ class LinearlyParamArbitraryOp(_DenseOperator): parameter_array : numpy array a 1D numpy array that holds the all the parameters for this operation. The shape of this array sets is what is returned by - value_dimension(...). + `value_dimension(...)`. parameter_to_base_indices_map : dict A dictionary with keys == index of a parameter @@ -183,7 +183,7 @@ def to_memoized_dict(self, mmg_memo): mm_dict: dict A dict representation of this ModelMember ready for serialization This must have at least the following fields: - module, class, submembers, params, state_space, evotype + module, class, submembers, params, state_space, evotype Additional fields may be added by derived classes. """ param_to_base_indices_map = self._construct_param_to_base_indices_map() diff --git a/pygsti/modelmembers/operations/opfactory.py b/pygsti/modelmembers/operations/opfactory.py index 4c3a203af..c79dfc0e8 100644 --- a/pygsti/modelmembers/operations/opfactory.py +++ b/pygsti/modelmembers/operations/opfactory.py @@ -33,11 +33,11 @@ def op_from_factories(factory_dict, lbl): If the label has arguments, then this function looks for an operator factory associated with the label without its arguments. If one exists, the operator is created by calling - :method:`OpFactory.create_simplified_op`. with the label's + :meth:`OpFactory.create_simplified_op`. with the label's arguments. Otherwise, it looks for a factory associated with the label's name (`lbl.name`) and passes both the labe's state-space-labels and arguments (if any) to - :method:`OpFactory.create_simplified_op`. + :meth:`OpFactory.create_simplified_op`. Raises a `KeyError` if a matching factory cannot be found. @@ -106,7 +106,7 @@ def create_object(self, args=None, sslbls=None): Create the object that implements the operation associated with the given `args` and `sslbls`. 
**Note to developers** - The difference beween this method and :method:`create_op` is that + The difference between this method and :meth:`create_op` is that this method just creates the foundational object without needing to setup its parameter indices (a technical detail which connects the created object with the originating factory's parameters). The @@ -171,7 +171,7 @@ def create_simplified_op(self, args=None, sslbls=None, item_lbl=None): """ Create the *simplified* operation associated with the given `args`, `sslbls`, and `item_lbl`. - Similar to as :method:`create_op`, but returns a *simplified* operation + Similar to :meth:`create_op`, but returns a *simplified* operation (i.e. not a POVM or Instrument). In addition, the `item_lbl` argument must be used for POVMs and Instruments, as these need to know which (simple) member of themselves to return (this machinery still needs @@ -290,7 +290,7 @@ def to_memoized_dict(self, mmg_memo): mm_dict: dict A dict representation of this ModelMember ready for serialization This must have at least the following fields: - module, class, submembers, params, state_space, evotype + module, class, submembers, params, state_space, evotype Additional fields may be added by derived classes. """ mm_dict = super().to_memoized_dict(mmg_memo) # includes 'dense_matrix' from DenseOperator @@ -403,7 +403,7 @@ class EmbeddingOpFactory(OpFactory): This is similar to an `EmbeddedOpFactory` except in this case how the "contained" operation/factory is embedded is *not* determined at creation - time: the `sslbls` argument of :method:`create_op` is used instead. + time: the `sslbls` argument of :meth:`create_op` is used instead. Parameters ---------- @@ -456,7 +456,7 @@ def to_memoized_dict(self, mmg_memo): mm_dict: dict A dict representation of this ModelMember ready for serialization This must have at least the following fields: - module, class, submembers, params, state_space, evotype + module, class, submembers, params, state_space, evotype Additional fields may be added by derived classes. """ mm_dict = super().to_memoized_dict(mmg_memo) # includes 'dense_matrix' from DenseOperator @@ -504,7 +504,10 @@ def create_op(self, args=None, sslbls=None): raise ValueError("Not allowed to embed onto sslbls=" + str(sslbls)) if self.embeds_factory: - op = self.embedded_factory_or_op.create_op(args, None) # Note: will have its gpindices set already + # Even though the produced op is (or should be) just a local op on `sslbls` (self.embedded_factory_or_op + # should not return an op embedded on `sslbls`) it may still depend on `sslbls` and so is passed to + # the create_op call here. + op = self.embedded_factory_or_op.create_op(args, sslbls) # Note: will have its gpindices set already else: op = self.embedded_factory_or_op embedded_op = _EmbeddedOp(self.state_space, sslbls, op) @@ -740,7 +743,7 @@ def to_memoized_dict(self, mmg_memo): mm_dict: dict A dict representation of this ModelMember ready for serialization This must have at least the following fields: - module, class, submembers, params, state_space, evotype + module, class, submembers, params, state_space, evotype Additional fields may be added by derived classes. """ mm_dict = super().to_memoized_dict(mmg_memo) # includes 'dense_matrix' from DenseOperator @@ -820,7 +823,7 @@ def create_object(self, args=None, sslbls=None): Can be any type of operation, e.g. a LinearOperator, State, Instrument, or POVM, depending on the label requested.
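The per-call embedding described above, sketched (`embedding_factory` is an assumed EmbeddingOpFactory; the qubit labels are illustrative):

    op01 = embedding_factory.create_op(args=None, sslbls=(0, 1))  # target chosen at call time
    op23 = embedding_factory.create_op(args=None, sslbls=(2, 3))  # same factory, new target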
""" - assert(sslbls is None), "UnitaryOpFactory.create_object must be called with `sslbls=None`!" + # Note: sslbls is unused, and may be None or non-None depending on the context this UnitaryOpFactory is + # used in (e.g. it will be none when used with an EmbeddedOpFactory but not with an EmbeddingOpFactory). U = self.fn(args) # Expanded call to _bt.change_basis(_ot.unitary_to_std_process_mx(U), 'std', self.basis) for speed @@ -846,7 +850,7 @@ def to_memoized_dict(self, mmg_memo): mm_dict: dict A dict representation of this ModelMember ready for serialization This must have at least the following fields: - module, class, submembers, params, state_space, evotype + module, class, submembers, params, state_space, evotype Additional fields may be added by derived classes. """ mm_dict = super().to_memoized_dict(mmg_memo) # includes 'dense_matrix' from DenseOperator diff --git a/pygsti/modelmembers/operations/repeatedop.py b/pygsti/modelmembers/operations/repeatedop.py index e6696ec43..f5c21deed 100644 --- a/pygsti/modelmembers/operations/repeatedop.py +++ b/pygsti/modelmembers/operations/repeatedop.py @@ -19,7 +19,7 @@ class RepeatedOp(_LinearOperator): """ - An operation map that is the composition of a number of map-like factors (possibly other `LinearOperator`s) + An operation map that is the composition of a number of map-like factors (possibly other `LinearOperator`) Parameters ---------- @@ -249,7 +249,7 @@ def to_memoized_dict(self, mmg_memo): mm_dict: dict A dict representation of this ModelMember ready for serialization This must have at least the following fields: - module, class, submembers, params, state_space, evotype + module, class, submembers, params, state_space, evotype Additional fields may be added by derived classes. """ mm_dict = super().to_memoized_dict(mmg_memo) diff --git a/pygsti/modelmembers/operations/staticcliffordop.py b/pygsti/modelmembers/operations/staticcliffordop.py index 7feffc91b..b24ab4bb8 100644 --- a/pygsti/modelmembers/operations/staticcliffordop.py +++ b/pygsti/modelmembers/operations/staticcliffordop.py @@ -115,7 +115,7 @@ def taylor_order_terms(self, order, max_polynomial_vars=100, return_coeff_polys= Only present when `return_coeff_polys == True`. A list of *compact* polynomial objects, meaning that each element is a `(vtape,ctape)` 2-tuple formed by concatenating together the - output of :method:`Polynomial.compact`. + output of :meth:`Polynomial.compact`. """ #Same as unitary op -- assume this op acts as a single unitary term -- consolidate in FUTURE? if order == 0: # only 0-th order term exists @@ -202,7 +202,7 @@ def to_memoized_dict(self, mmg_memo): mm_dict: dict A dict representation of this ModelMember ready for serialization This must have at least the following fields: - module, class, submembers, params, state_space, evotype + module, class, submembers, params, state_space, evotype Additional fields may be added by derived classes. """ U = self.unitary.to_dense() if isinstance(self.unitary, _LinearOperator) else self.unitary # as in __init__ diff --git a/pygsti/modelmembers/operations/staticstdop.py b/pygsti/modelmembers/operations/staticstdop.py index 73bd7b54a..6a7154138 100644 --- a/pygsti/modelmembers/operations/staticstdop.py +++ b/pygsti/modelmembers/operations/staticstdop.py @@ -116,7 +116,7 @@ def taylor_order_terms(self, order, max_polynomial_vars=100, return_coeff_polys= Only present when `return_coeff_polys == True`. 
A list of *compact* polynomial objects, meaning that each element is a `(vtape,ctape)` 2-tuple formed by concatenating together the - output of :method:`Polynomial.compact`. + output of :meth:`Polynomial.compact`. """ #Same as unitary op -- assume this op acts as a single unitary term -- consolidate in FUTURE? if order == 0: # only 0-th order term exists @@ -182,7 +182,7 @@ def to_memoized_dict(self, mmg_memo): mm_dict: dict A dict representation of this ModelMember ready for serialization This must have at least the following fields: - module, class, submembers, params, state_space, evotype + module, class, submembers, params, state_space, evotype Additional fields may be added by derived classes. """ mm_dict = super().to_memoized_dict(mmg_memo) diff --git a/pygsti/modelmembers/operations/staticunitaryop.py b/pygsti/modelmembers/operations/staticunitaryop.py index 3892e563b..d3e487a15 100644 --- a/pygsti/modelmembers/operations/staticunitaryop.py +++ b/pygsti/modelmembers/operations/staticunitaryop.py @@ -82,7 +82,7 @@ def taylor_order_terms(self, order, max_polynomial_vars=100, return_coeff_polys= Only present when `return_coeff_polys == True`. A list of *compact* polynomial objects, meaning that each element is a `(vtape,ctape)` 2-tuple formed by concatenating together the - output of :method:`Polynomial.compact`. + output of :meth:`Polynomial.compact`. """ if order == 0: # only 0-th order term exists coeff = _Polynomial({(): 1.0}, max_polynomial_vars) diff --git a/pygsti/modelmembers/operations/stochasticop.py b/pygsti/modelmembers/operations/stochasticop.py index 436e1b68d..52bec317c 100644 --- a/pygsti/modelmembers/operations/stochasticop.py +++ b/pygsti/modelmembers/operations/stochasticop.py @@ -206,7 +206,7 @@ def taylor_order_terms(self, order, max_polynomial_vars=100, return_coeff_polys= Only present when `return_coeff_polys == True`. A list of *compact* polynomial objects, meaning that each element is a `(vtape,ctape)` 2-tuple formed by concatenating together the - output of :method:`Polynomial.compact`. + output of :meth:`Polynomial.compact`. """ def _compose_poly_indices(terms): @@ -325,7 +325,7 @@ def to_memoized_dict(self, mmg_memo): mm_dict: dict A dict representation of this ModelMember ready for serialization This must have at least the following fields: - module, class, submembers, params, state_space, evotype + module, class, submembers, params, state_space, evotype Additional fields may be added by derived classes. """ mm_dict = super().to_memoized_dict(mmg_memo) diff --git a/pygsti/modelmembers/povms/__init__.py b/pygsti/modelmembers/povms/__init__.py index 43fa33114..e946abd51 100644 --- a/pygsti/modelmembers/povms/__init__.py +++ b/pygsti/modelmembers/povms/__init__.py @@ -310,7 +310,7 @@ def convert(povm, to_type, basis, ideal_povm=None, flatten_structure=False): to_type : {"full","full TP","static","static pure","H+S terms", "H+S clifford terms","clifford"} The type of parameterizaton to convert to. See - :method:`Model.set_all_parameterizations` for more details. + :meth:`Model.set_all_parameterizations` for more details. TODO docstring: update the options here. basis : {'std', 'gm', 'pp', 'qt'} or Basis object @@ -425,7 +425,7 @@ def convert_effect(effect, to_type, basis, ideal_effect=None, flatten_structure= to_type : {"full","TP","static","static pure","clifford",LINDBLAD} The type of parameterizaton to convert to. "LINDBLAD" is a placeholder for the various Lindblad parameterization types. See - :method:`Model.set_all_parameterizations` for more details. 
+ :meth:`Model.set_all_parameterizations` for more details. basis : {'std', 'gm', 'pp', 'qt'} or Basis object The basis for `spamvec`. Allowed values are Matrix-unit (std), diff --git a/pygsti/modelmembers/povms/basepovm.py b/pygsti/modelmembers/povms/basepovm.py index 859d2e158..4e4bd0ced 100644 --- a/pygsti/modelmembers/povms/basepovm.py +++ b/pygsti/modelmembers/povms/basepovm.py @@ -75,7 +75,10 @@ def __init__(self, effects, evotype=None, state_space=None, preserve_sum=False, paramlbls = [] for k, v in items: if k == self.complement_label: continue - if isinstance(v, _POVMEffect): + if called_from_reduce: # __reduce__ should always initialize w/POVMEffects except for ... + assert isinstance(v, _POVMEffect) # complement (which hits continue above) + effect = v # don't copy as we want to preserve the gpindices in effects + elif isinstance(v, _POVMEffect): effect = v if (not preserve_sum) else v.copy() # .copy() just to de-allocate parameters else: assert(evotype is not None), "Must specify `evotype` when effect vectors are not POVMEffect objects!" @@ -108,7 +111,7 @@ def __init__(self, effects, evotype=None, state_space=None, preserve_sum=False, identity_for_complement = _np.array(sum([v.to_dense().reshape(comp_val.shape) for v in non_comp_effects]) + comp_val, 'd') # ensure shapes match before summing complement_effect = _ComplementPOVMEffect( - identity_for_complement, non_comp_effects) + identity_for_complement, non_comp_effects, called_from_reduce) items.append((self.complement_label, complement_effect)) super(_BasePOVM, self).__init__(state_space, evotype, None, items) @@ -148,7 +151,7 @@ def to_memoized_dict(self, mmg_memo): mm_dict: dict A dict representation of this ModelMember ready for serialization This must have at least the following fields: - module, class, submembers, params, state_space, evotype + module, class, submembers, params, state_space, evotype Additional fields may be added by derived classes. """ mm_dict = super().to_memoized_dict(mmg_memo) diff --git a/pygsti/modelmembers/povms/complementeffect.py b/pygsti/modelmembers/povms/complementeffect.py index 4ca129222..908972148 100644 --- a/pygsti/modelmembers/povms/complementeffect.py +++ b/pygsti/modelmembers/povms/complementeffect.py @@ -43,7 +43,7 @@ class ComplementPOVMEffect(_ConjugatedStatePOVMEffect): "complement" POVM effect vector. """ - def __init__(self, identity, other_effects): + def __init__(self, identity, other_effects, called_from_reduce=False): evotype = other_effects[0]._evotype state_space = other_effects[0].state_space @@ -62,8 +62,11 @@ def __init__(self, identity, other_effects): # 2) set the gpindices of the elements of other_spamvecs so # that they index into our local parameter vector. 
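The `called_from_reduce` flag threaded through `_BasePOVM.__init__` and `ComplementPOVMEffect` above exists so that objects rebuilt during unpickling keep the parameter indices (gpindices) they already carry, instead of having them re-allocated. A toy sketch of the failure mode it prevents (illustrative classes only, not pyGSTi's):

```python
import pickle

class ToyEffect:
    def __init__(self, gpindices):
        self.gpindices = gpindices  # index into the parent model's parameter vector

class ToyPOVM:
    def __init__(self, effects, called_from_reduce=False):
        if called_from_reduce:
            self.effects = effects  # effects come from __reduce__: keep indices as-is
        else:
            self.effects = [ToyEffect(i) for i in range(len(effects))]  # fresh allocation

    def __reduce__(self):
        return (ToyPOVM, (self.effects, True))  # rebuild with called_from_reduce=True

povm = ToyPOVM([None, None])
povm.effects[1].gpindices = 7  # pretend a containing model re-assigned this index
restored = pickle.loads(pickle.dumps(povm))
assert restored.effects[1].gpindices == 7  # preserved; a naive rebuild would reset it to 1
```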
- _ConjugatedStatePOVMEffect.__init__(self, self.identity.copy()) - self.init_gpindices() # initialize our gpindices based on sub-members + _ConjugatedStatePOVMEffect.__init__(self, self.identity.copy(), called_from_reduce) + if not called_from_reduce: + self.init_gpindices() # initialize our gpindices based on sub-members + else: + self.allocate_gpindices(10000, None, submembers_already_allocated=True) self._construct_vector() # reset's self.base def _construct_vector(self): @@ -89,7 +92,7 @@ def to_memoized_dict(self, mmg_memo): mm_dict: dict A dict representation of this ModelMember ready for serialization This must have at least the following fields: - module, class, submembers, params, state_space, evotype + module, class, submembers, params, state_space, evotype Additional fields may be added by derived classes. """ mm_dict = super().to_memoized_dict(mmg_memo) diff --git a/pygsti/modelmembers/povms/composedeffect.py b/pygsti/modelmembers/povms/composedeffect.py index 1b5a68ee8..845085bad 100644 --- a/pygsti/modelmembers/povms/composedeffect.py +++ b/pygsti/modelmembers/povms/composedeffect.py @@ -32,7 +32,7 @@ class ComposedPOVMEffect(_POVMEffect): # , _ErrorMapContainer the "base" preparation or projection that is followed or preceded by, respectively, the parameterized Lindblad-form error generator. (This argument is *not* copied if it is a POVMEffect. A numpy array - is converted to a new static POVM effect.) + is converted to a new static POVM effect.) errormap : MapOperator The error generator action and parameterization, encapsulated in @@ -497,7 +497,7 @@ def taylor_order_terms(self, order, max_polynomial_vars=100, return_coeff_polys= Only present when `return_coeff_polys == True`. A list of *compact* polynomial objects, meaning that each element is a `(vtape,ctape)` 2-tuple formed by concatenating together the - output of :method:`Polynomial.compact`. + output of :meth:`Polynomial.compact`. """ if order not in self.terms: assert(self.gpindices is not None), "ComposedPOVMEffect must be added to a Model before use!" @@ -530,7 +530,7 @@ def taylor_order_terms_above_mag(self, order, max_polynomial_vars, min_term_mag) This function constructs the terms at the given order which have a magnitude (given by the absolute value of their coefficient) that is greater than or equal to `min_term_mag`. - It calls :method:`taylor_order_terms` internally, so that all the terms at order `order` + It calls :meth:`taylor_order_terms` internally, so that all the terms at order `order` are typically cached for future calls. Parameters diff --git a/pygsti/modelmembers/povms/composedpovm.py b/pygsti/modelmembers/povms/composedpovm.py index 16b9c48ec..b23722007 100644 --- a/pygsti/modelmembers/povms/composedpovm.py +++ b/pygsti/modelmembers/povms/composedpovm.py @@ -126,7 +126,7 @@ def to_memoized_dict(self, mmg_memo): mm_dict: dict A dict representation of this ModelMember ready for serialization This must have at least the following fields: - module, class, submembers, params, state_space, evotype + module, class, submembers, params, state_space, evotype Additional fields may be added by derived classes. """ mm_dict = super().to_memoized_dict(mmg_memo) @@ -372,7 +372,7 @@ def __str__(self): def errorgen_coefficient_labels(self): """ - The elementary error-generator labels corresponding to the elements of :method:`errorgen_coefficients_array`. + The elementary error-generator labels corresponding to the elements of :meth:`errorgen_coefficients_array`. 
Returns ------- @@ -386,7 +386,7 @@ def errorgen_coefficients_array(self): """ The weighted coefficients of this POVM's error generator in terms of "standard" error generators. - Constructs a 1D array of all the coefficients returned by :method:`errorgen_coefficients`, + Constructs a 1D array of all the coefficients returned by :meth:`errorgen_coefficients`, weighted so that different error generators can be weighted differently when an `errorgen_penalty_factor` is used in an objective function. @@ -419,7 +419,7 @@ def errorgen_coefficients(self, return_basis=False, logscale_nonham=False): essentially converts the coefficient into a rate that is the contribution this term would have within a depolarizing channel where all stochastic generators had this same coefficient. - This is the value returned by :method:`error_rates`. + This is the value returned by :meth:`error_rates`. Returns ------- @@ -464,11 +464,11 @@ def set_errorgen_coefficients(self, lindblad_term_dict, action="update", logscal logscale_nonham : bool, optional Whether or not the values in `lindblad_term_dict` for non-hamiltonian error generators should be interpreted as error *rates* (of an - "equivalent" depolarizing channel, see :method:`errorgen_coefficients`) + "equivalent" depolarizing channel, see :meth:`errorgen_coefficients`) instead of raw coefficients. If True, then the non-hamiltonian coefficients are set to `-log(1 - d^2*rate)/d^2`, where `rate` is the corresponding value given in `lindblad_term_dict`. This is what is - performed by the function :method:`set_error_rates`. + performed by the function :meth:`set_error_rates`. truncate : bool, optional Whether to allow adjustment of the errorgen coefficients in @@ -485,7 +485,7 @@ def errorgen_coefficients_array_deriv_wrt_params(self): """ - The jacobian of :method:`errogen_coefficients_array` with respect to this POVM's parameters. + The jacobian of :meth:`errorgen_coefficients_array` with respect to this POVM's parameters. Returns ------- diff --git a/pygsti/modelmembers/povms/computationaleffect.py b/pygsti/modelmembers/povms/computationaleffect.py index 4bdc41fbe..ea727525d 100644 --- a/pygsti/modelmembers/povms/computationaleffect.py +++ b/pygsti/modelmembers/povms/computationaleffect.py @@ -171,7 +171,7 @@ def to_memoized_dict(self, mmg_memo): mm_dict: dict A dict representation of this ModelMember ready for serialization This must have at least the following fields: - module, class, submembers, params, state_space, evotype + module, class, submembers, params, state_space, evotype Additional fields may be added by derived classes. """ mm_dict = super().to_memoized_dict(mmg_memo) @@ -252,7 +252,7 @@ def taylor_order_terms(self, order, max_polynomial_vars=100, return_coeff_polys= Only present when `return_coeff_polys == True`. A list of *compact* polynomial objects, meaning that each element is a `(vtape,ctape)` 2-tuple formed by concatenating together the - output of :method:`Polynomial.compact`. + output of :meth:`Polynomial.compact`. 
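The `-log(1 - d^2*rate)/d^2` conversion quoted in the `set_errorgen_coefficients` docstring above is easy to sanity-check numerically. A standalone example (assuming a single qubit, so d = 2):

```python
import numpy as np

d = 2        # qubit Hilbert-space dimension
rate = 0.01  # error *rate* of the "equivalent" depolarizing channel

coeff = -np.log(1 - d**2 * rate) / d**2    # rate -> coefficient (as documented)
back = (1 - np.exp(-d**2 * coeff)) / d**2  # coefficient -> rate (the inverse map)

print(f"coefficient = {coeff:.6f}")     # ~0.010206, slightly above the rate
print(f"round-trip rate = {back:.6f}")  # 0.010000
```

For small rates the two values are nearly equal; the logarithm only matters as `d**2 * rate` approaches 1.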
""" if order == 0: # only 0-th order term exists coeff = _Polynomial({(): 1.0}, max_polynomial_vars) diff --git a/pygsti/modelmembers/povms/computationalpovm.py b/pygsti/modelmembers/povms/computationalpovm.py index 961f91dad..98ab4991b 100644 --- a/pygsti/modelmembers/povms/computationalpovm.py +++ b/pygsti/modelmembers/povms/computationalpovm.py @@ -183,7 +183,7 @@ def to_memoized_dict(self, mmg_memo): mm_dict: dict A dict representation of this ModelMember ready for serialization This must have at least the following fields: - module, class, submembers, params, state_space, evotype + module, class, submembers, params, state_space, evotype Additional fields may be added by derived classes. """ mm_dict = super().to_memoized_dict(mmg_memo) diff --git a/pygsti/modelmembers/povms/conjugatedeffect.py b/pygsti/modelmembers/povms/conjugatedeffect.py index 9ffacb76d..5af305a44 100644 --- a/pygsti/modelmembers/povms/conjugatedeffect.py +++ b/pygsti/modelmembers/povms/conjugatedeffect.py @@ -139,12 +139,15 @@ class ConjugatedStatePOVMEffect(DenseEffectInterface, _POVMEffect): i.e, a (dim,1)-shaped array. """ - def __init__(self, state): + def __init__(self, state, called_from_reduce=False): self.state = state evotype = state._evotype rep = evotype.create_conjugatedstate_effect_rep(state._rep) _POVMEffect.__init__(self, rep, evotype) - self.init_gpindices() # initialize our gpindices based on sub-members + if not called_from_reduce: + self.init_gpindices() # initialize our gpindices based on sub-members + else: + self.allocate_gpindices(10000, None, submembers_already_allocated=True) @property def _basis(self): @@ -355,7 +358,7 @@ def taylor_order_terms(self, order, max_polynomial_vars=100, return_coeff_polys= Only present when `return_coeff_polys == True`. A list of *compact* polynomial objects, meaning that each element is a `(vtape,ctape)` 2-tuple formed by concatenating together the - output of :method:`Polynomial.compact`. + output of :meth:`Polynomial.compact`. """ ret = self.state.taylor_order_terms(order, max_polynomial_vars, return_coeff_polys) state_terms = ret[0] if return_coeff_polys else ret diff --git a/pygsti/modelmembers/povms/effect.py b/pygsti/modelmembers/povms/effect.py index 3d09103d5..d583cdc2e 100644 --- a/pygsti/modelmembers/povms/effect.py +++ b/pygsti/modelmembers/povms/effect.py @@ -359,7 +359,7 @@ def taylor_order_terms(self, order, max_polynomial_vars=100, return_coeff_polys= Only present when `return_coeff_polys == True`. A list of *compact* polynomial objects, meaning that each element is a `(vtape,ctape)` 2-tuple formed by concatenating together the - output of :method:`Polynomial.compact`. + output of :meth:`Polynomial.compact`. """ #NOTE: exact copy of State method - consolidate in FUTURE? raise NotImplementedError("taylor_order_terms(...) not implemented for %s objects!" % @@ -466,7 +466,7 @@ def taylor_order_terms_above_mag(self, order, max_polynomial_vars, min_term_mag) This function constructs the terms at the given order which have a magnitude (given by the absolute value of their coefficient) that is greater than or equal to `min_term_mag`. - It calls :method:`taylor_order_terms` internally, so that all the terms at order `order` + It calls :meth:`taylor_order_terms` internally, so that all the terms at order `order` are typically cached for future calls. 
Parameters diff --git a/pygsti/modelmembers/povms/marginalizedpovm.py b/pygsti/modelmembers/povms/marginalizedpovm.py index 484f01b59..f6c22f81d 100644 --- a/pygsti/modelmembers/povms/marginalizedpovm.py +++ b/pygsti/modelmembers/povms/marginalizedpovm.py @@ -100,7 +100,7 @@ def to_memoized_dict(self, mmg_memo): mm_dict: dict A dict representation of this ModelMember ready for serialization This must have at least the following fields: - module, class, submembers, params, state_space, evotype + module, class, submembers, params, state_space, evotype Additional fields may be added by derived classes. """ mm_dict = super().to_memoized_dict(mmg_memo) diff --git a/pygsti/modelmembers/povms/tensorprodeffect.py b/pygsti/modelmembers/povms/tensorprodeffect.py index afa765753..739da221e 100644 --- a/pygsti/modelmembers/povms/tensorprodeffect.py +++ b/pygsti/modelmembers/povms/tensorprodeffect.py @@ -84,7 +84,7 @@ def to_memoized_dict(self, mmg_memo): mm_dict: dict A dict representation of this ModelMember ready for serialization This must have at least the following fields: - module, class, submembers, params, state_space, evotype + module, class, submembers, params, state_space, evotype Additional fields may be added by derived classes. """ mm_dict = super().to_memoized_dict(mmg_memo) @@ -174,7 +174,7 @@ def taylor_order_terms(self, order, max_polynomial_vars=100, return_coeff_polys= Only present when `return_coeff_polys == True`. A list of *compact* polynomial objects, meaning that each element is a `(vtape,ctape)` 2-tuple formed by concatenating together the - output of :method:`Polynomial.compact`. + output of :meth:`Polynomial.compact`. """ terms = [] fnq = [int(round(_np.log2(f.dim))) // 2 for f in self.factors] # num of qubits per factor @@ -337,7 +337,7 @@ def deriv_wrt_params(self, wrt_filter=None): #HACK to deal with fact that output of to_dense is really what is differentiated # but this may not match self.dim == self.state_space.dim, e.g. for pure state vecs. dims = [len(fct.to_dense(on_space='minimal')) for fct in self.factors] - dim = int(_np.product(dims)) + dim = int(_np.prod(dims)) derivMx = _np.zeros((dim, self.num_params), typ) diff --git a/pygsti/modelmembers/povms/tensorprodpovm.py b/pygsti/modelmembers/povms/tensorprodpovm.py index 7750c5874..20c008e52 100644 --- a/pygsti/modelmembers/povms/tensorprodpovm.py +++ b/pygsti/modelmembers/povms/tensorprodpovm.py @@ -43,7 +43,7 @@ class TensorProductPOVM(_POVM): """ def __init__(self, factor_povms, evotype="auto", state_space=None): - dim = _np.product([povm.state_space.dim for povm in factor_povms]) + dim = _np.prod([povm.state_space.dim for povm in factor_povms]) if state_space is None: state_space = _statespace.default_space_for_dim(dim) else: @@ -102,7 +102,7 @@ def __iter__(self): return self.keys() def __len__(self): - return _np.product([len(fk) for fk in self._factor_keys]) + return _np.prod([len(fk) for fk in self._factor_keys]) def keys(self): """ diff --git a/pygsti/modelmembers/states/__init__.py b/pygsti/modelmembers/states/__init__.py index e77c9defd..c691387ff 100644 --- a/pygsti/modelmembers/states/__init__.py +++ b/pygsti/modelmembers/states/__init__.py @@ -203,7 +203,7 @@ def convert(state, to_type, basis, ideal_state=None, flatten_structure=False): to_type : {"full","full TP","static","static unitary","clifford",LINDBLAD} The type of parameterizaton to convert to. "LINDBLAD" is a placeholder for the various Lindblad parameterization types. See - :method:`Model.set_all_parameterizations` for more details. 
+ :meth:`Model.set_all_parameterizations` for more details. basis : {'std', 'gm', 'pp', 'qt'} or Basis object The basis for `state`. Allowed values are Matrix-unit (std), diff --git a/pygsti/modelmembers/states/composedstate.py b/pygsti/modelmembers/states/composedstate.py index ee4f7c9ee..03b555b4f 100644 --- a/pygsti/modelmembers/states/composedstate.py +++ b/pygsti/modelmembers/states/composedstate.py @@ -33,7 +33,7 @@ class ComposedState(_State): # , _ErrorMapContainer the "base" preparation or projection that is followed or preceded by, respectively, the parameterized Lindblad-form error generator. (This argument is *not* copied if it is a State. A numpy array - is converted to a new StaticState.) + is converted to a new StaticState.) errormap : MapOperator The error generator action and parameterization, encapsulated in @@ -494,7 +494,7 @@ def taylor_order_terms(self, order, max_polynomial_vars=100, return_coeff_polys= Only present when `return_coeff_polys == True`. A list of *compact* polynomial objects, meaning that each element is a `(vtape,ctape)` 2-tuple formed by concatenating together the - output of :method:`Polynomial.compact`. + output of :meth:`Polynomial.compact`. """ if order not in self.terms: assert(self.gpindices is not None), "ComposedState must be added to a Model before use!" @@ -519,7 +519,7 @@ def taylor_order_terms_above_mag(self, order, max_polynomial_vars, min_term_mag) This function constructs the terms at the given order which have a magnitude (given by the absolute value of their coefficient) that is greater than or equal to `min_term_mag`. - It calls :method:`taylor_order_terms` internally, so that all the terms at order `order` + It calls :meth:`taylor_order_terms` internally, so that all the terms at order `order` are typically cached for future calls. Parameters @@ -765,7 +765,7 @@ def depolarize(self, amount): def errorgen_coefficient_labels(self): """ - The elementary error-generator labels corresponding to the elements of :method:`errorgen_coefficients_array`. + The elementary error-generator labels corresponding to the elements of :meth:`errorgen_coefficients_array`. Returns ------- @@ -779,7 +779,7 @@ def errorgen_coefficients_array(self): """ The weighted coefficients of this state prep's error generator in terms of "standard" error generators. - Constructs a 1D array of all the coefficients returned by :method:`errorgen_coefficients`, + Constructs a 1D array of all the coefficients returned by :meth:`errorgen_coefficients`, weighted so that different error generators can be weighted differently when an `errorgen_penalty_factor` is used in an objective function. @@ -812,7 +812,7 @@ def errorgen_coefficients(self, return_basis=False, logscale_nonham=False): essentially converts the coefficient into a rate that is the contribution this term would have within a depolarizing channel where all stochastic generators had this same coefficient. - This is the value returned by :method:`error_rates`. + This is the value returned by :meth:`error_rates`. Returns ------- @@ -857,11 +857,11 @@ def set_errorgen_coefficients(self, lindblad_term_dict, action="update", logscal logscale_nonham : bool, optional Whether or not the values in `lindblad_term_dict` for non-hamiltonian error generators should be interpreted as error *rates* (of an - "equivalent" depolarizing channel, see :method:`errorgen_coefficients`) + "equivalent" depolarizing channel, see :meth:`errorgen_coefficients`) instead of raw coefficients. 
If True, then the non-hamiltonian coefficients are set to `-log(1 - d^2*rate)/d^2`, where `rate` is the corresponding value given in `lindblad_term_dict`. This is what is - performed by the function :method:`set_error_rates`. + performed by the function :meth:`set_error_rates`. truncate : bool, optional Whether to allow adjustment of the errorgen coefficients in @@ -879,7 +879,7 @@ def errorgen_coefficients_array_deriv_wrt_params(self): """ - The jacobian of :method:`errogen_coefficients_array` with respect to this state's parameters. + The jacobian of :meth:`errorgen_coefficients_array` with respect to this state's parameters. Returns ------- diff --git a/pygsti/modelmembers/states/computationalstate.py b/pygsti/modelmembers/states/computationalstate.py index 6e2e43a47..9e57cd956 100644 --- a/pygsti/modelmembers/states/computationalstate.py +++ b/pygsti/modelmembers/states/computationalstate.py @@ -226,7 +226,7 @@ def taylor_order_terms(self, order, max_polynomial_vars=100, return_coeff_polys= Only present when `return_coeff_polys == True`. A list of *compact* polynomial objects, meaning that each element is a `(vtape,ctape)` 2-tuple formed by concatenating together the - output of :method:`Polynomial.compact`. + output of :meth:`Polynomial.compact`. """ if order == 0: # only 0-th order term exists coeff = _Polynomial({(): 1.0}, max_polynomial_vars) @@ -309,7 +309,7 @@ def to_memoized_dict(self, mmg_memo): mm_dict: dict A dict representation of this ModelMember ready for serialization This must have at least the following fields: - module, class, submembers, params, state_space, evotype + module, class, submembers, params, state_space, evotype Additional fields may be added by derived classes. """ mm_dict = super().to_memoized_dict(mmg_memo) diff --git a/pygsti/modelmembers/states/cptpstate.py b/pygsti/modelmembers/states/cptpstate.py index 74cb8dfc3..68fcf43cd 100644 --- a/pygsti/modelmembers/states/cptpstate.py +++ b/pygsti/modelmembers/states/cptpstate.py @@ -103,7 +103,7 @@ def to_memoized_dict(self, mmg_memo): mm_dict: dict A dict representation of this ModelMember ready for serialization This must have at least the following fields: - module, class, submembers, params, state_space, evotype + module, class, submembers, params, state_space, evotype Additional fields may be added by derived classes. """ mm_dict = super().to_memoized_dict(mmg_memo) # contains 'dense_state_vector' via DenseState base class diff --git a/pygsti/modelmembers/states/densestate.py b/pygsti/modelmembers/states/densestate.py index ca373f10b..3ece61628 100644 --- a/pygsti/modelmembers/states/densestate.py +++ b/pygsti/modelmembers/states/densestate.py @@ -45,7 +45,7 @@ def to_array(self): Return the array used to identify this state within its range of possible values. For instance, if the state is a pure state, this returns a complex pure-state - vector regardless of the evolution type. The related :method:`to_dense` + vector regardless of the evolution type. The related :meth:`to_dense` method, in contrast, returns the dense representation of the state, which varies by evolution type. @@ -181,8 +181,10 @@ def _ptr(self): return self._rep.base def _ptr_has_changed(self): - """ Derived classes should override this function to handle rep updates - when the `_ptr` property is changed. """ + """ + Derived classes should override this function to handle rep updates + when the `_ptr` property is changed. 
+ """ self._rep.base_has_changed() def to_dense(self, on_space='minimal', scratch=None): @@ -210,7 +212,8 @@ def to_dense(self, on_space='minimal', scratch=None): return self._rep.to_dense(on_space) # both types of possible state reps implement 'to_dense' def to_memoized_dict(self, mmg_memo): - """Create a serializable dict with references to other objects in the memo. + """ + Create a serializable dict with references to other objects in the memo. Parameters ---------- @@ -224,7 +227,7 @@ mm_dict: dict A dict representation of this ModelMember ready for serialization This must have at least the following fields: - module, class, submembers, params, state_space, evotype + module, class, submembers, params, state_space, evotype Additional fields may be added by derived classes. """ mm_dict = super().to_memoized_dict(mmg_memo) @@ -242,8 +245,11 @@ def _from_memoized_dict(cls, mm_dict, serial_memo): return cls(vec, basis, mm_dict['evotype'], state_space) def _is_similar(self, other, rtol, atol): - """ Returns True if `other` model member (which it guaranteed to be the same type as self) has - the same local structure, i.e., not considering parameter values or submembers """ + """ + Returns True if `other` model member (which is guaranteed to be the same type as self) has + the same local structure, i.e., not considering parameter values or submembers. + """ + return self._ptr.shape == other._ptr.shape # similar (up to params) if have same data shape @@ -281,12 +287,16 @@ def __init__(self, purevec, basis, evotype, state_space): @property def _ptr(self): - """Gives a handle/pointer to the base numpy array that this object can be accessed as""" + """ + Gives a handle/pointer to the base numpy array that this object can be accessed as. + """ return self._rep.base if self._reptype == 'pure' else self._purevec def _ptr_has_changed(self): - """ Derived classes should override this function to handle rep updates - when the `_ptr` property is changed. """ + """ + Derived classes should override this function to handle rep updates + when the `_ptr` property is changed. + """ if self._reptype == 'superket': self._rep.base[:] = _bt.change_basis(_ot.state_to_dmvec(self._purevec), 'std', self._basis) self._rep.base_has_changed() @@ -318,7 +328,8 @@ def to_dense(self, on_space='minimal', scratch=None): return self._rep.to_dense(on_space) # both types of possible state reps implement 'to_dense' def to_memoized_dict(self, mmg_memo): - """Create a serializable dict with references to other objects in the memo. + """ + Create a serializable dict with references to other objects in the memo. Parameters ---------- @@ -332,7 +343,7 @@ mm_dict: dict A dict representation of this ModelMember ready for serialization This must have at least the following fields: - module, class, submembers, params, state_space, evotype + module, class, submembers, params, state_space, evotype Additional fields may be added by derived classes. 
""" mm_dict = super().to_memoized_dict(mmg_memo) @@ -350,6 +361,8 @@ def _from_memoized_dict(cls, mm_dict, serial_memo): return cls(vec, basis, mm_dict['evotype'], state_space) def _is_similar(self, other, rtol, atol): - """ Returns True if `other` model member (which it guaranteed to be the same type as self) has - the same local structure, i.e., not considering parameter values or submembers """ + """ + Returns True if `other` model member (which it guaranteed to be the same type as self) has + the same local structure, i.e., not considering parameter values or submembers + """ return self._ptr.shape == other._ptr.shape # similar (up to params) if have same data shape diff --git a/pygsti/modelmembers/states/purestate.py b/pygsti/modelmembers/states/purestate.py index 627401192..17e8ed4d8 100644 --- a/pygsti/modelmembers/states/purestate.py +++ b/pygsti/modelmembers/states/purestate.py @@ -16,7 +16,7 @@ from pygsti.modelmembers.states.state import State as _State from pygsti.modelmembers.states.staticstate import StaticState as _StaticState from pygsti.modelmembers import term as _term -from pygsti.evotype import Evotype as _Evotype +from pygsti.evotypes import Evotype as _Evotype from pygsti.baseobjs.polynomial import Polynomial as _Polynomial from pygsti.tools import basistools as _bt from pygsti.tools import optools as _ot @@ -149,7 +149,7 @@ def taylor_order_terms(self, order, max_polynomial_vars=100, return_coeff_polys= Only present when `return_coeff_polys == True`. A list of *compact* polynomial objects, meaning that each element is a `(vtape,ctape)` 2-tuple formed by concatenating together the - output of :method:`Polynomial.compact`. + output of :meth:`Polynomial.compact`. """ if self.num_params > 0: raise ValueError(("EmbeddedPureState.taylor_order_terms(...) is only " diff --git a/pygsti/modelmembers/states/state.py b/pygsti/modelmembers/states/state.py index 39f9ede9a..b3ed55cf9 100644 --- a/pygsti/modelmembers/states/state.py +++ b/pygsti/modelmembers/states/state.py @@ -166,7 +166,7 @@ def taylor_order_terms(self, order, max_polynomial_vars=100, return_coeff_polys= Only present when `return_coeff_polys == True`. A list of *compact* polynomial objects, meaning that each element is a `(vtape,ctape)` 2-tuple formed by concatenating together the - output of :method:`Polynomial.compact`. + output of :meth:`Polynomial.compact`. """ raise NotImplementedError("taylor_order_terms(...) not implemented for %s objects!" % self.__class__.__name__) @@ -272,7 +272,7 @@ def taylor_order_terms_above_mag(self, order, max_polynomial_vars, min_term_mag) This function constructs the terms at the given order which have a magnitude (given by the absolute value of their coefficient) that is greater than or equal to `min_term_mag`. - It calls :method:`taylor_order_terms` internally, so that all the terms at order `order` + It calls :meth:`taylor_order_terms` internally, so that all the terms at order `order` are typically cached for future calls. Parameters diff --git a/pygsti/modelmembers/states/staticpurestate.py b/pygsti/modelmembers/states/staticpurestate.py index 164a71812..02f2bce99 100644 --- a/pygsti/modelmembers/states/staticpurestate.py +++ b/pygsti/modelmembers/states/staticpurestate.py @@ -81,7 +81,7 @@ def taylor_order_terms(self, order, max_polynomial_vars=100, return_coeff_polys= Only present when `return_coeff_polys == True`. 
A list of *compact* polynomial objects, meaning that each element is a `(vtape,ctape)` 2-tuple formed by concatenating together the - output of :method:`Polynomial.compact`. + output of :meth:`Polynomial.compact`. """ if order == 0: # only 0-th order term exists (assumes static pure_state_vec) coeff = _Polynomial({(): 1.0}, max_polynomial_vars) diff --git a/pygsti/modelmembers/states/tensorprodstate.py b/pygsti/modelmembers/states/tensorprodstate.py index 8583e4f15..449f8cce7 100644 --- a/pygsti/modelmembers/states/tensorprodstate.py +++ b/pygsti/modelmembers/states/tensorprodstate.py @@ -140,7 +140,7 @@ def taylor_order_terms(self, order, max_polynomial_vars=100, return_coeff_polys= Only present when `return_coeff_polys == True`. A list of *compact* polynomial objects, meaning that each element is a `(vtape,ctape)` 2-tuple formed by concatenating together the - output of :method:`Polynomial.compact`. + output of :meth:`Polynomial.compact`. """ terms = [] fnq = [int(round(_np.log2(f.dim))) // 2 for f in self.factors] # num of qubits per factor @@ -285,7 +285,7 @@ def deriv_wrt_params(self, wrt_filter=None): #HACK to deal with fact that output of to_dense is really what is differentiated # but this may not match self.dim == self.state_space.dim, e.g. for pure state vecs. dims = [len(fct.to_dense(on_space='minimal')) for fct in self.factors] - dim = int(_np.product(dims)) + dim = int(_np.prod(dims)) derivMx = _np.zeros((dim, self.num_params), typ) diff --git a/pygsti/modelmembers/states/tpstate.py b/pygsti/modelmembers/states/tpstate.py index 3ee59419e..b9720c11b 100644 --- a/pygsti/modelmembers/states/tpstate.py +++ b/pygsti/modelmembers/states/tpstate.py @@ -25,7 +25,7 @@ class TPState(_DenseState): A fixed-unit-trace state vector. This state vector is fully parameterized except for the first element, which - is frozen to be 1/(d**0.25). This is so that, when the state vector is + is frozen to be `1/(d**0.25)`. This is so that, when the state vector is interpreted in the Pauli or Gell-Mann basis, the represented density matrix has trace == 1. This restriction is frequently used in conjuction with trace-preserving (TP) gates, hence its name. diff --git a/pygsti/modelmembers/term.py b/pygsti/modelmembers/term.py index 8c3e16232..b0ca406a6 100644 --- a/pygsti/modelmembers/term.py +++ b/pygsti/modelmembers/term.py @@ -27,7 +27,7 @@ def compose_terms_with_mag(terms, magnitude): Parameters ---------- terms : sequence - A sequence of :class:`RankOneTerm`s. + A sequence of :class:`RankOneTerm` objects. magnitude : float The magnitude of the compsed term. @@ -56,7 +56,7 @@ def compose_terms(terms): Parameters ---------- terms : list - A list of :class:`RankOneTerm`s to compose. + A list of :class:`RankOneTerm` objects to compose. Returns ------- @@ -361,7 +361,7 @@ class RankOnePrepTerm(RankOneTerm, _NoMagnitude): @classmethod def create_from(cls, coeff, pre_state, post_state, evotype, state_space): """ - Creates a :class:`RankOnePrepTerm`s using natural arguments. + Creates a :class:`RankOnePrepTerm` using natural arguments. Parameters ---------- @@ -436,7 +436,7 @@ class RankOneEffectTerm(RankOneTerm, _NoMagnitude): @classmethod def create_from(cls, coeff, pre_effect, post_effect, evotype, state_space): """ - Creates a :class:`RankOneEffectTerm`s using natural arguments. + Creates a :class:`RankOneEffectTerm` using natural arguments. 
Parameters ---------- @@ -511,7 +511,7 @@ class RankOneOpTerm(RankOneTerm, _NoMagnitude): @classmethod def create_from(cls, coeff, pre_op, post_op, evotype, state_space): """ - Creates a :class:`RankOneOpTerm`s using natural arguments. + Creates a :class:`RankOneOpTerm` using natural arguments. Parameters ---------- @@ -699,7 +699,7 @@ def coeff(self): class _HasPolynomialCoefficient(object): """ - A base class for terms that have polynomial coefficients.. + A base class for terms that have polynomial coefficients. Attributes ---------- @@ -749,7 +749,7 @@ def mapvec_indices_inplace(self, mapvec): """ Performs a bulk find & replace on this polynomial's variable indices. - This function is similar to :method:`map_indices` but uses a *vector* + This function is similar to :meth:`map_indices` but uses a *vector* to describe *individual* index updates instead of a function for increased performance. @@ -761,7 +761,7 @@ def mapvec_indices_inplace(self, mapvec): mapvec : numpy.ndarray An array whose i-th element gives the updated "new" index for the i-th variable. Note that this vector maps *individual* - variable indices old->new, whereas `mapfn` in :method:`map_indices` + variable indices old->new, whereas `mapfn` in :meth:`map_indices` maps between *tuples* of indices. Returns diff --git a/pygsti/modelpacks/_modelpack.py b/pygsti/modelpacks/_modelpack.py index 4614fab8e..41f6d472e 100644 --- a/pygsti/modelpacks/_modelpack.py +++ b/pygsti/modelpacks/_modelpack.py @@ -77,7 +77,7 @@ def target_model(self, gate_type="full", prep_type="auto", povm_type="auto", ins ---------- parameterization_type : {"TP", "CPTP", "H+S", "S", ... } The gate and SPAM vector parameterization type. See - :function:`Model.set_all_parameterizations` for all allowed values. + :func:`Model.set_all_parameterizations` for all allowed values. simulator : ForwardSimulator or {"auto", "matrix", "map"} The simulator (or type) to be used for model calculations (leave as diff --git a/pygsti/modelpacks/legacy/std1Q_Cliffords.py b/pygsti/modelpacks/legacy/std1Q_Cliffords.py index 268fa5b41..1948ff63b 100644 --- a/pygsti/modelpacks/legacy/std1Q_Cliffords.py +++ b/pygsti/modelpacks/legacy/std1Q_Cliffords.py @@ -57,7 +57,7 @@ def target_model(parameterization_type="full", sim_type="auto"): ---------- parameterization_type : {"TP", "CPTP", "H+S", "S", ... } The gate and SPAM vector parameterization type. See - :function:`Model.set_all_parameterizations` for all allowed values. + :func:`Model.set_all_parameterizations` for all allowed values. sim_type : {"auto", "matrix", "map", "termorder:X" } The simulator type to be used for model calculations (leave as diff --git a/pygsti/modelpacks/legacy/std1Q_XY.py b/pygsti/modelpacks/legacy/std1Q_XY.py index ca616ea76..a688307d0 100644 --- a/pygsti/modelpacks/legacy/std1Q_XY.py +++ b/pygsti/modelpacks/legacy/std1Q_XY.py @@ -52,7 +52,7 @@ def target_model(parameterization_type="full", sim_type="auto"): ---------- parameterization_type : {"TP", "CPTP", "H+S", "S", ... } The gate and SPAM vector parameterization type. See - :function:`Model.set_all_parameterizations` for all allowed values. + :func:`Model.set_all_parameterizations` for all allowed values. 
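For readers skimming the long run of near-identical modelpack hunks that follows, the `target_model` / `set_all_parameterizations` pairing these docstrings reference is typically used like this (a hedged sketch: `smq1Q_XYI` is the standard 1-qubit X/Y/idle modelpack in recent pyGSTi releases, and `"full TP"` is one of the documented type strings; substitute whatever your version provides):

```python
from pygsti.modelpacks import smq1Q_XYI

mdl = smq1Q_XYI.target_model()            # default "full" parameterization
mdl.set_all_parameterizations("full TP")  # switch to a TP-constrained parameterization
print(mdl.num_params)                     # the parameter count changes with the type
```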
sim_type : {"auto", "matrix", "map", "termorder:X" } The simulator type to be used for model calculations (leave as diff --git a/pygsti/modelpacks/legacy/std1Q_XYI.py b/pygsti/modelpacks/legacy/std1Q_XYI.py index 97399abff..bba962982 100644 --- a/pygsti/modelpacks/legacy/std1Q_XYI.py +++ b/pygsti/modelpacks/legacy/std1Q_XYI.py @@ -57,7 +57,7 @@ def target_model(parameterization_type="full", sim_type="auto"): ---------- parameterization_type : {"TP", "CPTP", "H+S", "S", ... } The gate and SPAM vector parameterization type. See - :function:`Model.set_all_parameterizations` for all allowed values. + :func:`Model.set_all_parameterizations` for all allowed values. sim_type : {"auto", "matrix", "map", "termorder:X" } The simulator type to be used for model calculations (leave as diff --git a/pygsti/modelpacks/legacy/std1Q_XYZI.py b/pygsti/modelpacks/legacy/std1Q_XYZI.py index deb7d8e31..d9e924a6a 100644 --- a/pygsti/modelpacks/legacy/std1Q_XYZI.py +++ b/pygsti/modelpacks/legacy/std1Q_XYZI.py @@ -53,7 +53,7 @@ def target_model(parameterization_type="full", sim_type="auto"): ---------- parameterization_type : {"TP", "CPTP", "H+S", "S", ... } The gate and SPAM vector parameterization type. See - :function:`Model.set_all_parameterizations` for all allowed values. + :func:`Model.set_all_parameterizations` for all allowed values. sim_type : {"auto", "matrix", "map", "termorder:X" } The simulator type to be used for model calculations (leave as diff --git a/pygsti/modelpacks/legacy/std1Q_XZ.py b/pygsti/modelpacks/legacy/std1Q_XZ.py index ef73eb4e4..1f9813e02 100644 --- a/pygsti/modelpacks/legacy/std1Q_XZ.py +++ b/pygsti/modelpacks/legacy/std1Q_XZ.py @@ -64,7 +64,7 @@ def target_model(parameterization_type="full", sim_type="auto"): ---------- parameterization_type : {"TP", "CPTP", "H+S", "S", ... } The gate and SPAM vector parameterization type. See - :function:`Model.set_all_parameterizations` for all allowed values. + :func:`Model.set_all_parameterizations` for all allowed values. sim_type : {"auto", "matrix", "map", "termorder:X" } The simulator type to be used for model calculations (leave as diff --git a/pygsti/modelpacks/legacy/std1Q_ZN.py b/pygsti/modelpacks/legacy/std1Q_ZN.py index 88279d48d..397831f52 100644 --- a/pygsti/modelpacks/legacy/std1Q_ZN.py +++ b/pygsti/modelpacks/legacy/std1Q_ZN.py @@ -58,7 +58,7 @@ def target_model(parameterization_type="full", sim_type="auto"): ---------- parameterization_type : {"TP", "CPTP", "H+S", "S", ... } The gate and SPAM vector parameterization type. See - :function:`Model.set_all_parameterizations` for all allowed values. + :func:`Model.set_all_parameterizations` for all allowed values. sim_type : {"auto", "matrix", "map", "termorder:X" } The simulator type to be used for model calculations (leave as diff --git a/pygsti/modelpacks/legacy/std1Q_pi4_pi2_XZ.py b/pygsti/modelpacks/legacy/std1Q_pi4_pi2_XZ.py index 57ad2da4e..6ed73bd2e 100644 --- a/pygsti/modelpacks/legacy/std1Q_pi4_pi2_XZ.py +++ b/pygsti/modelpacks/legacy/std1Q_pi4_pi2_XZ.py @@ -61,7 +61,7 @@ def target_model(parameterization_type="full", sim_type="auto"): ---------- parameterization_type : {"TP", "CPTP", "H+S", "S", ... } The gate and SPAM vector parameterization type. See - :function:`Model.set_all_parameterizations` for all allowed values. + :func:`Model.set_all_parameterizations` for all allowed values. 
sim_type : {"auto", "matrix", "map", "termorder:X" } The simulator type to be used for model calculations (leave as diff --git a/pygsti/modelpacks/legacy/std2Q_XXII.py b/pygsti/modelpacks/legacy/std2Q_XXII.py index 2f89741d5..f31d04a67 100644 --- a/pygsti/modelpacks/legacy/std2Q_XXII.py +++ b/pygsti/modelpacks/legacy/std2Q_XXII.py @@ -206,7 +206,7 @@ def target_model(parameterization_type="full", sim_type="auto"): ---------- parameterization_type : {"TP", "CPTP", "H+S", "S", ... } The gate and SPAM vector parameterization type. See - :function:`Model.set_all_parameterizations` for all allowed values. + :func:`Model.set_all_parameterizations` for all allowed values. sim_type : {"auto", "matrix", "map", "termorder:X" } The simulator type to be used for model calculations (leave as diff --git a/pygsti/modelpacks/legacy/std2Q_XXYYII.py b/pygsti/modelpacks/legacy/std2Q_XXYYII.py index c9cdfef3f..f2796172a 100644 --- a/pygsti/modelpacks/legacy/std2Q_XXYYII.py +++ b/pygsti/modelpacks/legacy/std2Q_XXYYII.py @@ -273,7 +273,7 @@ def target_model(parameterization_type="full", sim_type="auto"): ---------- parameterization_type : {"TP", "CPTP", "H+S", "S", ... } The gate and SPAM vector parameterization type. See - :function:`Model.set_all_parameterizations` for all allowed values. + :func:`Model.set_all_parameterizations` for all allowed values. sim_type : {"auto", "matrix", "map", "termorder:X" } The simulator type to be used for model calculations (leave as diff --git a/pygsti/modelpacks/legacy/std2Q_XY.py b/pygsti/modelpacks/legacy/std2Q_XY.py index f7778f123..408765bd8 100644 --- a/pygsti/modelpacks/legacy/std2Q_XY.py +++ b/pygsti/modelpacks/legacy/std2Q_XY.py @@ -162,7 +162,7 @@ def target_model(parameterization_type="full", sim_type="auto"): ---------- parameterization_type : {"TP", "CPTP", "H+S", "S", ... } The gate and SPAM vector parameterization type. See - :function:`Model.set_all_parameterizations` for all allowed values. + :func:`Model.set_all_parameterizations` for all allowed values. sim_type : {"auto", "matrix", "map", "termorder:X" } The simulator type to be used for model calculations (leave as diff --git a/pygsti/modelpacks/legacy/std2Q_XYCNOT.py b/pygsti/modelpacks/legacy/std2Q_XYCNOT.py index 675f7013b..00d1627ec 100644 --- a/pygsti/modelpacks/legacy/std2Q_XYCNOT.py +++ b/pygsti/modelpacks/legacy/std2Q_XYCNOT.py @@ -251,7 +251,7 @@ def target_model(parameterization_type="full", sim_type="auto"): ---------- parameterization_type : {"TP", "CPTP", "H+S", "S", ... } The gate and SPAM vector parameterization type. See - :function:`Model.set_all_parameterizations` for all allowed values. + :func:`Model.set_all_parameterizations` for all allowed values. sim_type : {"auto", "matrix", "map", "termorder:X" } The simulator type to be used for model calculations (leave as diff --git a/pygsti/modelpacks/legacy/std2Q_XYCPHASE.py b/pygsti/modelpacks/legacy/std2Q_XYCPHASE.py index 30c8a8f64..fa0bc1868 100644 --- a/pygsti/modelpacks/legacy/std2Q_XYCPHASE.py +++ b/pygsti/modelpacks/legacy/std2Q_XYCPHASE.py @@ -246,7 +246,7 @@ def target_model(parameterization_type="full", sim_type="auto"): ---------- parameterization_type : {"TP", "CPTP", "H+S", "S", ... } The gate and SPAM vector parameterization type. See - :function:`Model.set_all_parameterizations` for all allowed values. + :func:`Model.set_all_parameterizations` for all allowed values. 
sim_type : {"auto", "matrix", "map", "termorder:X" } The simulator type to be used for model calculations (leave as diff --git a/pygsti/modelpacks/legacy/std2Q_XYI.py b/pygsti/modelpacks/legacy/std2Q_XYI.py index c46e9c8f9..d5397ff76 100644 --- a/pygsti/modelpacks/legacy/std2Q_XYI.py +++ b/pygsti/modelpacks/legacy/std2Q_XYI.py @@ -183,7 +183,7 @@ def target_model(parameterization_type="full", sim_type="auto"): ---------- parameterization_type : {"TP", "CPTP", "H+S", "S", ... } The gate and SPAM vector parameterization type. See - :function:`Model.set_all_parameterizations` for all allowed values. + :func:`Model.set_all_parameterizations` for all allowed values. sim_type : {"auto", "matrix", "map", "termorder:X" } The simulator type to be used for model calculations (leave as diff --git a/pygsti/modelpacks/legacy/std2Q_XYI1.py b/pygsti/modelpacks/legacy/std2Q_XYI1.py index 119d45569..b2bbff5f2 100644 --- a/pygsti/modelpacks/legacy/std2Q_XYI1.py +++ b/pygsti/modelpacks/legacy/std2Q_XYI1.py @@ -50,7 +50,7 @@ def target_model(parameterization_type="full", sim_type="auto"): ---------- parameterization_type : {"TP", "CPTP", "H+S", "S", ... } The gate and SPAM vector parameterization type. See - :function:`Model.set_all_parameterizations` for all allowed values. + :func:`Model.set_all_parameterizations` for all allowed values. sim_type : {"auto", "matrix", "map", "termorder:X" } The simulator type to be used for model calculations (leave as diff --git a/pygsti/modelpacks/legacy/std2Q_XYI2.py b/pygsti/modelpacks/legacy/std2Q_XYI2.py index 4de71c59f..41a56ee05 100644 --- a/pygsti/modelpacks/legacy/std2Q_XYI2.py +++ b/pygsti/modelpacks/legacy/std2Q_XYI2.py @@ -49,7 +49,7 @@ def target_model(parameterization_type="full", sim_type="auto"): ---------- parameterization_type : {"TP", "CPTP", "H+S", "S", ... } The gate and SPAM vector parameterization type. See - :function:`Model.set_all_parameterizations` for all allowed values. + :func:`Model.set_all_parameterizations` for all allowed values. sim_type : {"auto", "matrix", "map", "termorder:X" } The simulator type to be used for model calculations (leave as diff --git a/pygsti/modelpacks/legacy/std2Q_XYICNOT.py b/pygsti/modelpacks/legacy/std2Q_XYICNOT.py index 3cd8cc554..3391ee30d 100644 --- a/pygsti/modelpacks/legacy/std2Q_XYICNOT.py +++ b/pygsti/modelpacks/legacy/std2Q_XYICNOT.py @@ -301,7 +301,7 @@ def target_model(parameterization_type="full", sim_type="auto"): ---------- parameterization_type : {"TP", "CPTP", "H+S", "S", ... } The gate and SPAM vector parameterization type. See - :function:`Model.set_all_parameterizations` for all allowed values. + :func:`Model.set_all_parameterizations` for all allowed values. sim_type : {"auto", "matrix", "map", "termorder:X" } The simulator type to be used for model calculations (leave as diff --git a/pygsti/modelpacks/legacy/std2Q_XYICPHASE.py b/pygsti/modelpacks/legacy/std2Q_XYICPHASE.py index cb3b0ef55..cbb12e6c3 100644 --- a/pygsti/modelpacks/legacy/std2Q_XYICPHASE.py +++ b/pygsti/modelpacks/legacy/std2Q_XYICPHASE.py @@ -296,7 +296,7 @@ def target_model(parameterization_type="full", sim_type="auto"): ---------- parameterization_type : {"TP", "CPTP", "H+S", "S", ... } The gate and SPAM vector parameterization type. See - :function:`Model.set_all_parameterizations` for all allowed values. + :func:`Model.set_all_parameterizations` for all allowed values. 
sim_type : {"auto", "matrix", "map", "termorder:X" } The simulator type to be used for model calculations (leave as diff --git a/pygsti/modelpacks/legacy/std2Q_XYZICNOT.py b/pygsti/modelpacks/legacy/std2Q_XYZICNOT.py index 424c01415..2613f2b1b 100644 --- a/pygsti/modelpacks/legacy/std2Q_XYZICNOT.py +++ b/pygsti/modelpacks/legacy/std2Q_XYZICNOT.py @@ -255,7 +255,7 @@ def target_model(parameterization_type="full", sim_type="auto"): ---------- parameterization_type : {"TP", "CPTP", "H+S", "S", ... } The gate and SPAM vector parameterization type. See - :function:`Model.set_all_parameterizations` for all allowed values. + :func:`Model.set_all_parameterizations` for all allowed values. sim_type : {"auto", "matrix", "map", "termorder:X" } The simulator type to be used for model calculations (leave as diff --git a/pygsti/modelpacks/legacy/stdQT_XYIMS.py b/pygsti/modelpacks/legacy/stdQT_XYIMS.py index ab8cfc60d..f08fdea09 100644 --- a/pygsti/modelpacks/legacy/stdQT_XYIMS.py +++ b/pygsti/modelpacks/legacy/stdQT_XYIMS.py @@ -253,7 +253,7 @@ def target_model(parameterization_type="full", sim_type="auto"): ---------- parameterization_type : {"TP", "CPTP", "H+S", "S", ... } The gate and SPAM vector parameterization type. See - :function:`Model.set_all_parameterizations` for all allowed values. + :func:`Model.set_all_parameterizations` for all allowed values. sim_type : {"auto", "matrix", "map", "termorder:X" } The simulator type to be used for model calculations (leave as diff --git a/pygsti/modelpacks/stdtarget.py b/pygsti/modelpacks/stdtarget.py index 4db162a59..20f3587f8 100644 --- a/pygsti/modelpacks/stdtarget.py +++ b/pygsti/modelpacks/stdtarget.py @@ -309,7 +309,7 @@ def _copy_target(std_module, param_type, simulator="auto", gscache=None): param_type : {"TP", "CPTP", "H+S", "S", ... } The gate and SPAM vector parameterization type. See - :function:`Model.set_all_parameterizations` for all allowed values. + :func:`Model.set_all_parameterizations` for all allowed values. simulator : ForwardSimulator or {"auto", "matrix", "map"} The simulator (or type) to be used for model calculations (leave as @@ -512,7 +512,7 @@ def target_model(parameterization_type="full", simulator="auto"): ---------- parameterization_type : {"TP", "CPTP", "H+S", "S", ... } The gate and SPAM vector parameterization type. See - :function:`Model.set_all_parameterizations` for all allowed values. + :func:`Model.set_all_parameterizations` for all allowed values. simulator : ForwardSimulator or {"auto", "matrix", "map"} The simulator (or type) to be used for model calculations (leave as diff --git a/pygsti/models/cloudnoisemodel.py b/pygsti/models/cloudnoisemodel.py index ff2e38b34..acca14f1c 100644 --- a/pygsti/models/cloudnoisemodel.py +++ b/pygsti/models/cloudnoisemodel.py @@ -96,8 +96,8 @@ class CloudNoiseModel(_ImplicitOpModel): simulator : ForwardSimulator or {"auto", "matrix", "map"} The circuit simulator used to compute any - requested probabilities, e.g. from :method:`probs` or - :method:`bulk_probs`. The default value of `"auto"` automatically + requested probabilities, e.g. from :meth:`probs` or + :meth:`bulk_probs`. The default value of `"auto"` automatically selects the simulation type, and is usually what you want. 
Other special allowed values are: diff --git a/pygsti/models/explicitcalc.py b/pygsti/models/explicitcalc.py index 78a524e2b..70d1e20a4 100644 --- a/pygsti/models/explicitcalc.py +++ b/pygsti/models/explicitcalc.py @@ -449,7 +449,7 @@ def _buildup_dpg(self): """ Helper function for building gauge/non-gauge projectors and for computing the number of gauge/non-gauge elements. - Returns the [ dP | dG ] matrix, i.e. np.concatenate( (dP,dG), axis=1 ) + Returns the `[ dP | dG ]` matrix, i.e. np.concatenate( (dP,dG), axis=1 ) whose nullspace gives the gauge directions in parameter space. """ diff --git a/pygsti/models/explicitmodel.py b/pygsti/models/explicitmodel.py index bc9020aef..086b197c3 100644 --- a/pygsti/models/explicitmodel.py +++ b/pygsti/models/explicitmodel.py @@ -64,7 +64,7 @@ class ExplicitOpModel(_mdl.OpModel): default_param : {"full", "TP", "CPTP", etc.}, optional Specifies the default gate and SPAM vector parameterization type. - Can be any value allowed by :method:`set_all_parameterizations`, + Can be any value allowed by :meth:`set_all_parameterizations`, which also gives a description of each parameterization type. prep_prefix: string, optional @@ -89,8 +89,8 @@ class ExplicitOpModel(_mdl.OpModel): simulator : ForwardSimulator or {"auto", "matrix", "map"} The circuit simulator used to compute any - requested probabilities, e.g. from :method:`probs` or - :method:`bulk_probs`. The default value of `"auto"` automatically + requested probabilities, e.g. from :meth:`probs` or + :meth:`bulk_probs`. The default value of `"auto"` automatically selects the simulation type, and is usually what you want. Other special allowed values are: @@ -1512,7 +1512,7 @@ def extract_unitary(Umx, U_sslbls, extracted_sslbls): # assume this is a kronecker product (check this in FUTURE?), so just fill extracted # unitary by fixing all non-extracted qudits (assumed identity-action on these) to 0 # and looping over extracted ones: - U_extracted = _np.zeros((_np.product(extracted_udims), _np.product(extracted_udims)), complex) + U_extracted = _np.zeros((_np.prod(extracted_udims), _np.prod(extracted_udims)), complex) for ii, itup in enumerate(_itertools.product(*[range(ud) for ud in extracted_udims])): i = _np.dot(extracted_inc, itup) for jj, jtup in enumerate(_itertools.product(*[range(ud) for ud in extracted_udims])): diff --git a/pygsti/models/fogistore.py b/pygsti/models/fogistore.py index 517165cbe..389925321 100644 --- a/pygsti/models/fogistore.py +++ b/pygsti/models/fogistore.py @@ -553,7 +553,7 @@ def create_fogi_aggregate_single_op_space(self, op_label, errorgen_type='H', @classmethod def merge_binned_fogi_infos(cls, binned_fogi_infos, index_offsets): """ - Merge together multiple FOGI-info dictionaries created by :method:`create_binned_fogi_infos`. + Merge together multiple FOGI-info dictionaries created by :meth:`create_binned_fogi_infos`. Parameters ---------- diff --git a/pygsti/models/gaugegroup.py b/pygsti/models/gaugegroup.py index 464589565..ca644a72b 100644 --- a/pygsti/models/gaugegroup.py +++ b/pygsti/models/gaugegroup.py @@ -52,7 +52,7 @@ def __init__(self, name): @property def num_params(self): """ - Return the number of parameters (degrees of freedom) of this gauge group.. + Return the number of parameters (degrees of freedom) of this gauge group. Returns ------- @@ -67,7 +67,7 @@ def compute_element(self, param_vec): Parameters ---------- param_vec : numpy.ndarray - A 1D array of length :method:`num_params`. + A 1D array of length :meth:`num_params`. 
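The "SPAM gauge" block reformatted in gaugegroup.py above describes a two-parameter diagonal transform matrix, diag(a, b, ..., b). A standalone numpy sketch of that matrix (illustrative only, not the `SpamGaugeGroup` implementation):

```python
import numpy as np

def spam_gauge_matrix(a, b, dim):
    """diag(a, b, ..., b): `a` scales the first (identity) component, `b` all the rest."""
    m = np.eye(dim) * b
    m[0, 0] = a
    return m

print(spam_gauge_matrix(1.0, 0.95, 4))  # 4x4 transform for a single qubit (Pauli basis)
```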
Returns ------- @@ -83,7 +83,7 @@ def initial_params(self): Returns ------- numpy.ndarray - A 1D array of length :method:`num_params`. + A 1D array of length :meth:`num_params`. """ return _np.array([], 'd') @@ -155,7 +155,7 @@ def from_vector(self, v): Parameters ---------- v : numpy.ndarray - A 1D array of length :method:`num_params` + A 1D array of length :meth:`num_params` Returns ------- @@ -267,7 +267,7 @@ def from_vector(self, v): Parameters ---------- v : numpy.ndarray - A 1D array of length :method:`num_params` + A 1D array of length :meth:`num_params` Returns ------- @@ -360,7 +360,7 @@ def compute_element(self, param_vec): Parameters ---------- param_vec : numpy.ndarray - A 1D array of length :method:`num_params`. + A 1D array of length :meth:`num_params`. Returns ------- @@ -378,7 +378,7 @@ def initial_params(self): Returns ------- numpy.ndarray - A 1D array of length :method:`num_params`. + A 1D array of length :meth:`num_params`. """ return self._operation.to_vector() @@ -501,7 +501,7 @@ def from_vector(self, v): Parameters ---------- v : numpy.ndarray - A 1D array of length :method:`num_params` + A 1D array of length :meth:`num_params` Returns ------- @@ -805,11 +805,11 @@ class SpamGaugeGroup(OpGaugeGroup): """ Gauge transformations which scale the SPAM and non-unital portions of the gates in a gate set. - A 2-dimensional gauge group spanning transform matrices of the form: - [ [ a 0 ... 0] where a and b are the 2 parameters. These diagonal - [ 0 b ... 0] transform matrices do not affect the SPAM operations - [ . . ... .] much more than typical near-unital and TP operations, and - [ 0 0 ... b] ] so we call this group of transformations the "SPAM gauge". + A 2-dimensional gauge group spanning transform matrices of the form :: + [ [ a 0 ... 0] where a and b are the 2 parameters. These diagonal + [ 0 b ... 0] transform matrices do not affect the SPAM operations + [ . . ... .] much more than typical near-unital and TP operations, and + [ 0 0 ... b] ] so we call this group of transformations the "SPAM gauge". Parameters ---------- @@ -959,7 +959,7 @@ def compute_element(self, param_vec): Parameters ---------- param_vec : numpy.ndarray - A 1D array of length :method:`num_params`. + A 1D array of length :meth:`num_params`. Returns ------- @@ -976,7 +976,7 @@ def initial_params(self): Returns ------- numpy.ndarray - A 1D array of length :method:`num_params`. + A 1D array of length :meth:`num_params`. """ return _np.empty(0, 'd') @@ -1067,7 +1067,7 @@ def from_vector(self, v): Parameters ---------- v : numpy.ndarray - A 1D array of length :method:`num_params` + A 1D array of length :meth:`num_params` Returns ------- diff --git a/pygsti/models/implicitmodel.py b/pygsti/models/implicitmodel.py index d0b765cce..c3501649e 100644 --- a/pygsti/models/implicitmodel.py +++ b/pygsti/models/implicitmodel.py @@ -51,7 +51,7 @@ class ImplicitOpModel(_mdl.OpModel): simulator : ForwardSimulator or {"auto", "matrix", "map"} The circuit simulator used to compute any - requested probabilities, e.g. from :method:`probs` or + requested probabilities, e.g. 
from :meth:`probs` or evotype : {"densitymx", "statevec", "stabilizer", "svterm", "cterm"} The evolution type of this model, describing how states are diff --git a/pygsti/models/localnoisemodel.py b/pygsti/models/localnoisemodel.py index aad3f8bfd..b76613179 100644 --- a/pygsti/models/localnoisemodel.py +++ b/pygsti/models/localnoisemodel.py @@ -92,8 +92,8 @@ class LocalNoiseModel(_ImplicitOpModel): simulator : ForwardSimulator or {"auto", "matrix", "map"} The circuit simulator used to compute any - requested probabilities, e.g. from :method:`probs` or - :method:`bulk_probs`. The default value of `"auto"` automatically + requested probabilities, e.g. from :meth:`probs` or + :meth:`bulk_probs`. The default value of `"auto"` automatically selects the simulation type, and is usually what you want. Other special allowed values are: @@ -121,7 +121,7 @@ class LocalNoiseModel(_ImplicitOpModel): name. If True, then gates with the same name acting on different qudits may have different local noise, and so the `operation_bks['gates']` dictionary contains a key for each gate - available gate placement. + available gate placement. ensure_composed_gates : bool, optional If True then the elements of the `operation_bks['gates']` will always diff --git a/pygsti/models/model.py b/pygsti/models/model.py index dce3b589a..5c7070a50 100644 --- a/pygsti/models/model.py +++ b/pygsti/models/model.py @@ -110,7 +110,7 @@ def num_modeltest_params(self): """ The parameter count to use when testing this model against data. - Often times, this is the same as :method:`num_params`, but there are times + Often times, this is the same as :meth:`num_params`, but there are times when it can convenient or necessary to use a parameter count different than the actual number of parameters in this model. @@ -1643,7 +1643,7 @@ def is_similar(self, other_model, rtol=1e-5, atol=1e-8): If `True`, then the two models are the same except for, perhaps, being at different parameter-space points (i.e. having different parameter vectors). - Similar models, A and B, can be made equivalent (see :method:`is_equivalent`) by + Similar models, A and B, can be made equivalent (see :meth:`is_equivalent`) by calling `modelA.from_vector(modelB.to_vector())`. Parameters diff --git a/pygsti/models/modelconstruction.py b/pygsti/models/modelconstruction.py index dc53a906c..3c477f8c3 100644 --- a/pygsti/models/modelconstruction.py +++ b/pygsti/models/modelconstruction.py @@ -153,22 +153,22 @@ def create_operation(op_expr, state_space, basis="pp", parameterization="full", delimited by the colon (:) character, which are composed together to create the final gate. Each part takes on of the allowed forms: - - I(ssl_0, ...) = identity operation on one or more state space labels + * `I(ssl_0, ...)` = identity operation on one or more state space labels (ssl_i) - - X(theta, ssl) = x-rotation by theta radians of qubit labeled by ssl - - Y(theta, ssl) = y-rotation by theta radians of qubit labeled by ssl - - Z(theta, ssl) = z-rotation by theta radians of qubit labeled by ssl - - CX(theta, ssl0, ssl1) = controlled x-rotation by theta radians. Acts + * X(theta, ssl) = x-rotation by theta radians of qubit labeled by ssl + * Y(theta, ssl) = y-rotation by theta radians of qubit labeled by ssl + * Z(theta, ssl) = z-rotation by theta radians of qubit labeled by ssl + * CX(theta, ssl0, ssl1) = controlled x-rotation by theta radians. Acts on qubit labeled by ssl1 with ssl0 being the control. - - CY(theta, ssl0, ssl1) = controlled y-rotation by theta radians. 
Acts + * CY(theta, ssl0, ssl1) = controlled y-rotation by theta radians. Acts on qubit labeled by ssl1 with ssl0 being the control. - - CZ(theta, ssl0, ssl1) = controlled z-rotation by theta radians. Acts + * CZ(theta, ssl0, ssl1) = controlled z-rotation by theta radians. Acts on qubit labeled by ssl1 with ssl0 being the control. - - CNOT(ssl0, ssl1) = standard controlled-not gate. Acts on qubit + * CNOT(ssl0, ssl1) = standard controlled-not gate. Acts on qubit labeled by ssl1 with ssl0 being the control. - - CPHASE(ssl0, ssl1) = standard controlled-phase gate. Acts on qubit + * CPHASE(ssl0, ssl1) = standard controlled-phase gate. Acts on qubit labeled by ssl1 with ssl0 being the control. - - LX(theta, i0, i1) = leakage between states i0 and i1. Implemented as + * LX(theta, i0, i1) = leakage between states i0 and i1. Implemented as an x-rotation between states with integer indices i0 and i1 followed by complete decoherence between the states. @@ -240,7 +240,7 @@ def to_labels(lbls): if opName == "I": # qubit labels (TODO: what about 'L' labels? -- not sure if they work with this...) labels = to_labels(args) - stateSpaceUDim = int(_np.product([state_space.label_udimension(l) for l in labels])) + stateSpaceUDim = int(_np.prod([state_space.label_udimension(l) for l in labels])) # a complex 2x2 mx unitary for the identity in Pauli-product basis Uop = _op.StaticUnitaryOp(_np.identity(stateSpaceUDim, 'complex'), 'pp', build_evotype) @@ -489,15 +489,15 @@ def _create_explicit_model_from_expressions(state_space, basis, then `effect_labels` must be a list of lists of effect labels, each list corresponding to a POVM. If set to the special string `"standard"` then the length-n binary strings are used when the state space consists - of n qubits (e.g. `"000"`, `"001"`, ... `"111"` for 3 qubits) and - the labels `"0"`, `"1"`, ... `""` are used, where `` + of n qubits (e.g. `"000"`, `"001"`, `...` `"111"` for 3 qubits) and + the labels `"0"`, `"1"`, `...` `""` are used, where `` is the dimension of the state space, in all non-qubit cases. effect_expressions : list, optional A list or list-of-lists of (string) vector expressions for each POVM effect vector (see documentation for :meth:`_create_spam_vector`). Expressions correspond to labels in `effect_labels`. If set to the special string - `"standard"`, then the expressions `"0"`, `"1"`, ... `""` are used, + `"standard"`, then the expressions `"0"`, `"1"`, `...` `""` are used, where `` is the dimension of the state space. povm_labels : list or string, optional @@ -1512,9 +1512,10 @@ def create_crosstalk_free_model(processor_spec, custom_gates=None, Errors can be specified using any combination of the 4 error rate/coeff arguments, but each gate name must be provided exclusively to one type of specification. Each specification results in a different type of operation, depending on the parameterization: - - `depolarization_strengths` -> DepolarizeOp, StochasticNoiseOp, or exp(LindbladErrorgen) - - `stochastic_error_probs` -> StochasticNoiseOp or exp(LindbladErrorgen) - - `lindblad_error_coeffs` -> exp(LindbladErrorgen) + + - `depolarization_strengths` -> DepolarizeOp, StochasticNoiseOp, or exp(LindbladErrorgen) + - `stochastic_error_probs` -> StochasticNoiseOp or exp(LindbladErrorgen) + - `lindblad_error_coeffs` -> exp(LindbladErrorgen) In addition to the gate names, the special values `"prep"` and `"povm"` may be used as keys to specify the error on the state preparation, measurement, respectively. 
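As a concrete illustration of the three error-specification arguments described above, a minimal sketch (the processor-spec constructor values are illustrative assumptions, not part of this changeset)::

    from pygsti.processors import QubitProcessorSpec
    from pygsti.models import modelconstruction as mc

    pspec = QubitProcessorSpec(num_qubits=2, gate_names=['Gxpi2', 'Gypi2', 'Gcnot'],
                               geometry='line')
    mdl = mc.create_crosstalk_free_model(
        pspec,
        depolarization_strengths={'Gxpi2': 0.01},              # -> DepolarizeOp
        lindblad_error_coeffs={'Gcnot': {('H', 'ZZ'): 0.02}})  # -> exp(LindbladErrorgen)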
@@ -1601,7 +1602,7 @@ def create_crosstalk_free_model(processor_spec, custom_gates=None, name. If True, then gates with the same name acting on different qudits may have different local noise, and so the `operation_bks['gates']` dictionary contains a key for each gate - available gate placement. + available gate placement. ensure_composed_gates : bool, optional If True then the elements of the `operation_bks['gates']` will always @@ -1659,7 +1660,7 @@ def _create_crosstalk_free_model(processor_spec, modelnoise, custom_gates=None, """ Create a n-qudit "crosstalk-free" model. - Similar to :method:`create_crosstalk_free_model` but the noise is input more generally, + Similar to :meth:`create_crosstalk_free_model` but the noise is input more generally, as a :class:`ModelNoise` object. Arguments are the same as this function except that `modelnoise` is given instead of several more specific noise-describing arguments. @@ -1719,9 +1720,10 @@ def create_cloud_crosstalk_model(processor_spec, custom_gates=None, Errors can be specified using any combination of the 4 error rate/coeff arguments, but each gate name must be provided exclusively to one type of specification. Each specification results in a different type of operation, depending on the parameterization: - - `depolarization_strengths` -> DepolarizeOp, StochasticNoiseOp, or exp(LindbladErrorgen) - - `stochastic_error_probs` -> StochasticNoiseOp or exp(LindbladErrorgen) - - `lindblad_error_coeffs` -> exp(LindbladErrorgen) + + - `depolarization_strengths` -> DepolarizeOp, StochasticNoiseOp, or exp(LindbladErrorgen) + - `stochastic_error_probs` -> StochasticNoiseOp or exp(LindbladErrorgen) + - `lindblad_error_coeffs` -> exp(LindbladErrorgen) In addition to the gate names, the special values `"prep"` and `"povm"` may be used as keys to specify the error on the state preparation, measurement, respectively. @@ -1801,7 +1803,7 @@ def create_cloud_crosstalk_model(processor_spec, custom_gates=None, name. If True, then gates with the same name acting on different qudits may have different local noise, and so the `operation_bks['cloudnoise']` dictionary contains a key for each gate - available gate placement. + available gate placement. independent_spam : bool, optional Similar to `indepenent_gates` but for SPAM operations. @@ -1849,7 +1851,7 @@ def _create_cloud_crosstalk_model(processor_spec, modelnoise, custom_gates=None, """ Create a n-qudit "cloud-crosstalk" model. - Similar to :method:`create_cloud_crosstalk_model` but the noise is input more generally, + Similar to :meth:`create_cloud_crosstalk_model` but the noise is input more generally, as a :class:`ModelNoise` object. Arguments are the same as this function except that `modelnoise` is given instead of several more specific noise-describing arguments. @@ -2019,8 +2021,8 @@ def create_cloud_crosstalk_model_from_hops_and_weights( simulator : ForwardSimulator or {"auto", "matrix", "map"} The circuit simulator used to compute any - requested probabilities, e.g. from :method:`probs` or - :method:`bulk_probs`. Using `"auto"` selects `"matrix"` when there + requested probabilities, e.g. from :meth:`probs` or + :meth:`bulk_probs`. Using `"auto"` selects `"matrix"` when there are 2 qudits or less, and otherwise selects `"map"`. 
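Continuing the sketch above, the `simulator` argument these model-construction functions share can also be forced explicitly instead of being left as `"auto"` (which, per these docstrings, selects `"matrix"` for small systems and `"map"` otherwise)::

    mdl_dense = mc.create_crosstalk_free_model(pspec, simulator="matrix")  # dense superoperators
    mdl_map = mc.create_crosstalk_free_model(pspec, simulator="map")       # state propagation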
evotype : Evotype or str, optional diff --git a/pygsti/models/modelnoise.py b/pygsti/models/modelnoise.py index e84a1cfa4..ae02b7126 100644 --- a/pygsti/models/modelnoise.py +++ b/pygsti/models/modelnoise.py @@ -141,7 +141,7 @@ def apply_errorgen_stencil(self, stencil, evotype, state_space, target_labels=No Parameters ---------- stencil : OrderedDict - The stencil to apply, usually created by :method:`create_errorgen_stencil`. + The stencil to apply, usually created by :meth:`create_errorgen_stencil`. evotype : str or Evotype The evolution type of to use when creating the embedded and composed operators, @@ -154,7 +154,7 @@ def apply_errorgen_stencil(self, stencil, evotype, state_space, target_labels=No target_labels : tuple or None, optional The target labels that determine where on the qudit graph this stencil will be placed. When a tuple, it should have length equal to the `num_target_labels` argument passed to - :method:`create_errorgen_stencil`. `None` indicates that the entire space is the "target" + :meth:`create_errorgen_stencil`. `None` indicates that the entire space is the "target" space of the stencil (e.g. a global idle, preparation, or measurement). qudit_graph : QubitGraph, optional @@ -216,7 +216,7 @@ def create_errormap_stencil(self, opkey, evotype, state_space, num_target_labels so that, if desired, the same errors can be used on multiple sets of target qudits (often this is done when a "independent" argument to a model-creation function is `False`). An "error map stencil" is a stencil whose operators are error maps - rather than error generators. + rather than error generators. Parameters ---------- @@ -262,7 +262,7 @@ def apply_errormap_stencil(self, stencil, evotype, state_space, target_labels=No Parameters ---------- stencil : OrderedDict - The stencil to apply, usually created by :method:`create_errormap_stencil`. + The stencil to apply, usually created by :meth:`create_errormap_stencil`. evotype : str or Evotype The evolution type of to use when creating the embedded and composed operators, @@ -275,7 +275,7 @@ def apply_errormap_stencil(self, stencil, evotype, state_space, target_labels=No target_labels : tuple or None, optional The target labels that determine where on the qudit graph this stencil will be placed. When a tuple, it should have length equal to the `num_target_labels` argument passed to - :method:`create_errormap_stencil`. `None` indicates that the entire space is the "target" + :meth:`create_errormap_stencil`. `None` indicates that the entire space is the "target" space of the stencil (e.g. a global idle, preparation, or measurement). qudit_graph : QubitGraph, optional @@ -339,7 +339,7 @@ def reset_access_counters(self): These counters tally the number of times each operation key is accessed, and are used to identify model noise specification that are supplied by the user - but never used. See :method:`warn_about_zero_counters`. + but never used. See :meth:`warn_about_zero_counters`. Returns ------- @@ -443,7 +443,7 @@ def __contains__(self, key): def create_errorgen_stencil(self, opkey, evotype, state_space, num_target_labels=None): """ - See :method:`OpModelNoise.create_errorgen_stencil`. + See :meth:`OpModelNoise.create_errorgen_stencil`. 
""" if opkey not in self.per_op_noise: return {} # an empty stencil opnoise = self.per_op_noise[opkey] @@ -474,7 +474,7 @@ def create_errorgen_stencil(self, opkey, evotype, state_space, num_target_labels def apply_errorgen_stencil(self, stencil, evotype, state_space, target_labels=None, qudit_graph=None, copy=False): """ - See :method:`OpModelNoise.apply_errorgen_stencil`. + See :meth:`OpModelNoise.apply_errorgen_stencil`. """ embedded_errgens = [] for stencil_sslbls, local_errorgen in stencil.items(): @@ -495,7 +495,7 @@ def apply_errorgen_stencil(self, stencil, evotype, state_space, target_labels=No def create_errormap_stencil(self, opkey, evotype, state_space, num_target_labels=None): """ - See :method:`OpModelNoise.create_errormap_stencil`. + See :meth:`OpModelNoise.create_errormap_stencil`. """ if opkey not in self.per_op_noise: return {} # an empty stencil opnoise = self.per_op_noise[opkey] @@ -528,7 +528,7 @@ def create_errormap_stencil(self, opkey, evotype, state_space, num_target_labels def apply_errormap_stencil(self, stencil, evotype, state_space, target_labels=None, qudit_graph=None, copy=False): """ - See :method:`OpModelNoise.apply_errormap_stencil`. + See :meth:`OpModelNoise.apply_errormap_stencil`. """ embedded_errmaps = [] for stencil_sslbls, local_errormap in stencil.items(): @@ -649,7 +649,7 @@ def __contains__(self, key): def create_errorgen_stencil(self, opkey, evotype, state_space, num_target_labels=None): """ - See :method:`OpModelNoise.create_errorgen_stencil`. + See :meth:`OpModelNoise.create_errorgen_stencil`. """ self._increment_touch_count(opkey) return tuple([modelnoise.create_errorgen_stencil(opkey, evotype, state_space, num_target_labels) @@ -657,7 +657,7 @@ def create_errorgen_stencil(self, opkey, evotype, state_space, num_target_labels def apply_errorgen_stencil(self, stencil, evotype, state_space, target_labels=None, qudit_graph=None, copy=False): """ - See :method:`OpModelNoise.apply_errorgen_stencil`. + See :meth:`OpModelNoise.apply_errorgen_stencil`. """ noise_errgens = [modelnoise.apply_errorgen_stencil(s, evotype, state_space, target_labels, qudit_graph, copy) for s, modelnoise in zip(stencil, self.opmodelnoises)] @@ -667,7 +667,7 @@ def apply_errorgen_stencil(self, stencil, evotype, state_space, target_labels=No def create_errormap_stencil(self, opkey, evotype, state_space, num_target_labels=None): """ - See :method:`OpModelNoise.create_errormap_stencil`. + See :meth:`OpModelNoise.create_errormap_stencil`. """ self._increment_touch_count(opkey) return tuple([modelnoise.create_errormap_stencil(opkey, evotype, state_space, num_target_labels) @@ -675,7 +675,7 @@ def create_errormap_stencil(self, opkey, evotype, state_space, num_target_labels def apply_errormap_stencil(self, stencil, evotype, state_space, target_labels=None, qudit_graph=None, copy=False): """ - See :method:`OpModelNoise.apply_errormap_stencil`. + See :meth:`OpModelNoise.apply_errormap_stencil`. """ noise_ops = [modelnoise.apply_errormap_stencil(s, evotype, state_space, target_labels, qudit_graph, copy) for s, modelnoise in zip(stencil, self.opmodelnoises)] diff --git a/pygsti/models/stencillabel.py b/pygsti/models/stencillabel.py index e4c544284..de82e6607 100644 --- a/pygsti/models/stencillabel.py +++ b/pygsti/models/stencillabel.py @@ -28,7 +28,7 @@ class StencilLabel(object): ---------- local_state_space : StateSpace A manually supplied local state space for this label, which is returned by - :method:`create_local_state_space` instead of generating a local state space. 
+ :meth:`create_local_state_space` instead of generating a local state space. """ @classmethod @@ -109,7 +109,7 @@ def create_local_state_space(self, entire_state_space): the sub-space where they act -- a "local" state space. A stencil label expands into one or more state space label tuples, and this function constructs a *single* local state space that is appropriate for any and all of these tuples (i.e. what is returned - by :method:`compute_absolute_sslbls`), and that is therefore appropriate for constructing + by :meth:`compute_absolute_sslbls`), and that is therefore appropriate for constructing the to-be-embedded operation. Importantly, this function can be called without knowing where this stencil label will be placed, that is, it doesn't require a "target labels" argument. @@ -130,7 +130,7 @@ def create_local_state_space(self, entire_state_space): return self._create_local_state_space(entire_state_space) def _create_local_state_space(self, entire_state_space): - """ Stub that derived classes implement - same function as :method:`create_local_state_space` """ + """ Stub that derived classes implement - same function as :meth:`create_local_state_space` """ raise NotImplementedError("Derived classes should implement this!") def _create_local_state_space_for_sslbls(self, sslbls, entire_state_space): @@ -171,7 +171,7 @@ def compute_absolute_sslbls(self, qubit_graph, state_space, target_lbls): """ Creates a list of all the state space label tuples this stencil label expands into. - See :method:`StencilLabel.compute_absolute_sslbls` + See :meth:`StencilLabel.compute_absolute_sslbls` """ # Return a *list* of sslbls, since some stencil labels may resolve into multiple absolute sslbls if self.sslbls is None: @@ -208,7 +208,7 @@ def compute_absolute_sslbls(self, qubit_graph, state_space, target_lbls): """ Creates a list of all the state space label tuples this stencil label expands into. - See :method:`StencilLabel.compute_absolute_sslbls` + See :meth:`StencilLabel.compute_absolute_sslbls` """ # return a *list* of sslbls, since some stencil labels may resolve into multiple absolute sslbls return [self._resolve_single_sslbls_tuple(sslbls, qubit_graph, state_space, target_lbls) @@ -254,7 +254,7 @@ def compute_absolute_sslbls(self, qubit_graph, state_space, target_lbls): """ Creates a list of all the state space label tuples this stencil label expands into. - See :method:`StencilLabel.compute_absolute_sslbls` + See :meth:`StencilLabel.compute_absolute_sslbls` """ ret = [] for chosen_sslbls in _itertools.combinations(self.possible_sslbls, self.num_to_choose): @@ -318,7 +318,7 @@ def compute_absolute_sslbls(self, qubit_graph, state_space, target_lbls): """ Creates a list of all the state space label tuples this stencil label expands into. - See :method:`StencilLabel.compute_absolute_sslbls` + See :meth:`StencilLabel.compute_absolute_sslbls` """ ret = [] assert (qubit_graph is not None), "A qubit graph is required by StencilLabelRadiusCombos!" diff --git a/pygsti/objectivefns/objectivefns.py b/pygsti/objectivefns/objectivefns.py index cca2a3045..f7195235e 100644 --- a/pygsti/objectivefns/objectivefns.py +++ b/pygsti/objectivefns/objectivefns.py @@ -170,7 +170,7 @@ def cast(cls, obj): @classmethod def create_from(cls, objective='logl', freq_weighted_chi2=False): """ - Creates common :class:`ObjectiveFunctionBuilder`s from a few arguments. + Creates common :class:`ObjectiveFunctionBuilder` objects from a few arguments.
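For reference, `create_from` is the usual shortcut for building the standard GST objective-function builders; a sketch (treating `'chi2'` as an allowed objective name is an assumption suggested by the `freq_weighted_chi2` flag)::

    from pygsti.objectivefns.objectivefns import ObjectiveFunctionBuilder

    logl_builder = ObjectiveFunctionBuilder.create_from(objective='logl')
    chi2_builder = ObjectiveFunctionBuilder.create_from(objective='chi2',
                                                        freq_weighted_chi2=True)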
Parameters ---------- @@ -524,7 +524,7 @@ def lsvec(self, probs, counts, total_counts, freqs, intermediates=None): """ Compute the least-squares vector of the objective function. - This is the square-root of the terms-vector returned from :method:`terms`. + This is the square-root of the terms-vector returned from :meth:`terms`. This vector is the objective function value used by a least-squares optimizer when optimizing this objective function. Note that the existence of this quantity requires that the terms be non-negative. If this is not @@ -637,8 +637,8 @@ def dlsvec_and_lsvec(self, probs, counts, total_counts, freqs, intermediates=Non """ Compute the derivatives of the least-squares vector together with the vector itself. - This is sometimes more computationally efficient than calling :method:`dlsvec` and - :method:`lsvec` separately, as the former call may require computing the latter. + This is sometimes more computationally efficient than calling :meth:`dlsvec` and + :meth:`lsvec` separately, as the former call may require computing the latter. Parameters ---------- @@ -1129,7 +1129,7 @@ def lsvec(self, paramvec=None, oob_check=False): """ Compute the least-squares vector of the objective function. - This is the square-root of the terms-vector returned from :method:`terms`. + This is the square-root of the terms-vector returned from :meth:`terms`. This vector is the objective function value used by a least-squares optimizer when optimizing this objective function. Note that the existence of this quantity requires that the terms be non-negative. If this is not @@ -1306,7 +1306,7 @@ def lsvec_percircuit(self, paramvec=None): def dlsvec_percircuit(self, paramvec=None): """ - Compute the jacobian of the sqrt(per-circuit) values given by :method:`lsvec_percircuit`. + Compute the jacobian of the sqrt(per-circuit) values given by :meth:`lsvec_percircuit`. This jacobian is primarily useful for interfacing with a least-squares optimizer. @@ -1335,7 +1335,7 @@ def fn_local(self, paramvec=None): When the objective function's layout is distributed, each processor only holds a portion of the objective function terms, and this function returns only the - sum of these local terms. See :method:`fn` for the global objective function value. + sum of these local terms. See :meth:`fn` for the global objective function value. Parameters @@ -1416,7 +1416,7 @@ def approximate_hessian(self, paramvec=None): """ Compute an approximate Hessian of this objective function. - This is typically much less expensive than :method:`hessian` and + This is typically much less expensive than :meth:`hessian` and does not require that `enable_hessian=True` was set upon initialization. It computes an approximation to the Hessian that only utilizes the information in the Jacobian. Derivatives are takes with respect to model @@ -1443,7 +1443,7 @@ def approximate_hessian(self, paramvec=None): # Compute the amount of memory needed to perform evaluations of this objective function. # # This number includes both intermediate and final results, and assumes - # that the types of evauations given by :method:`_evaltree_subcalls` + # that the types of evauations given by :meth:`_evaltree_subcalls` # are required. # # Parameters @@ -1759,7 +1759,7 @@ def lsvec(self, probs, counts, total_counts, freqs, intermediates=None): """ Compute the least-squares vector of the objective function. - This is the square-root of the terms-vector returned from :method:`terms`. + This is the square-root of the terms-vector returned from :meth:`terms`. 
This vector is the objective function value used by a least-squares optimizer when optimizing this objective function. Note that the existence of this quantity requires that the terms be non-negative. If this is not @@ -1866,7 +1866,7 @@ def hterms_alt(self, probs, counts, total_counts, freqs, intermediates=None): """ Alternate computation of the 2nd derivatives of the terms of this objective function. - This should give exactly the same results as :method:`hterms`, but may be a little faster. + This should give exactly the same results as :meth:`hterms`, but may be a little faster. Parameters ---------- @@ -2014,7 +2014,7 @@ def _dweights(self, p, f, wts): # derivative of weights w.r.t. p The frequencies wts : numpy.ndarray - The weights, as computed by :method:`_weights`. + The weights, as computed by :meth:`_weights`. Returns ------- @@ -2039,7 +2039,7 @@ def _hweights(self, p, f, wts): # 2nd derivative of weights w.r.t. p The frequencies wts : numpy.ndarray - The weights, as computed by :method:`_weights`. + The weights, as computed by :meth:`_weights`. Returns ------- @@ -2435,7 +2435,7 @@ def _dweights(self, p, f, wts): The frequencies wts : numpy.ndarray - The weights, as computed by :method:`_weights`. + The weights, as computed by :meth:`_weights`. Returns ------- @@ -2456,7 +2456,7 @@ def _hweights(self, p, f, wts): The frequencies wts : numpy.ndarray - The weights, as computed by :method:`_weights`. + The weights, as computed by :meth:`_weights`. Returns ------- @@ -2617,7 +2617,7 @@ def _dweights(self, p, f, wts): The frequencies wts : numpy.ndarray - The weights, as computed by :method:`_weights`. + The weights, as computed by :meth:`_weights`. Returns ------- @@ -2638,7 +2638,7 @@ def _hweights(self, p, f, wts): The frequencies wts : numpy.ndarray - The weights, as computed by :method:`_weights`. + The weights, as computed by :meth:`_weights`. Returns ------- @@ -2882,7 +2882,7 @@ def set_regularization(self, min_prob_clip=1e-4, pfratio_stitchpt=None, pfratio_ self.fmin = None def _intermediates(self, probs, counts, total_counts, freqs): - """ Intermediate values used by both terms(...) and dterms(...) """ + """ Intermediate values used by both `terms(...)` and `dterms(...)` """ # Quantities depending on data only (not probs): could be computed once and # passed in as arguments to this (and other) functions? freqs_nozeros = _np.where(counts == 0, 1.0, freqs) @@ -2986,7 +2986,7 @@ def lsvec(self, probs, counts, total_counts, freqs, intermediates=None): """ Compute the least-squares vector of the objective function. - This is the square-root of the terms-vector returned from :method:`terms`. + This is the square-root of the terms-vector returned from :meth:`terms`. This vector is the objective function value used by a least-squares optimizer when optimizing this objective function. Note that the existence of this quantity requires that the terms be non-negative. If this is not @@ -3240,7 +3240,7 @@ def set_regularization(self, min_prob_clip=1e-4, pfratio_stitchpt=None, pfratio_ self.regtype = "pfratio" def _intermediates(self, probs, counts, total_counts, freqs): - """ Intermediate values used by both terms(...) and dterms(...) """ + """ Intermediate values used by both `terms(...)` and `dterms(...)` """ # Quantities depending on data only (not probs): could be computed once and # passed in as arguments to this (and other) functions? 
freqs_nozeros = _np.where(counts == 0, 1.0, freqs) @@ -3417,7 +3417,7 @@ def lsvec(self, probs, counts, total_counts, freqs, intermediates=None): """ Compute the least-squares vector of the objective function. - This is the square-root of the terms-vector returned from :method:`terms`. + This is the square-root of the terms-vector returned from :meth:`terms`. This vector is the objective function value used by a least-squares optimizer when optimizing this objective function. Note that the existence of this quantity requires that the terms be non-negative. If this is not @@ -3486,8 +3486,8 @@ def dlsvec_and_lsvec(self, probs, counts, total_counts, freqs, intermediates=Non """ Compute the derivatives of the least-squares vector together with the vector itself. - This is sometimes more computationally efficient than calling :method:`dlsvec` and - :method:`lsvec` separately, as the former call may require computing the latter. + This is sometimes more computationally efficient than calling :meth:`dlsvec` and + :meth:`lsvec` separately, as the former call may require computing the latter. Parameters ---------- @@ -3761,7 +3761,7 @@ def lsvec(self, probs, counts, total_counts, freqs, intermediates=None): """ Compute the least-squares vector of the objective function. - This is the square-root of the terms-vector returned from :method:`terms`. + This is the square-root of the terms-vector returned from :meth:`terms`. This vector is the objective function value used by a least-squares optimizer when optimizing this objective function. Note that the existence of this quantity requires that the terms be non-negative. If this is not @@ -3830,8 +3830,8 @@ def dlsvec_and_lsvec(self, probs, counts, total_counts, freqs): """ Compute the derivatives of the least-squares vector together with the vector itself. - This is sometimes more computationally efficient than calling :method:`dlsvec` and - :method:`lsvec` separately, as the former call may require computing the latter. + This is sometimes more computationally efficient than calling :meth:`dlsvec` and + :meth:`lsvec` separately, as the former call may require computing the latter. Parameters ---------- @@ -4561,7 +4561,7 @@ def _omitted_prob_first_terms(self, probs): ------- numpy.ndarray """ - omitted_probs = 1.0 - _np.array([_np.sum(probs[self.layout.indices_for_index(i)]) + omitted_probs = 1.0 - _np.array([probs[self.layout.indices_for_index(i)].sum() for i in self.indicesOfCircuitsWithOmittedData]) return self.raw_objfn.zero_freq_terms(self.total_counts[self.firsts], omitted_probs) @@ -4609,12 +4609,12 @@ def _update_terms_for_omitted_probs(self, terms, probs): def _omitted_prob_first_dterms(self, probs): """ - Compute the derivative of the first-terms vector returned by :method:`_omitted_prob_first_terms`. + Compute the derivative of the first-terms vector returned by :meth:`_omitted_prob_first_terms`. This derivative is just with respect to the *probabilities*, not the model parameters, as it anticipates a final dot product with the jacobian of the computed probabilities with respect to the model parameters (see - :method:`_update_dterms_for_omitted_probs`). + :meth:`_update_dterms_for_omitted_probs`). Parameters ---------- @@ -4731,7 +4731,7 @@ def lsvec(self, paramvec=None, oob_check=False): """ Compute the least-squares vector of the objective function. - This is the square-root of the terms-vector returned from :method:`terms`. + This is the square-root of the terms-vector returned from :meth:`terms`. 
This vector is the objective function value used by a least-squares optimizer when optimizing this objective function. Note that the existence of this quantity requires that the terms be non-negative. If this is not @@ -5056,7 +5056,7 @@ def approximate_hessian(self, paramvec=None): """ Compute an approximate Hessian of this objective function. - This is typically much less expensive than :method:`hessian` and + This is typically much less expensive than :meth:`hessian` and does not require that `enable_hessian=True` was set upon initialization. It computes an approximation to the Hessian that only utilizes the information in the Jacobian. Derivatives are takes with respect to model @@ -5778,7 +5778,7 @@ def lsvec(self, paramvec=None): """ Compute the least-squares vector of the objective function. - This is the square-root of the terms-vector returned from :method:`terms`. + This is the square-root of the terms-vector returned from :meth:`terms`. This vector is the objective function value used by a least-squares optimizer when optimizing this objective function. Note that the existence of this quantity requires that the terms be non-negative. If this is not @@ -5917,7 +5917,7 @@ def lsvec(self, paramvec=None): """ Compute the least-squares vector of the objective function. - This is the square-root of the terms-vector returned from :method:`terms`. + This is the square-root of the terms-vector returned from :meth:`terms`. This vector is the objective function value used by a least-squares optimizer when optimizing this objective function. Note that the existence of this quantity requires that the terms be non-negative. If this is not @@ -6085,7 +6085,7 @@ def lsvec(self, paramvec=None): """ Compute the least-squares vector of the objective function. - This is the square-root of the terms-vector returned from :method:`terms`. + This is the square-root of the terms-vector returned from :meth:`terms`. This vector is the objective function value used by a least-squares optimizer when optimizing this objective function. Note that the existence of this quantity requires that the terms be non-negative. If this is not @@ -6538,7 +6538,7 @@ def lsvec(self, wvec=None): """ Compute the least-squares vector of the objective function. - This is the square-root of the terms-vector returned from :method:`terms`. + This is the square-root of the terms-vector returned from :meth:`terms`. This vector is the objective function value used by a least-squares optimizer when optimizing this objective function. Note that the existence of this quantity requires that the terms be non-negative. If this is not diff --git a/pygsti/objectivefns/wildcardbudget.py b/pygsti/objectivefns/wildcardbudget.py index 4f184ee07..ff16f94d9 100644 --- a/pygsti/objectivefns/wildcardbudget.py +++ b/pygsti/objectivefns/wildcardbudget.py @@ -27,11 +27,11 @@ class WildcardBudget(_NicelySerializable): an amount "slack" in its outcomes probabilities. The way in which this slack is computed - or "distributed", though it need not necessarily sum to a fixed total - per circuit depends on each derived class's implementation - of the :method:`circuit_budget` method. Goodness-of-fit quantities such as + of the :meth:`circuit_budget` method. Goodness-of-fit quantities such as the log-likelihood or chi2 can utilize a `WildcardBudget` object to compute a value that shifts the circuit outcome probabilities within their allowed slack (so `|p_used - p_actual| <= slack`) to achieve the best goodness of - fit. 
For example, see the `wildcard` argument of :function:`two_delta_logl_terms`. + fit. For example, see the `wildcard` argument of :func:`two_delta_logl_terms`. This is a base class, which must be inherited from in order to obtain a full functional wildcard budge (the `circuit_budget` method must be @@ -119,7 +119,7 @@ def circuit_budgets(self, circuits, precomp=None): precomp : numpy.ndarray, optional A precomputed quantity that speeds up the computation of circuit - budgets. Given by :method:`precompute_for_same_circuits`. + budgets. Given by :meth:`precompute_for_same_circuits`. Returns ------- @@ -303,7 +303,7 @@ def update_probs(self, probs_in, probs_out, freqs, layout, precomp=None, probs_f probs_freqs_precomp : list, optional A precomputed list of quantities re-used when calling `update_probs` using the same `probs_in`, `freqs`, and `layout`. Generate by calling - :method:`precompute_for_same_probs_freqs`. + :meth:`precompute_for_same_probs_freqs`. return_deriv : bool, optional When True, returns the derivative of each updated probability with @@ -617,7 +617,7 @@ def circuit_budgets(self, circuits, precomp=None): precomp : numpy.ndarray, optional A precomputed quantity that speeds up the computation of circuit - budgets. Given by :method:`precompute_for_same_circuits`. + budgets. Given by :meth:`precompute_for_same_circuits`. Returns ------- diff --git a/pygsti/optimize/arraysinterface.py b/pygsti/optimize/arraysinterface.py index 8c97d0b1f..fa64e0cec 100644 --- a/pygsti/optimize/arraysinterface.py +++ b/pygsti/optimize/arraysinterface.py @@ -203,7 +203,7 @@ def allscatter_x(self, global_x, x): so there's no need to do any MPI communication. x : numpy.array or LocalNumpyArray - The output vector, typically a slice of `global_x`.. + The output vector, typically a slice of `global_x`. Returns ------- @@ -261,7 +261,7 @@ def gather_jtj(self, jtj, return_shared=False): in a small performance gain because the array used internally to gather the results can be returned directly. When `True` a shared memory handle is also returned, and the caller assumes responsibilty for freeing the memory via - :function:`pygsti.tools.sharedmemtools.cleanup_shared_ndarray`. + :func:`pygsti.tools.sharedmemtools.cleanup_shared_ndarray`. Returns ------- @@ -306,7 +306,7 @@ def gather_jtf(self, jtf, return_shared=False): in a small performance gain because the array used internally to gather the results can be returned directly. When `True` a shared memory handle is also returned, and the caller assumes responsibilty for freeing the memory via - :function:`pygsti.tools.sharedmemtools.cleanup_shared_ndarray`. + :func:`pygsti.tools.sharedmemtools.cleanup_shared_ndarray`. Returns ------- @@ -525,7 +525,7 @@ def fill_jtj(self, j, jtj, shared_mem_buf=None): shared_mem_buf : tuple or None Scratch space of shared memory used to speed up repeated calls to `fill_jtj`. - If not none, the value returned from :method:`allocate_jtj_shared_mem_buf`. + If not none, the value returned from :meth:`allocate_jtj_shared_mem_buf`. Returns ------- @@ -535,7 +535,7 @@ def fill_jtj(self, j, jtj, shared_mem_buf=None): def allocate_jtj_shared_mem_buf(self): """ - Allocate scratch space to be used for repeated calls to :method:`fill_jtj`. + Allocate scratch space to be used for repeated calls to :meth:`fill_jtj`. Returns ------- @@ -549,12 +549,12 @@ def allocate_jtj_shared_mem_buf(self): def deallocate_jtj_shared_mem_buf(self, jtj_buf): """ - Frees the scratch memory allocated by :method:`allocate_jtj_shared_mem_buf`. 
+ Frees the scratch memory allocated by :meth:`allocate_jtj_shared_mem_buf`. Parameters ---------- jtj_buf : tuple or None - The value returned from :method:`allocate_jtj_shared_mem_buf` + The value returned from :meth:`allocate_jtj_shared_mem_buf` """ pass @@ -772,7 +772,7 @@ def allscatter_x(self, global_x, x): so there's no need to do any MPI communication. x : numpy.array or LocalNumpyArray - The output vector, typically a slice of `global_x`.. + The output vector, typically a slice of `global_x`. Returns ------- @@ -837,7 +837,7 @@ def gather_jtj(self, jtj, return_shared=False): in a small performance gain because the array used internally to gather the results can be returned directly. When `True` a shared memory handle is also returned, and the caller assumes responsibilty for freeing the memory via - :function:`pygsti.tools.sharedmemtools.cleanup_shared_ndarray`. + :func:`pygsti.tools.sharedmemtools.cleanup_shared_ndarray`. Returns ------- @@ -890,7 +890,7 @@ def gather_jtf(self, jtf, return_shared=False): in a small performance gain because the array used internally to gather the results can be returned directly. When `True` a shared memory handle is also returned, and the caller assumes responsibilty for freeing the memory via - :function:`pygsti.tools.sharedmemtools.cleanup_shared_ndarray`. + :func:`pygsti.tools.sharedmemtools.cleanup_shared_ndarray`. Returns ------- @@ -1208,7 +1208,7 @@ def fill_jtj(self, j, jtj, shared_mem_buf=None): shared_mem_buf : tuple or None Scratch space of shared memory used to speed up repeated calls to `fill_jtj`. - If not none, the value returned from :method:`allocate_jtj_shared_mem_buf`. + If not none, the value returned from :meth:`allocate_jtj_shared_mem_buf`. Returns ------- @@ -1218,7 +1218,7 @@ def fill_jtj(self, j, jtj, shared_mem_buf=None): def allocate_jtj_shared_mem_buf(self): """ - Allocate scratch space to be used for repeated calls to :method:`fill_jtj`. + Allocate scratch space to be used for repeated calls to :meth:`fill_jtj`. Returns ------- @@ -1232,12 +1232,12 @@ def allocate_jtj_shared_mem_buf(self): def deallocate_jtj_shared_mem_buf(self, jtj_buf): """ - Frees the scratch memory allocated by :method:`allocate_jtj_shared_mem_buf`. + Frees the scratch memory allocated by :meth:`allocate_jtj_shared_mem_buf`. Parameters ---------- jtj_buf : tuple or None - The value returned from :method:`allocate_jtj_shared_mem_buf` + The value returned from :meth:`allocate_jtj_shared_mem_buf` """ buf, buf_shm = jtj_buf _smt.cleanup_shared_ndarray(buf_shm) diff --git a/pygsti/optimize/optimize.py b/pygsti/optimize/optimize.py index 238826701..fcb0835f0 100644 --- a/pygsti/optimize/optimize.py +++ b/pygsti/optimize/optimize.py @@ -58,7 +58,7 @@ def minimize(fn, x0, method='cg', callback=None, callback : function, optional A callback function to be called in order to track optimizer progress. Should have signature: myCallback(x, f=None, accepted=None). Note that - create_objfn_printer(...) function can be used to create a callback. + `create_objfn_printer(...)` function can be used to create a callback. tol : float, optional Tolerance value used for all types of tolerances available in a given method. diff --git a/pygsti/processors/compilationrules.py b/pygsti/processors/compilationrules.py index d2cb3e2da..7dcd0ec9b 100644 --- a/pygsti/processors/compilationrules.py +++ b/pygsti/processors/compilationrules.py @@ -50,7 +50,7 @@ class CompilationRules(object): Values are 2-tuples of (gate unitary, gate template). 
The gate unitary can either be a unitary matrix, function returning a matrix, or None if the gate name is a standard PyGSTi unitary. The gate template is either a Circuit - with local state space labels (i.e. 0..k-1 for k qubits) or a function that takes + with local state space labels (i.e. `0..k-1` for k qubits) or a function that takes the target gate label and returns the proper Circuit. If the key is a gate label, the gate template (second entry of the value tuple) MUST be a Circuit with absolute state space labels. @@ -1178,7 +1178,7 @@ def _get_nonlocal_compilation_of(self, oplabel, force=False, Get a potentially non-local compilation of `oplabel`. This function does *not* add this compilation to the library, it merely - returns it. To add it, use :method:`add_nonlocal_compilation_of`. + returns it. To add it, use :meth:`add_nonlocal_compilation_of`. This method currently only generates a compilation for a non-local CNOT, up to arbitrary Pauli gates, between a pair of unconnected qubits. It diff --git a/pygsti/processors/processorspec.py b/pygsti/processors/processorspec.py index f88853b70..bfec7e562 100644 --- a/pygsti/processors/processorspec.py +++ b/pygsti/processors/processorspec.py @@ -1106,7 +1106,7 @@ def compute_multiqubit_inversion_relations(self): gate_inverse[`name1`] = `name2` and gate_inverse[`name2`] = `name1` 1-qubit gates are not computed by this method, as they are be computed by the method - :method:`compute_one_qubit_gate_relations`. + :meth:`compute_one_qubit_gate_relations`. Returns ------- diff --git a/pygsti/protocols/confidenceregionfactory.py b/pygsti/protocols/confidenceregionfactory.py index 155e6572a..5c1eb6806 100644 --- a/pygsti/protocols/confidenceregionfactory.py +++ b/pygsti/protocols/confidenceregionfactory.py @@ -298,7 +298,7 @@ def can_construct_views(self): Checks whether this factory has enough information to construct 'views' of itself. `ConfidenceRegionFactoryView` view objects are created using the - :method:`view` method, which can in turn be used to construct + :meth:`view` method, which can in turn be used to construct confidence intervals. Returns @@ -340,7 +340,7 @@ def compute_hessian(self, comm=None, mem_limit=None, approximate=False): approximate : bool, optional Whether to compute the true Hessian or just an approximation of it. - See :function:`logl_approximate_hessian`. Setting to True can + See :func:`logl_approximate_hessian`. Setting to True can significantly reduce the run time. Returns @@ -819,7 +819,7 @@ def __init__(self, model, inv_projected_hessian, mlgst_params, confidence_level, Creates a new ConfidenceRegionFactoryView. Usually this constructor is not called directly, and objects of - this type are obtained by calling the :method:`view` method of + this type are obtained by calling the :meth:`view` method of a `ConfidenceRegionFactory` object. Parameters diff --git a/pygsti/protocols/estimate.py b/pygsti/protocols/estimate.py index 03d51fd38..244a8bc3b 100644 --- a/pygsti/protocols/estimate.py +++ b/pygsti/protocols/estimate.py @@ -751,7 +751,7 @@ def final_objective_fn_cache(self, resource_alloc=None): The resource allocation object used to create the MDC store underlying the objective function. This can just be left as `None` unless multiple processors are being utilized - and in this case the *cached* objective function doesn't even benefit from these processors (but calls to - :method:`final_objective_fn` will return an objective function setup for multiple processors). 
+ :meth:`final_objective_fn` will return an objective function setup for multiple processors). Note that this argument is only used when there is no existing cached objective function and an underlying MDC store needs to be created. diff --git a/pygsti/protocols/freeformsim.py b/pygsti/protocols/freeformsim.py index 888822011..6c34a1adb 100644 --- a/pygsti/protocols/freeformsim.py +++ b/pygsti/protocols/freeformsim.py @@ -96,7 +96,7 @@ class ModelFreeformSimulator(FreeformDataSimulator): Holds a dictionary of models and provides basic functionality for computing probabilities, final states, and process matrices corresponding to circuits - which make implementing :method:`compute_freeform_data` easier. + which make implementing :meth:`compute_freeform_data` easier. Parameters ---------- diff --git a/pygsti/protocols/gst.py b/pygsti/protocols/gst.py index 6ef5b73ad..601bd3b87 100644 --- a/pygsti/protocols/gst.py +++ b/pygsti/protocols/gst.py @@ -147,13 +147,13 @@ class StandardGSTDesign(GateSetTomographyDesign): The processor API used by this experiment design. prep_fiducial_list_or_filename : list or str - A list of preparation fiducial :class:`Circuit`s or the path to a filename containing them. + A list of preparation fiducial :class:`Circuit` objects or the path to a filename containing them. meas_fiducial_list_or_filename : list or str - A list of measurement fiducial :class:`Circuit`s or the path to a filename containing them. + A list of measurement fiducial :class:`Circuit` objects or the path to a filename containing them. germ_list_or_filename : list or str - A list of germ :class:`Circuit`s or the path to a filename containing them. + A list of germ :class:`Circuit` objects or the path to a filename containing them. max_lengths : list List of integers, one per LSGST iteration, which set truncation lengths @@ -323,9 +323,10 @@ def copy_with_maxlengths(self, max_lengths, germ_length_limits=None, `dscheck` (only relevant when `dscheck` is not None). "raise" causes a ValueError to be raised; "drop" causes the missing sequences to be dropped from the returned set. - + + Returns ------- - StandardGSTDesign + StandardGSTDesign """ if germ_length_limits is None: gll = self.germ_length_limits @@ -587,11 +588,12 @@ class GSTBadFitOptions(_NicelySerializable): actions : tuple, optional Actions to take when a GST fit is unsatisfactory. Allowed actions include: - - 'wildcard': Find an admissable wildcard model... - - 'ddist_wildcard': Fits a single parameter wildcard model in which + + * 'wildcard': Find an admissable wildcard model. + * 'ddist_wildcard': Fits a single parameter wildcard model in which the amount of wildcard error added to an operation is proportional to the diamond distance between that operation and the target. - - 'robust': scale data according out "robust statistics v1" algorithm, + * 'robust': scale data according out "robust statistics v1" algorithm, where we drastically scale down (reduce) the data due to especially poorly fitting circuits. Namely, if a circuit's log-likelihood ratio exceeds the 95% confidence region about its expected value (the # of @@ -599,17 +601,17 @@ class GSTBadFitOptions(_NicelySerializable): by the `expected_value / actual_value`, so that the new value exactly matches what would be expected. Ideally there are only a few of these "outlier" circuits, which correspond errors in the measurement apparatus. 
- - 'Robust': same as 'robust', but re-optimize the final objective function + * 'Robust': same as 'robust', but re-optimize the final objective function (usually the log-likelihood) after performing the scaling to get the final estimate. - - 'robust+': scale data according out "robust statistics v2" algorithm, + * 'robust+': scale data according out "robust statistics v2" algorithm, which performs the v1 algorithm (see 'robust' above) and then further rescales all the circuit data to achieve the desired chi2 distribution of per-circuit goodness-of-fit values *without reordering* these values. - - 'Robust+': same as 'robust+', but re-optimize the final objective function + * 'Robust+': same as 'robust+', but re-optimize the final objective function (usually the log-likelihood) after performing the scaling to get the final estimate. - - 'do nothing': do not perform any additional actions. Used to help avoid + * 'do nothing': do not perform any additional actions. Used to help avoid the need for special cases when working with multiple types of bad-fit actions. wildcard_budget_includes_spam : bool, optional @@ -734,7 +736,7 @@ def cast(cls, obj): ---------- obj : object Object to cast. Can be a `GSTObjFnBuilders` (naturally), a - dictionary of :method:`create_from` arguments (or None), or a + dictionary of :meth:`create_from` arguments (or None), or a list or tuple of the `(iteration_builders, final_builders)` constructor arguments. Returns @@ -899,7 +901,7 @@ def to_dictionary(self, model, unreliable_ops=(), verbosity=0): This essentially renders the gauge-optimization directives within this object in an "expanded" form for either running gauge optimization (e.g. within - a :method:`GateSetTomography.run` call) or for constructing the would-be gauge + a :meth:`GateSetTomography.run` call) or for constructing the would-be gauge optimization call arguments so they can be slightly modeified before passing them in as the actual gauge-optimization suite used in an analysis (the resulting dictionary can be used to initialize a new `GSTGaugeOptSuite` object @@ -2549,7 +2551,7 @@ def _reoptimize_with_weights(mdc_objfn, circuit_weights_dict, optimizer, verbosi circuit_weights_dict : dict A dictionary of circuit weights, such as that returned by - :function:`_compute_robust_scaling`, giving the data-count scaling factors. + :func:`_compute_robust_scaling`, giving the data-count scaling factors. objfn_builder : ObjectiveFunctionBuilder The objective function (builder) that represents the final stage of diff --git a/pygsti/protocols/modeltest.py b/pygsti/protocols/modeltest.py index 5a740c418..133b94e34 100644 --- a/pygsti/protocols/modeltest.py +++ b/pygsti/protocols/modeltest.py @@ -31,7 +31,7 @@ class ModelTest(_proto.Protocol): Parameters ---------- model_to_test : Model - The model to compare with data when :method:`run` is called. + The model to compare with data when :meth:`run` is called. target_model : Model, optional The ideal or desired model of perfect operations. It is often useful to bundle this @@ -77,9 +77,9 @@ def create_objective_builder(cls, obj): ---------- obj : object If `obj` is already an :class:`ObjectiveFunctionBuilder` it is used directly. A - dictionary is assumed to hold arguments of :method:`ObjectiveFunctionBuilder.simple`. + dictionary is assumed to hold arguments of :meth:`ObjectiveFunctionBuilder.simple`. A list or tuple is assumed to hold positional arguments of - :method:`ObjectiveFunctionBuilder.__init__`. + :meth:`ObjectiveFunctionBuilder.__init__`. 
Returns ------- diff --git a/pygsti/protocols/protocol.py b/pygsti/protocols/protocol.py index 9026a9067..cf7d7cc63 100644 --- a/pygsti/protocols/protocol.py +++ b/pygsti/protocols/protocol.py @@ -260,9 +260,9 @@ def run(self, data, memlimit=None, comm=None): class ProtocolRunner(object): """ - Used to run :class:`Protocol`(s) on an entire *tree* of data + Used to run :class:`Protocol` objects on an entire *tree* of data - This class provides a way of combining multiple calls to :method:`Protocol.run`, + This class provides a way of combining multiple calls to :meth:`Protocol.run`, potentially running multiple protocols on different data. From the outside, a :class:`ProtocolRunner` object behaves similarly, and can often be used interchangably, with a Protocol object. It posesses a `run` method that takes a @@ -553,7 +553,7 @@ class ExperimentDesign(_TreeNode, _MongoSerializable): names (the same as the keys of `children`). If None, then the keys of `children` must be strings and are used as directory names. Directory names are used when saving the object (via - :method:`write`). + :meth:`write`). child_category : str, optional The category that describes the children of this object. This @@ -663,7 +663,7 @@ def __init__(self, circuits=None, qubit_labels=None, names (the same as the keys of `children`). If None, then the keys of `children` must be strings and are used as directory names. Directory names are used when saving the object (via - :method:`write`). + :meth:`write`). Returns ------- @@ -1214,7 +1214,7 @@ class CombinedExperimentDesign(ExperimentDesign): # for multiple designs on the A dictionary of other :class:`ExperimentDesign` objects whose keys are names for each sub-edesign (used for directories and to index the sub-edesigns from this experiment design). If a list is given instead, - a default names of the form "**" are used. + default names of the form " `**` " are used. all_circuits : list, optional A list of :class:`Circuit`s, specifying all the circuits needing @@ -1231,7 +1231,7 @@ class CombinedExperimentDesign(ExperimentDesign): # for multiple designs on the names (the same as the keys of `sub_designs`). If None, then the keys of `sub_designs` must be strings and are used as directory names. Directory names are used when saving the object (via - :method:`write`). + :meth:`write`). interleave : bool, optional Whether the circuits of the `sub_designs` should be interleaved to @@ -1277,7 +1277,7 @@ def __init__(self, sub_designs, all_circuits=None, qubit_labels=None, sub_design A dictionary of other :class:`ExperimentDesign` objects whose keys are names for each sub-edesign (used for directories and to index the sub-edesigns from this experiment design). If a list is given instead, - a default names of the form "**" are used. + default names of the form " `**` " are used. all_circuits : list, optional A list of :class:`Circuit`s, specifying all the circuits needing @@ -1294,7 +1294,7 @@ def __init__(self, sub_designs, all_circuits=None, qubit_labels=None, sub_design names (the same as the keys of `sub_designs`). If None, then the keys of `sub_designs` must be strings and are used as directory names. Directory names are used when saving the object (via - :method:`write`). + :meth:`write`). interleave : bool, optional Whether the circuits of the `sub_designs` should be interleaved to @@ -2538,6 +2538,7 @@ class ProtocolResultsDir(_TreeNode, _MongoSerializable): child-:class:`ProtocolResultsDir` objects representing sub-directories.
@@ -2680,6 +2681,7 @@ def __init__(self, data, protocol_results=None, children=None): Create a new ProtocolResultsDir object. This container object holds two things: + 1. A `.for_protocol` dictionary of :class:`ProtocolResults` corresponding to different protocols (keys are protocol names). @@ -3213,7 +3215,7 @@ def _convert_nameddict_attributes(obj): """ A helper function that converts the elements of the "_nameddict_attributes" attribute of several classes to the (key, value, type) array expected by - :method:`NamedDict.create_nested`. + :meth:`NamedDict.create_nested`. """ keys_vals_types = [] for tup in obj._nameddict_attributes: diff --git a/pygsti/protocols/rb.py b/pygsti/protocols/rb.py index a9c14551a..1741f0fe1 100644 --- a/pygsti/protocols/rb.py +++ b/pygsti/protocols/rb.py @@ -24,30 +24,29 @@ class CliffordRBDesign(_vb.BenchmarkingDesign): """ Experiment design for Clifford randomized benchmarking. - This encapsulates a "Clifford randomized benchmarking" (CRB) experiment. CRB is the RB protocol defined + This encapsulates a "Clifford randomized benchmarking" (CRB) experiment. CRB is the RB protocol defined in "Scalable and robust randomized benchmarking of quantum processes", Magesan et al. PRL 106 180504 (2011). - The circuits created by this function will respect the connectivity and gate-set of the device encoded - by `pspec` (see the :class:`QubitProcessorSpec` object docstring for how to construct the relevant `pspec` + The circuits created by this function will respect the connectivity and gate-set of the device encoded by + `pspec` (see the :class:`QubitProcessorSpec` object docstring for how to construct the relevant `pspec` for a device). - Note that this function uses the convention that a depth "l" CRB circuit consists of "l"+2 Clifford gates + Note that this function uses the convention that a depth "l" CRB circuit consists of "l"+2 Clifford gates before compilation. Parameters ---------- pspec : QubitProcessorSpec - The QubitProcessorSpec for the device that the CRB experiment is being generated for, which defines the - "native" gate-set and the connectivity of the device. The returned CRB circuits will be over - the gates in `pspec`, and will respect the connectivity encoded by `pspec`. + The QubitProcessorSpec for the device that the CRB experiment is being generated for, which defines the + "native" gate-set and the connectivity of the device. The returned CRB circuits will be over the gates in + `pspec`, and will respect the connectivity encoded by `pspec`. clifford_compilations : dict - A dictionary with the potential keys `'absolute'` and `'paulieq'` and corresponding - :class:`CompilationRules` values. These compilation rules specify how to compile the - "native" gates of `pspec` into Clifford gates. + A dictionary with the potential keys `'absolute'` and `'paulieq'` and corresponding :class:`CompilationRules` values. + These compilation rules specify how to compile the "native" gates of `pspec` into Clifford gates. depths : list of ints - The "CRB depths" of the circuit; a list of integers >= 0. The CRB length is the number of Cliffords - in the circuit - 2 *before* each Clifford is compiled into the native gate-set. + The "CRB depths" of the circuit; a list of integers >= 0.
The CRB length is the number of Cliffords in the + circuit - 2 *before* each Clifford is compiled into the native gate-set. circuits_per_depth : int The number of (possibly) different CRB circuits sampled at each length. @@ -56,7 +55,7 @@ class CliffordRBDesign(_vb.BenchmarkingDesign): If not None, a list of the qubits that the RB circuits are to be sampled for. This should be all or a subset of the qubits in the device specified by the QubitProcessorSpec `pspec`. If None, it is assumed that the RB circuit should be over all the qubits. Note that the - ordering of this list is the order of the ``wires'' in the returned circuit, but is otherwise + ordering of this list is the order of the "wires" in the returned circuit, but is otherwise irrelevant. If desired, a circuit that explicitly idles on the other qubits can be obtained by using methods of the Circuit object. @@ -70,7 +69,7 @@ class CliffordRBDesign(_vb.BenchmarkingDesign): Some of the Clifford compilation algorithms in pyGSTi (including the default algorithm) are randomized, and the lowest-cost circuit is chosen from all the circuit generated in the iterations of the algorithm. This is the number of iterations used. The time required to - generate a CRB circuit is linear in `citerations` * (CRB length + 2). Lower-depth / lower 2-qubit + generate a CRB circuit is linear in `citerations * (CRB length + 2)`. Lower-depth / lower 2-qubit gate count compilations of the Cliffords are important in order to successfully implement CRB on more qubits. @@ -78,19 +77,21 @@ class CliffordRBDesign(_vb.BenchmarkingDesign): A list of arguments that are handed to compile_clifford() function, which includes all the optional arguments of compile_clifford() *after* the `iterations` option (set by `citerations`). In order, this list should be values for: - - algorithm : str. A string that specifies the compilation algorithm. The default in - compile_clifford() will always be whatever we consider to be the 'best' all-round - algorith, - - aargs : list. A list of optional arguments for the particular compilation algorithm. - - costfunction : 'str' or function. The cost-function from which the "best" compilation - for a Clifford is chosen from all `citerations` compilations. The default costs a - circuit as 10x the num. of 2-qubit gates in the circuit + 1x the depth of the circuit. - - prefixpaulis : bool. Whether to prefix or append the Paulis on each Clifford. - - paulirandomize : bool. Whether to follow each layer in the Clifford circuit with a - random Pauli on each qubit (compiled into native gates). I.e., if this is True the - native gates are Pauli-randomized. When True, this prevents any coherent errors adding - (on average) inside the layers of each compiled Clifford, at the cost of increased - circuit depth. Defaults to False. + + * algorithm : str. A string that specifies the compilation algorithm. The default in + compile_clifford() will always be whatever we consider to be the 'best' all-round + algorithm. + * aargs : list. A list of optional arguments for the particular compilation algorithm. + * costfunction : 'str' or function. The cost-function from which the "best" compilation + for a Clifford is chosen from all `citerations` compilations. The default costs a + circuit as 10x the num. of 2-qubit gates in the circuit + 1x the depth of the circuit. + * prefixpaulis : bool. Whether to prefix or append the Paulis on each Clifford. + * paulirandomize : bool. 
Whether to follow each layer in the Clifford circuit with a + random Pauli on each qubit (compiled into native gates). I.e., if this is True the + native gates are Pauli-randomized. When True, this prevents any coherent errors adding + (on average) inside the layers of each compiled Clifford, at the cost of increased + circuit depth. Defaults to False. + For more information on these options, see the compile_clifford() docstring. descriptor : str, optional @@ -123,15 +124,15 @@ def from_existing_circuits(cls, circuits_and_idealouts_by_depth, qubit_labels=No Parameters ---------- circuits_and_idealouts_by_depth : dict - A dictionary whose keys are integer depths and whose values are lists - of `(circuit, ideal_outcome)` 2-tuples giving each RB circuit and its + A dictionary whose keys are integer depths and whose values are lists of `(circuit, ideal_outcome)` + 2-tuples giving each RB circuit and its ideal (correct) outcome. qubit_labels : list, optional If not None, a list of the qubits that the RB circuits are to be sampled for. This should be all or a subset of the qubits in the device specified by the QubitProcessorSpec `pspec`. If None, it is assumed that the RB circuit should be over all the qubits. Note that the - ordering of this list is the order of the ``wires'' in the returned circuit, but is otherwise + ordering of this list is the order of the "wires" in the returned circuit, but is otherwise irrelevant. If desired, a circuit that explicitly idles on the other qubits can be obtained by using methods of the Circuit object. @@ -153,19 +154,21 @@ def from_existing_circuits(cls, circuits_and_idealouts_by_depth, qubit_labels=No A list of arguments that are handed to compile_clifford() function, which includes all the optional arguments of compile_clifford() *after* the `iterations` option (set by `citerations`). In order, this list should be values for: - - algorithm : str. A string that specifies the compilation algorithm. The default in - compile_clifford() will always be whatever we consider to be the 'best' all-round - algorith, - - aargs : list. A list of optional arguments for the particular compilation algorithm. - - costfunction : 'str' or function. The cost-function from which the "best" compilation - for a Clifford is chosen from all `citerations` compilations. The default costs a - circuit as 10x the num. of 2-qubit gates in the circuit + 1x the depth of the circuit. - - prefixpaulis : bool. Whether to prefix or append the Paulis on each Clifford. - - paulirandomize : bool. Whether to follow each layer in the Clifford circuit with a - random Pauli on each qubit (compiled into native gates). I.e., if this is True the - native gates are Pauli-randomized. When True, this prevents any coherent errors adding - (on average) inside the layers of each compiled Clifford, at the cost of increased - circuit depth. Defaults to False. + + * algorithm : str. A string that specifies the compilation algorithm. The default in + compile_clifford() will always be whatever we consider to be the 'best' all-round + algorithm. + * aargs : list. A list of optional arguments for the particular compilation algorithm. + * costfunction : 'str' or function. The cost-function from which the "best" compilation + for a Clifford is chosen from all `citerations` compilations. The default costs a + circuit as 10x the num. of 2-qubit gates in the circuit + 1x the depth of the circuit. + * prefixpaulis : bool. Whether to prefix or append the Paulis on each Clifford. + * paulirandomize : bool. 
Whether to follow each layer in the Clifford circuit with a + random Pauli on each qubit (compiled into native gates). I.e., if this is True the + native gates are Pauli-randomized. When True, this prevents any coherent errors adding + (on average) inside the layers of each compiled Clifford, at the cost of increased + circuit depth. Defaults to False. + For more information on these options, see the compile_clifford() docstring. descriptor : str, optional @@ -316,10 +319,9 @@ class DirectRBDesign(_vb.BenchmarkingDesign): in `pspec`. sampler : str or function, optional - If a string, this should be one of: - {'edgegrab', pairingQs', 'Qelimination', 'co2Qgates', 'local'}. + If a string, this should be one of: {'edgegrab', 'pairingQs', 'Qelimination', 'co2Qgates', 'local'}. Except for 'local', this corresponds to sampling layers according to the sampling function - in rb.sampler named circuit_layer_by_* (with * replaced by 'sampler'). For 'local', this + in rb.sampler named `circuit_layer_by_*` (with `*` replaced by 'sampler'). For 'local', this corresponds to sampling according to rb.sampler.circuit_layer_of_oneQgates [which is not a valid form of sampling for n-qubit DRB, but is not explicitly forbidden in this function]. If `sampler` is a function, it should be a function that takes as the first argument a @@ -425,8 +427,7 @@ def from_existing_circuits(cls, circuits_and_idealouts_by_depth, qubit_labels=No in `pspec`. sampler : str or function, optional - If a string, this should be one of: - {'edgegrab', pairingQs', 'Qelimination', 'co2Qgates', 'local'}. + If a string, this should be one of: {'edgegrab', 'pairingQs', 'Qelimination', 'co2Qgates', 'local'}. Except for 'local', this corresponds to sampling layers according to the sampling function in rb.sampler named circuit_layer_by_* (with * replaced by 'sampler'). For 'local', this corresponds to sampling according to rb.sampler.circuit_layer_of_oneQgates [which is not @@ -622,8 +623,7 @@ class MirrorRBDesign(_vb.BenchmarkingDesign): the option of Pauli randomization and local Clifford twirling. To implement mirror RB it is necessary for U^(-1) to be in the gate set for every gate U in the gate set. - **THIS METHOD IS IN DEVELOPEMENT. DO NOT EXPECT THAT THIS FUNCTION WILL BEHAVE THE SAME IN FUTURE RELEASES - OF PYGSTI!** + **THIS METHOD IS IN DEVELOPMENT. DO NOT EXPECT THAT THIS FUNCTION WILL BEHAVE THE SAME IN FUTURE RELEASES OF PYGSTI!** Parameters ---------- @@ -640,20 +640,17 @@ class MirrorRBDesign(_vb.BenchmarkingDesign): The "mirror RB depths" of the circuits, which is closely related to the circuit depth. A MRB length must be an even integer, and can be zero. - - If `localclifford` and `paulirandomize` are False, the depth of a sampled circuit = the MRB length. + * If `localclifford` and `paulirandomize` are False, the depth of a sampled circuit = the MRB length. The first length/2 layers are all sampled independently according to the sampler specified by `sampler`. The remaining half of the circuit is the "inversion" circuit that is determined by the first half. - - - If `paulirandomize` is True and `localclifford` is False, the depth of a circuit is - 2*length+1 with odd-indexed layers sampled according to the sampler specified by `sampler, and + * If `paulirandomize` is True and `localclifford` is False, the depth of a circuit is + `2*length+1` with odd-indexed layers sampled according to the sampler specified by `sampler`, and the zeroth layer + the even-indexed layers consisting of random 1-qubit Pauli gates.
- - - If `paulirandomize` and `localclifford` are True, the depth of a circuit is - 2*length+1 + X where X is a random variable (between 0 and normally <= ~12-16) that accounts for + * If `paulirandomize` and `localclifford` are True, the depth of a circuit is + `2*length+1 + X` where X is a random variable (between 0 and normally <= ~12-16) that accounts for the depth from the layer of random 1-qubit Cliffords at the start and end of the circuit. - - - If `paulirandomize` is False and `localclifford` is True, the depth of a circuit is + * If `paulirandomize` is False and `localclifford` is True, the depth of a circuit is length + X where X is a random variable (between 0 and normally <= ~12-16) that accounts for the depth from the layer of random 1-qubit Cliffords at the start and end of the circuit. @@ -664,13 +661,13 @@ class MirrorRBDesign(_vb.BenchmarkingDesign): If not None, a list of the qubits that the RB circuit is to be sampled for. This should be all or a subset of the qubits in the device specified by the QubitProcessorSpec `pspec`. If None, it is assumed that the RB circuit should be over all the qubits. Note that the - ordering of this list is the order of the ``wires'' in the returned circuit, but is otherwise + ordering of this list is the order of the "wires" in the returned circuit, but is otherwise irrelevant. sampler : str or function, optional If a string, this should be one of: {'edgegrab', 'Qelimination', 'co2Qgates', 'local'}. Except for 'local', this corresponds to sampling layers according to the sampling function - in rb.sampler named circuit_layer_by* (with * replaced by 'sampler'). For 'local', this + in rb.sampler named `circuit_layer_by*` (with `*` replaced by 'sampler'). For 'local', this corresponds to sampling according to rb.sampler.circuit_layer_of_oneQgates [which is not a valid option for n-qubit MRB -- it results in sim. 1-qubit MRB -- but it is not explicitly forbidden by this function]. If `sampler` is a function, it should be a function that takes @@ -1113,7 +1110,7 @@ class RandomizedBenchmarkingResults(_proto.ProtocolResults): of the RB fit curve. defaultfit : str - The default key within `fits` to plot when calling :method:`plot`. + The default key within `fits` to plot when calling :meth:`plot`. """ def __init__(self, data, protocol_instance, fits, depths, defaultfit): diff --git a/pygsti/protocols/rpe.py b/pygsti/protocols/rpe.py index bd7ec1e09..250f3e4ef 100644 --- a/pygsti/protocols/rpe.py +++ b/pygsti/protocols/rpe.py @@ -30,25 +30,25 @@ class RobustPhaseEstimationDesign(_proto.CircuitListsDesign): outcomes_neg determine which of those computational basis states count towards each of the probabilities - P^{γ'γ}_{Ns} = |<γ' y| U^N |γ x>|² = |<γ' x| U^N |-γ y>|² = (1 ± sin(θ))/2 - P^{γ'γ}_{Nc} = |<γ' x| U^N |γ x>|² = |<γ' y| U^N | γ y>|² = (1 ± cos(θ))/2 + `P^{γ'γ}_{Ns} = |<γ' y| U^N |γ x>|² = |<γ' x| U^N |-γ y>|² = (1 ± sin(θ))/2` + `P^{γ'γ}_{Nc} = |<γ' x| U^N |γ x>|² = |<γ' y| U^N | γ y>|² = (1 ± cos(θ))/2` (Computational basis state measurements in neither of these sets are silently dropped.) - In the above, the +x refers to the |E_0> + |E_1> combination of eigenstates + In the above, the +x refers to the `|E_0> + |E_1>` combination of eigenstates of U, *not* of computational basis states. 
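Since the two probabilities above fix both the sine and cosine of θ, the raw angle of `U^N` can be recovered with a two-argument arctangent. A minimal sketch, assuming the '+' sign convention and simple positive/negative count totals (illustrative only, not the actual compute_raw_angles implementation):

    import math

    def raw_angle(sin_pos, sin_neg, cos_pos, cos_neg):
        # Under the assumed '+' convention, P_Ns = (1 + sin(theta))/2 and
        # P_Nc = (1 + cos(theta))/2, so invert both and take atan2.
        p_s = sin_pos / float(sin_pos + sin_neg)
        p_c = cos_pos / float(cos_pos + cos_neg)
        return math.atan2(2 * p_s - 1, 2 * p_c - 1)  # angle of U^N = N * phase(U)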
For instance, if U is rotation in the X basis, then cos_prep and cos_meas could be simply the identity: - |± U> = |0> ± |1> + `|± U> = |0> ± |1>` - where |±U> are the eigenstates of U, so that, in the notation of the above, + where `|±U>` are the eigenstates of U, so that, in the notation of the above, - |+x> = |+U> + |-U> = |0> + `|+x> = |+U> + |-U> = |0>` The circuit would then calculate - P^+_{Nc} = |<+x| U^N | +x>|² + `P^+_{Nc} = |<+x| U^N | +x>|²` provided that cos_outcomes_pos = [0] and cos_outcomes_neg = [1]. @@ -83,6 +83,7 @@ class RobustPhaseEstimationDesign(_proto.CircuitListsDesign): cos_outcomes_neg : + """ def __init__( @@ -109,25 +110,25 @@ def __init__( outcomes_neg determine which of those computational basis states count towards each of the probabilities - P^{γ'γ}_{Ns} = |<γ' y| U^N |γ x>|² = |<γ' x| U^N |-γ y>|² = (1 ± sin(θ))/2 - P^{γ'γ}_{Nc} = |<γ' x| U^N |γ x>|² = |<γ' y| U^N | γ y>|² = (1 ± cos(θ))/2 + `P^{γ'γ}_{Ns} = |<γ' y| U^N |γ x>|² = |<γ' x| U^N |-γ y>|² = (1 ± sin(θ))/2` + `P^{γ'γ}_{Nc} = |<γ' x| U^N |γ x>|² = |<γ' y| U^N | γ y>|² = (1 ± cos(θ))/2` (Computational basis state measurements in neither of these sets are silently dropped.) - In the above, the +x refers to the |E_0> + |E_1> combination of eigenstates + In the above, the +x refers to the `|E_0> + |E_1>` combination of eigenstates of U, *not* of computational basis states. For instance, if U is rotation in the X basis, then cos_prep and cos_meas could be simply the identity: - |± U> = |0> ± |1> + `|± U> = |0> ± |1>` - where |±U> are the eigenstates of U, so that, in the notation of the above, + where `|±U>` are the eigenstates of U, so that, in the notation of the above, - |+x> = |+U> + |-U> = |0> + `|+x> = |+U> + |-U> = |0>` The circuit would then calculate - P^+_{Nc} = |<+x| U^N | +x>|² + `P^+_{Nc} = |<+x| U^N | +x>|²` provided that cos_outcomes_pos = [0] and cos_outcomes_neg = [1]. """ @@ -188,6 +189,7 @@ def parse_dataset(self, design, dataset): dataset : + """ measured = _collections.OrderedDict() for n, sin_circ, cos_circ in zip(design.req_lengths, *design.circuit_lists): @@ -204,7 +206,7 @@ def compute_raw_angles(self, measured): """ Determine the raw angles from the count data. - This corresponds to the angle of U^N, i.e., it is N times the phase of U. + This corresponds to the angle of `U^N`, i.e., it is N times the phase of U. Parameters ---------- @@ -214,6 +216,7 @@ def compute_raw_angles(self, measured): Returns ------- + """ angles = _collections.OrderedDict() diff --git a/pygsti/protocols/stability.py b/pygsti/protocols/stability.py index 2ec29b96c..aebd453f9 100644 --- a/pygsti/protocols/stability.py +++ b/pygsti/protocols/stability.py @@ -49,24 +49,24 @@ class StabilityAnalysis(_proto.Protocol): transform : str, optional The type of transform to use in the spectral analysis. Options are: - - 'auto': An attempt is made to choose the best transform given the "meta-data" of the data, - e.g., the variability in the time-step between data points. For beginners, - 'auto' is the best option. If you are familiar with the underlying methods, the - meta-data of the input, and the relative merits of the different transform, then - it is probably better to choose this yourself -- as the auto-selection is not hugely - sophisticated. + * 'auto': An attempt is made to choose the best transform given the "meta-data" of the data, + e.g., the variability in the time-step between data points. For beginners, + 'auto' is the best option. 
If you are familiar with the underlying methods, the + meta-data of the input, and the relative merits of the different transforms, then + it is probably better to choose this yourself -- as the auto-selection is not hugely + sophisticated. - - 'dct' : The Type-II Discrete Cosine Transform (with an orthogonal normalization). This is - the only tested option, and it is our recommended option when the data is - approximately equally-spaced, i.e., the time-step between each "click" for each - circuit is almost a constant. (the DCT transform implicitly assumes that this - time-step is exactly constant) + * 'dct' : The Type-II Discrete Cosine Transform (with an orthogonal normalization). This is + the only tested option, and it is our recommended option when the data is + approximately equally-spaced, i.e., the time-step between each "click" for each + circuit is almost a constant. (the DCT transform implicitly assumes that this + time-step is exactly constant) - - 'dft' : The discrete Fourier transform (with an orthogonal normalization). *** This is an - experimental feature, and the results are unreliable with this transform *** + * 'dft' : The discrete Fourier transform (with an orthogonal normalization). + **This is an experimental feature, and the results are unreliable with this transform** - - 'lsp' : The Lomb-Scargle periodogram. *** This is an experimental feature, and the code is - untested with this transform *** + * 'lsp' : The Lomb-Scargle periodogram. + **This is an experimental feature, and the code is untested with this transform** marginalize : str or bool, optional True, False or 'auto'. Whether or not to marginalize multi-qubit data, to look for instability @@ -176,19 +176,17 @@ class StabilityAnalysis(_proto.Protocol): model for each probability trajectory, after that parameterized model has been selected with the model selection methods. Allowed values are: - - 'auto'. The estimation method is chosen automatically, default to the fast method that is also - reasonably reliable. - - - 'filter'. Performs a type of signal filtering: implements the transform used for generating power - spectra (e.g., the DCT), sets the amplitudes to zero for all freuquencies that the model selection - has not included in the model, inverts the transform, and then performs some minor post-processing - to guarantee probabilities within [0, 1]. This method is less statically well-founded than 'mle', - but it is faster and typically gives similar results. This method is not an option for - non-invertable transforms, such as the Lomb-Scargle periodogram. - - - 'mle'. Implements maximum likelihood estimation, on the parameterized model chosen by the model - selection. The most statistically well-founded option, but can be slower than 'filter' and relies - on numerical optimization. + * 'auto'. The estimation method is chosen automatically, defaulting to the fast method that is also + reasonably reliable. + * 'filter'. Performs a type of signal filtering: implements the transform used for generating power + spectra (e.g., the DCT), sets the amplitudes to zero for all frequencies that the model selection + has not included in the model, inverts the transform, and then performs some minor post-processing + to guarantee probabilities within [0, 1]. This method is less statistically well-founded than 'mle', + but it is faster and typically gives similar results. This method is not an option for + non-invertible transforms, such as the Lomb-Scargle periodogram. + * 'mle'.
Implements maximum likelihood estimation on the parameterized model chosen by the model + selection. The most statistically well-founded option, but can be slower than 'filter' and relies + on numerical optimization. modelselector : tuple, optional The model selection method. If not None, a "test class" tuple, specifying which test results to use to @@ -228,24 +226,21 @@ def __init__(self, significance=0.05, transform='auto', marginalize='auto', merg transform : str, optional The type of transform to use in the spectral analysis. Options are: - - 'auto': An attempt is made to choose the best transform given the "meta-data" of the data, - e.g., the variability in the time-step between data points. For beginners, - 'auto' is the best option. If you are familiar with the underlying methods, the - meta-data of the input, and the relative merits of the different transform, then - it is probably better to choose this yourself -- as the auto-selection is not hugely - sophisticated. - - - 'dct' : The Type-II Discrete Cosine Transform (with an orthogonal normalization). This is - the only tested option, and it is our recommended option when the data is - approximately equally-spaced, i.e., the time-step between each "click" for each - circuit is almost a constant. (the DCT transform implicitly assumes that this - time-step is exactly constant) - - - 'dft' : The discrete Fourier transform (with an orthogonal normalization). *** This is an - experimental feature, and the results are unreliable with this transform *** - - - 'lsp' : The Lomb-Scargle periodogram. *** This is an experimental feature, and the code is - untested with this transform *** + * 'auto': An attempt is made to choose the best transform given the "meta-data" of the data, + e.g., the variability in the time-step between data points. For beginners, + 'auto' is the best option. If you are familiar with the underlying methods, the + meta-data of the input, and the relative merits of the different transforms, then + it is probably better to choose this yourself -- as the auto-selection is not hugely + sophisticated. + * 'dct' : The Type-II Discrete Cosine Transform (with an orthogonal normalization). This is + the only tested option, and it is our recommended option when the data is + approximately equally-spaced, i.e., the time-step between each "click" for each + circuit is almost a constant. (the DCT transform implicitly assumes that this + time-step is exactly constant) + * 'dft' : The discrete Fourier transform (with an orthogonal normalization). + **This is an experimental feature, and the results are unreliable with this transform** + * 'lsp' : The Lomb-Scargle periodogram. + **This is an experimental feature, and the code is untested with this transform** marginalize : str or bool, optional True, False or 'auto'. Whether or not to marginalize multi-qubit data, to look for instability @@ -355,19 +350,17 @@ def __init__(self, significance=0.05, transform='auto', marginalize='auto', merg model for each probability trajectory, after that parameterized model has been selected with the model selection methods. Allowed values are: - - 'auto'. The estimation method is chosen automatically, default to the fast method that is also - reasonably reliable. - - - 'filter'.
Performs a type of signal filtering: implements the transform used for generating power - spectra (e.g., the DCT), sets the amplitudes to zero for all freuquencies that the model selection - has not included in the model, inverts the transform, and then performs some minor post-processing - to guarantee probabilities within [0, 1]. This method is less statically well-founded than 'mle', - but it is faster and typically gives similar results. This method is not an option for - non-invertable transforms, such as the Lomb-Scargle periodogram. - - - 'mle'. Implements maximum likelihood estimation, on the parameterized model chosen by the model - selection. The most statistically well-founded option, but can be slower than 'filter' and relies - on numerical optimization. + * 'auto'. The estimation method is chosen automatically, defaulting to the fast method that is also + reasonably reliable. + * 'filter'. Performs a type of signal filtering: implements the transform used for generating power + spectra (e.g., the DCT), sets the amplitudes to zero for all frequencies that the model selection + has not included in the model, inverts the transform, and then performs some minor post-processing + to guarantee probabilities within [0, 1]. This method is less statistically well-founded than 'mle', + but it is faster and typically gives similar results. This method is not an option for + non-invertible transforms, such as the Lomb-Scargle periodogram. + * 'mle'. Implements maximum likelihood estimation on the parameterized model chosen by the model + selection. The most statistically well-founded option, but can be slower than 'filter' and relies + on numerical optimization. modelselector : tuple, optional The model selection method. If not None, a "test class" tuple, specifying which test results to use to diff --git a/pygsti/protocols/vb.py b/pygsti/protocols/vb.py index 3011152ea..d1275668a 100644 --- a/pygsti/protocols/vb.py +++ b/pygsti/protocols/vb.py @@ -139,9 +139,9 @@ class PeriodicMirrorCircuitDesign(BenchmarkingDesign): """ Experiment design for periodic mirror-circuit benchmarking. - **THIS METHOD IS IN DEVELOPEMENT. DO NOT EXPECT THAT THIS FUNCTION WILL BEHAVE THE SAME IN FUTURE RELEASES + THIS METHOD IS IN DEVELOPMENT. DO NOT EXPECT THAT THIS FUNCTION WILL BEHAVE THE SAME IN FUTURE RELEASES OF PYGSTI! THE DOCSTRINGS SHOULD ALSO NOT BE TRUSTED -- MANY (MAYBE ALL) OF THEM ARE COPIED FROM THE - MIRRORBDESIGN OBJECT AND SO SOME BITS ARE WRONG OR NOT APPLICABLE.** + MIRRORRBDESIGN OBJECT AND SO SOME BITS ARE WRONG OR NOT APPLICABLE. Parameters ---------- @@ -153,21 +153,18 @@ class PeriodicMirrorCircuitDesign(BenchmarkingDesign): The "mirror RB depths" of the circuits, which is closely related to the circuit depth. A MRB length must be an even integer, and can be zero. - - If `localclifford` and `paulirandomize` are False, the depth of a sampled circuit = the MRB length. + * If `localclifford` and `paulirandomize` are False, the depth of a sampled circuit = the MRB length. The first length/2 layers are all sampled independently according to the sampler specified by `sampler`. The remaining half of the circuit is the "inversion" circuit that is determined by the first half.
- - - If `paulirandomize` is True and `localclifford` is False, the depth of a circuit is - 2*length+1 with odd-indexed layers sampled according to the sampler specified by `sampler, and + * If `paulirandomize` is True and `localclifford` is False, the depth of a circuit is + `2*length+1` with odd-indexed layers sampled according to the sampler specified by `sampler`, and the zeroth layer + the even-indexed layers consisting of random 1-qubit Pauli gates. - - - If `paulirandomize` and `localclifford` are True, the depth of a circuit is - 2*length+1 + X where X is a random variable (between 0 and normally <= ~12-16) that accounts for + * If `paulirandomize` and `localclifford` are True, the depth of a circuit is + `2*length+1 + X` where X is a random variable (between 0 and normally `<= ~12-16`) that accounts for the depth from the layer of random 1-qubit Cliffords at the start and end of the circuit. - - - If `paulirandomize` is False and `localclifford` is True, the depth of a circuit is - length + X where X is a random variable (between 0 and normally <= ~12-16) that accounts for + * If `paulirandomize` is False and `localclifford` is True, the depth of a circuit is + `length + X` where X is a random variable (between 0 and normally `<= ~12-16`) that accounts for the depth from the layer of random 1-qubit Cliffords at the start and end of the circuit. circuits_per_depth : int @@ -177,13 +174,13 @@ class PeriodicMirrorCircuitDesign(BenchmarkingDesign): If not None, a list of the qubits that the RB circuit is to be sampled for. This should be all or a subset of the qubits in the device specified by the QubitProcessorSpec `pspec`. If None, it is assumed that the RB circuit should be over all the qubits. Note that the - ordering of this list is the order of the ``wires'' in the returned circuit, but is otherwise + ordering of this list is the order of the "wires" in the returned circuit, but is otherwise irrelevant. sampler : str or function, optional If a string, this should be one of: {'pairingQs', 'Qelimination', 'co2Qgates', 'local'}. Except for 'local', this corresponds to sampling layers according to the sampling function - in rb.sampler named circuit_layer_by* (with * replaced by 'sampler'). For 'local', this + in rb.sampler named `circuit_layer_by*` (with `*` replaced by 'sampler'). For 'local', this corresponds to sampling according to rb.sampler.circuit_layer_of_oneQgates [which is not a valid option for n-qubit MRB -- it results in sim. 1-qubit MRB -- but it is not explicitly forbidden by this function]. If `sampler` is a function, it should be a function that takes @@ -238,7 +235,7 @@ def from_existing_circuits(cls, circuits_and_idealouts_by_depth, qubit_labels=No If not None, a list of the qubits that the RB circuit is to be sampled for. This should be all or a subset of the qubits in the device specified by the QubitProcessorSpec `pspec`. If None, it is assumed that the RB circuit should be over all the qubits. Note that the - ordering of this list is the order of the ``wires'' in the returned circuit, but is otherwise + ordering of this list is the order of the "wires" in the returned circuit, but is otherwise irrelevant. sampler : str or function, optional @@ -675,9 +672,9 @@ class ByDepthSummaryStatistics(SummaryStatistics): statistics_to_compute : tuple, optional A sequence of the statistic names to compute. Allowed names are: - 'success_counts', 'total_counts', 'hamming_distance_counts', 'success_probabilities', 'polarization', - 'adjusted_success_probabilities', 'two_q_gate_count', 'depth', 'idealout', 'circuit_index', - and 'width'. + 'success_counts', 'total_counts', 'hamming_distance_counts', 'success_probabilities', 'polarization', + 'adjusted_success_probabilities', 'two_q_gate_count', 'depth', 'idealout', 'circuit_index', + and 'width'.
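To make these per-depth statistics concrete, here is a hedged sketch computing 'success_probabilities' and 'polarization' from raw counts, using the polarization definition given in vbdataframe.py below; the function name and data layout are illustrative, not pyGSTi's internals:

    def summarize_by_depth(counts_by_depth, n_qubits):
        # counts_by_depth: {depth: [(success_count, total_count), ...]} (assumed layout)
        dim = 2 ** n_qubits
        summary = {}
        for depth, shots in counts_by_depth.items():
            sps = [succ / float(tot) for succ, tot in shots]         # success probabilities
            pols = [(s - 1.0 / dim) / (1 - 1.0 / dim) for s in sps]  # polarization
            summary[depth] = {'success_probabilities': sps, 'polarization': pols}
        return summary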
names_to_compute : tuple, optional A sequence of user-defined names for the statistics in `statistics_to_compute`. If `None`, then diff --git a/pygsti/protocols/vbdataframe.py b/pygsti/protocols/vbdataframe.py index 7e1e9df63..6c7bbeb57 100644 --- a/pygsti/protocols/vbdataframe.py +++ b/pygsti/protocols/vbdataframe.py @@ -43,7 +43,7 @@ def polarization_to_success_probability(p, n): def success_probability_to_polarization(s, n): """ Maps a success probability s for an n-qubit circuit to - the polarization, defined by p = (s - 1/2^n)/(1 - 1/2^n) + the polarization, defined by `p = (s - 1/2^n)/(1 - 1/2^n)` """ return (s - 1 / 2**n) / (1 - 1 / 2**n) @@ -57,9 +57,9 @@ def classify_circuit_shape(success_probabilities, total_counts, threshold, signi Returns an integer that classifies the input list of success probabilities (SPs) as either - -- "success": all SPs above the specified threshold, specified by the int 2. - -- "indeterminate": some SPs are above and some are below the threshold, specified by the int 1. - -- "fail": all SPs are below the threshold, specified by the int 0. + * "success": all SPs are above the specified threshold, specified by the int 2. + * "indeterminate": some SPs are above and some are below the threshold, specified by the int 1. + * "fail": all SPs are below the threshold, specified by the int 0. This classification is based on a hypothesis test whereby the null hypothesis is "success" or "fail". That is, the set of success probabilities is designated to be "indeterminate" @@ -355,24 +355,25 @@ def vb_data(self, metric='polarization', statistic='mean', lower_cutoff=0., no_d statistics : string, optional The statistic on the data to be computed at each value of (x, y). Options are: - - 'max': the maximum - - 'min': the minimum. - - 'mean': the mean. - - 'monotonic_max': the maximum of all the data with (x, y) values that are that large or larger - - 'monotonic_min': the minimum of all the data with (x, y) values that are that small or smaller + * 'max': the maximum. + * 'min': the minimum. + * 'mean': the mean. + * 'monotonic_max': the maximum of all the data with (x, y) values that are that large or larger + * 'monotonic_min': the minimum of all the data with (x, y) values that are that small or smaller - All these options ignore nan values. + All these options ignore nan values. lower_cutoff : float, optional The value to cutoff the statistic at: takes the maximum of the calculated statistic and this value. no_data_action: string, optional Sets what to do when there is no data, or only NaN data, at an (x, y) value: - - If 'discard' then when there is no data, or only NaN data, for an (x,y) value then this (x,y) + + * If 'discard' then when there is no data, or only NaN data, for an (x,y) value then this (x,y) value will not be a key in the returned dictionary - - If 'nan' then when there is no data, or only NaN data, for an (x,y) value then this (x,y) + * If 'nan' then when there is no data, or only NaN data, for an (x,y) value then this (x,y) value will be a key in the returned dictionary and its value will be NaN. - - If 'min' then when there is no data, or only NaN data, for an (x,y) value then this (x,y) + * If 'min' then when there is no data, or only NaN data, for an (x,y) value then this (x,y) value will be a key in the returned dictionary and its value will be the minimal value allowed for this statistic, as specified by `lower_cutoff`.
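For a concrete picture of how 'monotonic_max', `lower_cutoff`, and `no_data_action` fit together when filling in a volumetric-benchmarking grid, a hedged sketch (illustrative, not the VBDataFrame implementation; the data layout is assumed):

    import math

    def monotonic_max_grid(values, lower_cutoff=0.0, no_data_action='discard'):
        # values: {(x, y): [data...]}, possibly containing NaNs (assumed layout).
        # 'monotonic_max' at (x, y) is the max over all cells with x' >= x and y' >= y.
        grid = {}
        for (x, y) in values:
            pool = [v for (xp, yp), vs in values.items() if xp >= x and yp >= y
                    for v in vs if not math.isnan(v)]
            if pool:
                grid[(x, y)] = max(max(pool), lower_cutoff)  # apply the lower cutoff
            elif no_data_action == 'nan':
                grid[(x, y)] = float('nan')
            elif no_data_action == 'min':
                grid[(x, y)] = lower_cutoff
            # 'discard': leave the (x, y) key out entirely
        return grid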
diff --git a/pygsti/report/formatter.py b/pygsti/report/formatter.py index 725fa85c0..30e9cc08e 100644 --- a/pygsti/report/formatter.py +++ b/pygsti/report/formatter.py @@ -20,11 +20,11 @@ class Formatter(object): """ Class defining the formatting rules for an object - Once created, is used like a function with the signature: item, specs -> string - See __call__ method for details + Once created, is used like a function with the signature `(item, specs) -> string`. + See the `__call__` method for details. - __call__ could be renamed to render() for compatibility with table.render(), row.render(), etc.. - However, using __call__ allows for the user to drop in custom functions in place of Formatter objects, + `__call__` could be renamed to render() for compatibility with table.render(), row.render(), etc. + However, using `__call__` allows for the user to drop in custom functions in place of Formatter objects, which is useful (i.e. in creating figure formatters) Parameters ---------- @@ -35,11 +35,11 @@ class Formatter(object): stringreplacers : tuple, optional A tuple of tuples of the form (pattern, replacement) where replacement is a normal - string. Ex : [('rho', 'ρ')] + string. Ex : `[('rho', 'ρ')]` - regexreplace : tuple, optional + regexreplace : tuple, optional A tuple of the form (regex, replacement) where replacement is formattable string, - and gets formatted with grouped result of regex matching on item) Ex : ('.*?([0-9]+)$', '_{%s}') + and gets formatted with the grouped result of regex matching on the item. Ex : `('.*?([0-9]+)$', '_{%s}')` formatstring : str, optional Outer formatting for after both replacements have been made @@ -68,22 +68,24 @@ def __init__(self, nmebstring=None, stringreturn=None, defaults=None): - ''' + """ Create a Formatter object by supplying formatting rules to be applied Parameters ---------- stringreplacers : tuples of the form (pattern, replacement) (optional) - (replacement is a normal string) - Ex : [('rho', 'ρ')] + (replacement is a normal string) + Ex : [('rho', 'ρ')] + regexreplace : A tuple of the form (regex, replacement) (optional) - (replacement is formattable string, - gets formatted with grouped result of regex matching on item) - Ex : ('.*?([0-9]+)$', '_{%s}') + (replacement is formattable string, gets formatted with grouped result of regex matching on item) + Ex : ('.*?([0-9]+)$', '_{%s}') - formatstring : string (optional) Outer formatting for after both replacements have been made + formatstring : string (optional) + Outer formatting for after both replacements have been made - ebstring : string (optional) formatstring used if the item being formatted has attached error bars + ebstring : string (optional) + formatstring used if the item being formatted has attached error bars stringreturn : tuple (string, string) return the second string if the label is equal to the first defaults : dictionary (string, any) overridden values for the dictionary passed in during formatting, i.e. for rounded formatters, which override the precision key to be set to two - ''' + """ +
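The formatting pipeline these parameters describe -- string replacements, then a regex substitution, then an outer format string -- can be sketched in a few self-contained lines using the example rules from the docstring above (illustrative, not the Formatter class itself):

    import re

    def format_item(item, stringreplacers=(('rho', 'ρ'),),
                    regexreplace=('.*?([0-9]+)$', '_{%s}'), formatstring='%s'):
        # Apply plain string replacements first, e.g. 'rho' -> 'ρ'.
        for pattern, replacement in stringreplacers:
            item = item.replace(pattern, replacement)
        # Then rewrite the matched group via the regex rule, e.g. 'ρ0' -> 'ρ_{0}'.
        match = re.match(regexreplace[0], item)
        if match:
            item = item[:match.start(1)] + (regexreplace[1] % match.group(1))
        # Finally apply the outer format string.
        return formatstring % item

    print(format_item('rho0'))  # -> 'ρ_{0}'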
self.custom = custom self.stringreplacers = stringreplacers self.stringreturn = stringreturn @@ -108,20 +111,21 @@ def __init__(self, self.defaults = defaults def __call__(self, item, specs): - ''' + """ Formatting function template Parameters - -------- + ---------- item : string, the item to be formatted! specs : dictionary dictionary of options to be sent to the formatter and custom functions Returns - -------- + ------- formatted item : string - ''' + """ + specs = deepcopy(specs) # Modifying other dictionaries would be rude specs.update(self.defaults) diff --git a/pygsti/report/merge_helpers.py b/pygsti/report/merge_helpers.py index fdd41402a..3ea09c8e6 100644 --- a/pygsti/report/merge_helpers.py +++ b/pygsti/report/merge_helpers.py @@ -18,8 +18,6 @@ import webbrowser as _webbrowser from pathlib import Path -from markupsafe import Markup - from pygsti.baseobjs.verbosityprinter import VerbosityPrinter as _VerbosityPrinter from pygsti.tools import timed_block as _timed_block @@ -29,6 +27,7 @@ _Undefined = () + def _read_contents(filename): """ Read the contents from `filename` as a string. @@ -87,11 +86,11 @@ def insert_resource(connected, online_url, offline_filename, integrity : str, optional The "integrity" attribute string of the