20 changes: 15 additions & 5 deletions WORKSPACE
@@ -8,6 +8,16 @@ local_repository(
path = "third_party/pypi_setuptools",
)

local_repository(
name = "pypi_wheel",
path = "third_party/pypi_wheel",
)

local_repository(
name = "local_config_cuda",
path = "third_party/local_config_cuda",
)

# TensorFlow's .bzl files, loaded later in this file, also load rules_python
# but we need a slightly newer version that is still compatible with TF's.
http_archive(
@@ -47,11 +57,11 @@ install_deps()
# Eigen commit used by TensorFlow / TFQ.
# This commit corresponds to Eigen version 3.4.90
# (verified via Eigen/src/Core/util/Macros.h).
EIGEN_COMMIT = "c1d637433e3b3f9012b226c2c9125c494b470ae6"
EIGEN_COMMIT = "33d0937c6bdf5ec999939fb17f2a553183d14a74"
Comment on lines 58 to +60

Member:
The commit hash is changed, but the comment still refers to version 3.4.90. Does this commit still correspond to 3.4.90?

Collaborator (Author):
Yes, it is the same version. The commit-to-version mapping is not 1:1: Eigen development snapshots can share the same version macros across many commits, so the 3.4.90 comment still applies.
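
For anyone who wants to spot-check this, here is a minimal sketch of the verification mentioned in the WORKSPACE comment, assuming the Eigen archive for the new commit has been extracted locally (the directory name below is hypothetical and not part of the PR):

import re

# Hypothetical local path; point this at wherever the Eigen archive was extracted.
MACROS_H = "eigen-33d0937c6bdf5ec999939fb17f2a553183d14a74/Eigen/src/Core/util/Macros.h"

def eigen_version(path):
    """Read the EIGEN_{WORLD,MAJOR,MINOR}_VERSION macros from Macros.h."""
    with open(path, encoding="utf-8") as handle:
        text = handle.read()
    fields = {}
    for name in ("WORLD", "MAJOR", "MINOR"):
        fields[name] = re.search(r"#define EIGEN_%s_VERSION (\d+)" % name, text).group(1)
    return "{WORLD}.{MAJOR}.{MINOR}".format(**fields)

print(eigen_version(MACROS_H))  # expected to print 3.4.90 for both the old and the new commit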


http_archive(
name = "eigen",
sha256 = "0992b93a590c39e196a9efdb5b4919fbf3fb485e7e656c6a87b21ddadb7f6ad2",
sha256 = "1f4babf536ce8fc2129dbf92ff3be54cd18ffb2171e9eb40edd00f0a045a54fa",
build_file_content = """
cc_library(
name = "eigen3",
@@ -79,9 +89,9 @@ http_archive(

http_archive(
name = "org_tensorflow",
sha256 = "75d63eab5c4b41a831e39e2e3795e1f9dcc247e8fcca11bf3f3e1a6ac35e55d0",
strip_prefix = "tensorflow-2.17.1",
urls = ["https://github.com/tensorflow/tensorflow/archive/refs/tags/v2.17.1.zip"],
sha256 = "f73e6d838b388c7b4d1ef88d1422a35bb5532644117a472fb0fee28a2215176c",
strip_prefix = "tensorflow-2.18.1",
urls = ["https://github.com/tensorflow/tensorflow/archive/refs/tags/v2.18.1.zip"],
)


28 changes: 14 additions & 14 deletions release/setup.py
@@ -31,7 +31,7 @@
from setuptools.command.install import install
from setuptools.dist import Distribution

CUR_VERSION = "0.7.6"
CUR_VERSION = "0.7.7"

DOCLINES = __doc__.split("\n")

@@ -46,26 +46,26 @@ def finalize_options(self):


REQUIRED_PACKAGES = [
"cirq-core==1.3.0",
"cirq-google==1.3.0",
"numpy<2.0",
"scipy~=1.15.3",
"cirq-core==1.5.0",
"cirq-google==1.5.0",
"numpy>=2,<3",
"scipy>=1.15.3,<2",
"sympy==1.14",
"tf-keras~=2.17.0",

# The reset of these constraints are on transitive dependencies to avoid
# installation conflicts, which can happen if pip finds a newer version of a
# package & that newer version requires, e.g., NumPy 2.x or Python 3.11+.
# Ideally these can be removed once TFQ is compatible with recent TFs.
"jax<0.5",
"contourpy<1.3.3",
"tf-keras>=2.18,<2.19",

# The following are transitive dependencies that need to be constrained to
# avoid incompatible versions or because some (e.g., contourpy 1.3.3)
# require Python 3.11+ and we want to maintain Python 3.9 compatibility.
# TODO: revisit after we reach compatibility with TensorFlow 2.19+.
"jax>=0.5,<0.6",
"contourpy<=1.3.2",
]

# TF requirement is placed as an extras to avoid overwriting existing nightly TF
# installations. Users can run "pip install tensorflow-quantum[and-tensorflow]"
# to get everything in one go (or "pip install tensorflow tensorflow-quantum").
EXTRA_PACKAGES = {}
EXTRA_PACKAGES["and-tensorflow"] = ["tensorflow>=2.17,<2.18"]
EXTRA_PACKAGES["and-tensorflow"] = ["tensorflow>=2.18,<2.19"]
# "extras" was used before 0.7.4. Prefer "and-tensorflow" in 0.7.4+.
EXTRA_PACKAGES["extras"] = EXTRA_PACKAGES["and-tensorflow"]
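
For context (not part of the diff): EXTRA_PACKAGES uses the standard setuptools extras mechanism. A minimal sketch of how these dictionaries are typically passed to setup(), assuming the usual wiring in the collapsed remainder of the file (the exact call in release/setup.py may differ):

# Illustrative sketch only; CUR_VERSION, REQUIRED_PACKAGES, and EXTRA_PACKAGES
# are the names defined earlier in this file.
from setuptools import setup

setup(
    name="tensorflow-quantum",
    version=CUR_VERSION,                 # "0.7.7" after this change
    install_requires=REQUIRED_PACKAGES,  # hard requirements listed above
    extras_require=EXTRA_PACKAGES,       # enables: pip install tensorflow-quantum[and-tensorflow]
)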

16 changes: 8 additions & 8 deletions requirements.in
@@ -16,15 +16,15 @@
# Core development requirements for TensorFlow Quantum. This file is processed
# by pip-compile (from pip-tools) to produce requirements.txt using the script
# scripts/generate_requirements.sh.

cirq-core~=1.3.0
cirq-google~=1.3.0
jax<0.5
numpy<2.0
scipy~=1.15.3
cirq-core<=1.5.0
cirq-google<=1.5.0
contourpy<=1.3.2
numpy<=2.2.6
scipy<1.16
sympy==1.14
tensorflow>=2.17,<2.18
tf-keras~=2.17.0
tensorflow>=2.18,<2.19
tf-keras~=2.18.0
wheel==0.46.2

# TODO: the next ones are not truly core requirements. A better place should be
# found for them (long with others needed by scripts/*). They're here as a
76 changes: 43 additions & 33 deletions requirements.txt
@@ -4,7 +4,7 @@
#
# ./scripts/generate_requirements.sh
#
absl-py==2.3.1
absl-py==2.4.0
# via
# keras
# tensorboard
@@ -18,22 +18,32 @@ astunparse==1.6.3
# via tensorflow
attrs==25.4.0
# via
# cirq-core
# jsonschema
# referencing
# typedunits
certifi==2026.1.4
# via requests
cffi==2.0.0
# via cryptography
charset-normalizer==3.4.4
# via requests
cirq-core==1.3.0
cirq-core==1.5.0
# via
# -r requirements.in
# cirq-google
cirq-google==1.3.0
cirq-google==1.5.0
# via -r requirements.in
contourpy==1.3.0
# via matplotlib
contourpy==1.3.2
# via
# -r requirements.in
# matplotlib
cryptography==46.0.4
# via google-auth
cycler==0.12.1
# via matplotlib
cython==3.2.4
# via typedunits
dill==0.4.1
# via pylint
duet==0.2.9
@@ -46,7 +56,7 @@ gast==0.7.0
# via tensorflow
google-api-core[grpc]==2.29.0
# via cirq-google
google-auth==2.47.0
google-auth==2.48.0
# via google-api-core
google-pasta==0.2.0
# via tensorflow
@@ -60,9 +70,9 @@ grpcio==1.76.0
# grpcio-status
# tensorboard
# tensorflow
grpcio-status==1.62.3
grpcio-status==1.71.2
# via google-api-core
h5py==3.10.0
h5py==3.15.1
# via
# keras
# tensorflow
@@ -72,10 +82,6 @@ ipython-genutils==0.2.0
# via nbformat
isort==5.13.2
# via pylint
jax==0.4.34
# via -r requirements.in
jaxlib==0.4.34
# via jax
jinja2==3.1.6
# via tensorflow-docs
jsonschema==4.26.0
@@ -94,7 +100,7 @@ kiwisolver==1.4.7
# via matplotlib
libclang==18.1.1
# via tensorflow
markdown==3.9
markdown==3.10.1
# via tensorboard
markdown-it-py==3.0.0
# via rich
@@ -108,10 +114,8 @@ mccabe==0.7.0
# via pylint
mdurl==0.1.2
# via markdown-it-py
ml-dtypes==0.3.2
ml-dtypes==0.5.4
# via
# jax
# jaxlib
# keras
# tensorflow
mpmath==1.3.0
@@ -129,25 +133,22 @@ nest-asyncio==1.6.0
# via nbclient
networkx==3.2.1
# via cirq-core
numpy==1.26.4
numpy==2.0.2
# via
# -r requirements.in
# cirq-core
# contourpy
# h5py
# jax
# jaxlib
# keras
# matplotlib
# ml-dtypes
# pandas
# scipy
# tensorboard
# tensorflow
# typedunits
opt-einsum==3.4.0
# via
# jax
# tensorflow
# via tensorflow
optree==0.18.0
# via keras
packaging==26.0
@@ -156,6 +157,7 @@ packaging==26.0
# matplotlib
# tensorboard
# tensorflow
# wheel
pandas==2.3.3
# via cirq-core
pillow==11.0.0
@@ -169,7 +171,7 @@ proto-plus==1.27.0
# via
# cirq-google
# google-api-core
protobuf==4.25.8
protobuf==5.29.5
# via
# cirq-google
# google-api-core
@@ -179,18 +181,23 @@ protobuf==4.25.8
# tensorboard
# tensorflow
# tensorflow-docs
# typedunits
pyasn1==0.6.2
# via
# pyasn1-modules
# rsa
pyasn1-modules==0.4.2
# via google-auth
pycparser==3.0
# via cffi
pygments==2.19.2
# via rich
pylint==3.3.3
# via -r requirements.in
pyparsing==3.3.2
# via matplotlib
# via
# matplotlib
# typedunits
python-dateutil==2.9.0.post0
# via
# jupyter-client
@@ -208,7 +215,7 @@ requests==2.32.5
# via
# google-api-core
# tensorflow
rich==14.2.0
rich==14.3.1
# via keras
rpds-py==0.30.0
# via
@@ -220,9 +227,7 @@ scipy==1.15.3
# via
# -r requirements.in
# cirq-core
# jax
# jaxlib
setuptools==80.10.1
setuptools==75.0.0
# via
# tensorboard
# tensorflow
@@ -239,11 +244,11 @@ sympy==1.14.0
# via
# -r requirements.in
# cirq-core
tensorboard==2.17.1
tensorboard==2.18.0
# via tensorflow
tensorboard-data-server==0.7.2
# via tensorboard
tensorflow==2.17.1
tensorflow==2.18.1
# via
# -r requirements.in
# tf-keras
@@ -253,7 +258,7 @@ tensorflow-io-gcs-filesystem==0.37.1
# via tensorflow
termcolor==3.1.0
# via tensorflow
tf-keras==2.17.0
tf-keras==2.18.0
# via -r requirements.in
tomli==2.4.0
# via
@@ -271,10 +276,13 @@ traitlets==5.14.3
# jupyter-core
# nbclient
# nbformat
typedunits==0.0.1
# via cirq-google
typing-extensions==4.15.0
# via
# astroid
# cirq-core
# cryptography
# grpcio
# optree
# referencing
@@ -285,8 +293,10 @@ urllib3==2.6.3
# via requests
werkzeug==3.1.5
# via tensorboard
wheel==0.44.0
# via astunparse
wheel==0.46.2
# via
# -r requirements.in
# astunparse
wrapt==1.17.3
# via tensorflow
yapf==0.43.0
2 changes: 1 addition & 1 deletion tensorflow_quantum/__init__.py
@@ -64,4 +64,4 @@
del core
# pylint: enable=undefined-variable

__version__ = '0.7.6'
__version__ = '0.7.7'
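
The version string is bumped in two places in this PR: CUR_VERSION in release/setup.py (above) and __version__ here. A small hypothetical helper, not part of the repository, that checks the two stay in sync:

# Hypothetical consistency check; run from the repository root.
import re

def read_version(path, pattern):
    with open(path, encoding="utf-8") as handle:
        return re.search(pattern, handle.read()).group(1)

setup_version = read_version("release/setup.py", r'CUR_VERSION = "([^"]+)"')
package_version = read_version("tensorflow_quantum/__init__.py", r"__version__ = '([^']+)'")
assert setup_version == package_version == "0.7.7", (setup_version, package_version)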
11 changes: 6 additions & 5 deletions tensorflow_quantum/python/differentiators/linear_combination.py
@@ -13,6 +13,7 @@
# limitations under the License.
# ==============================================================================
"""Compute gradients by combining function values linearly."""
import math
import numbers

import numpy as np
@@ -240,9 +241,9 @@ def __init__(self, error_order=1, grid_spacing=0.001):
weight = -1 * np.sum(
[1 / j for j in np.arange(1, error_order + 1)])
else:
weight = ((-1) ** (point+1) * np.math.factorial(error_order))/\
(point * np.math.factorial(error_order-point)
* np.math.factorial(point))
weight = ((-1) ** (point+1) * math.factorial(error_order))/\
(point * math.factorial(error_order-point)
* math.factorial(point))
weights.append(weight / grid_spacing)
super().__init__(weights, grid_points_to_eval * grid_spacing)

@@ -308,7 +309,7 @@ def __init__(self, error_order=2, grid_spacing=0.001):
n = int(error_order / 2)
for k in grid_points_to_eval:
k = int(k)
numerator = (-1)**(k + 1) * np.math.factorial(n)**2
denom = k * np.math.factorial(n - k) * np.math.factorial(n + k)
numerator = (-1)**(k + 1) * math.factorial(n)**2
denom = k * math.factorial(n - k) * math.factorial(n + k)
weights.append(numerator / (denom * grid_spacing))
super().__init__(weights, grid_points_to_eval * grid_spacing)
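
The only code change in this file swaps np.math.factorial for math.factorial: np.math was an undocumented alias for the standard-library math module and no longer exists in the NumPy versions this PR moves to, so the stdlib function is imported and used directly. As a standalone sanity check of the weight formula in the second hunk (the grid-point ordering below is illustrative, not necessarily the ordering used by the class), error_order=2 reduces to the familiar central-difference coefficients:

import math

def central_difference_weights(error_order=2, grid_spacing=0.001):
    # Mirrors the formula above: w_k = (-1)**(k+1) * (n!)**2 / (k * (n-k)! * (n+k)! * h).
    n = error_order // 2
    weights = []
    for k in range(-n, n + 1):
        if k == 0:
            continue  # k = 0 is not among the evaluated grid points here
        numerator = (-1) ** (k + 1) * math.factorial(n) ** 2
        denom = k * math.factorial(n - k) * math.factorial(n + k)
        weights.append(numerator / (denom * grid_spacing))
    return weights

print(central_difference_weights(2, 0.001))  # [-500.0, 500.0], i.e. -1/(2h) and +1/(2h)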
1 change: 1 addition & 0 deletions third_party/local_config_cuda/WORKSPACE
@@ -0,0 +1 @@
workspace(name = "local_config_cuda")