This repository has been archived by the owner on Aug 18, 2023. It is now read-only.

Remove counts argument from inference calls (#175)
zaqqwerty authored Feb 14, 2022
1 parent a734491 commit 2fbc816
Showing 9 changed files with 154 additions and 188 deletions.
32 changes: 11 additions & 21 deletions qhbmlib/circuit_infer.py
@@ -81,36 +81,28 @@ def backend(self):
def differentiator(self):
return self._differentiator

-  def expectation(self,
-                  qnn: circuit_model.QuantumCircuit,
-                  initial_states: tf.Tensor,
-                  counts: tf.Tensor,
-                  operators: tf.Tensor,
-                  reduce: bool = True):
+  def expectation(self, qnn: circuit_model.QuantumCircuit,
+                  initial_states: tf.Tensor, operators: tf.Tensor):
"""Returns the expectation values of the operators against the QNN.
Args:
qnn: The parameterized quantum circuit on which to do inference.
initial_states: Shape [batch_size, num_qubits] of dtype `tf.int8`.
-        These are the initial states of each qubit in the circuit.
-      counts: Shape [batch_size] of dtype `tf.int32` such that `counts[i]` is
-        the weight of `initial_states[i]` when computing expectations.
-        Additionally, if `self.backend != "noiseless"`, `counts[i]` samples
-        are drawn from `(qnn)|initial_states[i]>` and used to compute
-        the corresponding expectation.
+        Each entry is an initial state for the set of qubits. For each state,
+        `qnn` is applied and the pure state expectation value is calculated.
operators: `tf.Tensor` of strings with shape [n_ops], result of calling
`tfq.convert_to_tensor` on a list of cirq.PauliSum, `[op1, op2, ...]`.
Will be tiled to measure `<op_j>_((qnn)|initial_states[i]>)`
for each i and j.
-      reduce: bool flag for whether or not to average over i.
Returns:
-      If `reduce` is true, a `tf.Tensor` with shape [n_ops] whose entries
-        are the batch-averaged expectation values of `operators`.
-      Else, a `tf.Tensor` with shape [batch_size, n_ops] whose entries are the
-        unaveraged expectation values of each `operator` against each `circuit`.
+      `tf.Tensor` with shape [batch_size, n_ops] whose entries are the
+        unaveraged expectation values of each `operator` against each
+        transformed initial state.
"""
-    circuits = qnn(initial_states)
+    unique_states, idx, counts = utils.unique_bitstrings_with_counts(
+        initial_states)
+    circuits = qnn(unique_states)
num_circuits = tf.shape(circuits)[0]
num_operators = tf.shape(operators)[0]
tiled_values = tf.tile(
@@ -123,9 +115,7 @@ def expectation(self,
tiled_operators,
tf.tile(tf.expand_dims(counts, 1), [1, num_operators]),
)
-    if reduce:
-      return utils.weighted_average(counts, expectations)
-    return expectations
+    return utils.expand_unique_results(expectations, idx)

def sample(self, qnn: circuit_model.QuantumCircuit, initial_states: tf.Tensor,
counts: tf.Tensor):
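The upshot of this hunk: callers now pass raw `initial_states`, and `expectation` deduplicates them internally, runs one circuit per unique bitstring, and expands the per-unique results back to the original batch order. A standalone sketch of that dedup-then-expand pattern; the helper bodies below are illustrative assumptions, not the actual `qhbmlib.utils` implementations:

```python
import numpy as np
import tensorflow as tf

def unique_bitstrings_with_counts(bitstrings):
  """Hypothetical stand-in: unique rows, inverse index, and row counts."""
  unique_rows, idx, counts = np.unique(
      bitstrings, axis=0, return_inverse=True, return_counts=True)
  return unique_rows, idx, counts

def expand_unique_results(unique_results, idx):
  """Hypothetical stand-in: maps per-unique results back to batch order."""
  return tf.gather(unique_results, idx)

initial_states = np.array([[0, 1], [1, 1], [0, 1]], dtype=np.int8)
unique_states, idx, counts = unique_bitstrings_with_counts(initial_states)
# Suppose one expectation value is computed per unique state:
unique_expectations = tf.constant([[0.5], [-0.5]])
batch_expectations = expand_unique_results(unique_expectations, idx)
# batch_expectations has shape [3, 1], matching the original batch.
```

Deduplicating first pays off when the batch repeats bitstrings, since each unique circuit is simulated only once.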
39 changes: 30 additions & 9 deletions qhbmlib/energy_infer.py
@@ -132,17 +132,15 @@ def entropy(self):
return self._entropy()

@preface_inference
-  def expectation(self, function, num_samples: int):
+  def expectation(self, function):
"""Returns an estimate of the expectation value of the given function.
Args:
function: Mapping from a 2D tensor of bitstrings to a possibly nested
structure. The structure must have atomic elements all of which are
float tensors with the same batch size as the input bitstrings.
-      num_samples: The number of bitstring samples to use when estimating the
-        expectation value of `function`.
    """
-    return self._expectation(function, num_samples)
+    return self._expectation(function)

@preface_inference
def log_partition(self):
@@ -169,7 +167,7 @@ def _entropy(self):
raise NotImplementedError()

@abc.abstractmethod
-  def _expectation(self, function, num_samples: int):
+  def _expectation(self, function):
"""Default implementation wrapped by `self.expectation`."""
raise NotImplementedError()

@@ -199,7 +197,24 @@ def infer(self, energy: energy_model.BitstringEnergy):
class EnergyInference(EnergyInferenceBase):
"""Provides some default method implementations."""

-  def _expectation(self, function, num_samples: int):
+  def __init__(self,
+               num_expectation_samples: int,
+               initial_seed: Union[None, tf.Tensor] = None,
+               name: Union[None, str] = None):
+    """Initializes an EnergyInference.
+
+    Args:
+      num_expectation_samples: Number of samples to draw and use for estimating
+        the expectation value.
+      initial_seed: PRNG seed; see tfp.random.sanitize_seed for details. This
+        seed will be used in the `sample` method. If None, the seed is updated
+        after every inference call. Otherwise, the seed is fixed.
+      name: Optional name for the model.
+    """
+    super().__init__(initial_seed, name)
+    self.num_expectation_samples = num_expectation_samples
+
+  def _expectation(self, function):
"""Default implementation wrapped by `self.expectation`.
Estimates an expectation value using sample averaging.
@@ -208,7 +223,7 @@ def _expectation(self, function, num_samples: int):
@tf.custom_gradient
def _inner_expectation():
"""Enables derivatives."""
-      samples = tf.stop_gradient(self.sample(num_samples))
+      samples = tf.stop_gradient(self.sample(self.num_expectation_samples))
bitstrings, _, counts = utils.unique_bitstrings_with_counts(samples)

# TODO(#157): try to parameterize the persistence.
@@ -274,6 +289,7 @@ class AnalyticEnergyInference(EnergyInference):

def __init__(self,
num_bits: int,
+               num_expectation_samples: int,
initial_seed: Union[None, tf.Tensor] = None,
name: Union[None, str] = None):
"""Initializes an AnalyticEnergyInference.
@@ -284,12 +300,14 @@ def __init__(self,
Args:
num_bits: Number of bits on which this layer acts.
+      num_expectation_samples: Number of samples to draw and use for estimating
+        the expectation value.
initial_seed: PRNG seed; see tfp.random.sanitize_seed for details. This
seed will be used in the `sample` method. If None, the seed is updated
after every inference call. Otherwise, the seed is fixed.
name: Optional name for the model.
"""
-    super().__init__(initial_seed, name)
+    super().__init__(num_expectation_samples, initial_seed, name)
self._all_bitstrings = tf.constant(
list(itertools.product([0, 1], repeat=num_bits)), dtype=tf.int8)
self._logits_variable = tf.Variable(
@@ -345,18 +363,21 @@ class BernoulliEnergyInference(EnergyInference):

def __init__(self,
num_bits: int,
+               num_expectation_samples: int,
initial_seed: Union[None, tf.Tensor] = None,
name: Union[None, str] = None):
"""Initializes a BernoulliEnergyInference.
Args:
num_bits: Number of bits on which this layer acts.
+      num_expectation_samples: Number of samples to draw and use for estimating
+        the expectation value.
initial_seed: PRNG seed; see tfp.random.sanitize_seed for details. This
seed will be used in the `sample` method. If None, the seed is updated
after every inference call. Otherwise, the seed is fixed.
name: Optional name for the model.
"""
-    super().__init__(initial_seed, name)
+    super().__init__(num_expectation_samples, initial_seed, name)
self._logits_variable = tf.Variable(tf.zeros([num_bits]), trainable=False)
self._distribution = tfd.Bernoulli(
logits=self._logits_variable, dtype=tf.int8)
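Across this file, the sample budget moves from each `expectation` call into the constructor. A hedged usage sketch of the new constructor signature (the argument names are taken from the diff above; the surrounding setup is assumed):

```python
from qhbmlib import energy_infer

# The sample budget now lives on the inference object.
e_infer = energy_infer.AnalyticEnergyInference(
    num_bits=3, num_expectation_samples=10000)

# Before this commit: e_infer.expectation(f, num_samples)
# After, the stored budget is used implicitly (assuming an energy has been
# set via e_infer.infer(energy)):
# value = e_infer.expectation(f)
```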
35 changes: 17 additions & 18 deletions qhbmlib/hamiltonian_infer.py
@@ -116,8 +116,7 @@ def circuits(self, model: hamiltonian_model.Hamiltonian, num_samples: int):
return states, counts

def expectation(self, model: hamiltonian_model.Hamiltonian,
-                  ops: Union[tf.Tensor,
-                             hamiltonian_model.Hamiltonian], num_samples: int):
+                  ops: Union[tf.Tensor, hamiltonian_model.Hamiltonian]):
"""Estimates observable expectation values against the density operator.
TODO(#119): add expectation and derivative equations and discussions
@@ -134,25 +133,25 @@ def expectation(self, model: hamiltonian_model.Hamiltonian,
ops: The observables to measure. If `tf.Tensor`, strings with shape
[n_ops], result of calling `tfq.convert_to_tensor` on a list of
cirq.PauliSum, `[op1, op2, ...]`. Otherwise, a Hamiltonian.
-      num_samples: Number of draws from the EBM associated with `model` to
-        average over.
Returns:
      `tf.Tensor` with shape [n_ops] whose entries are the sample averaged
        expectation values of each entry in `ops`.
"""

+    def expectation_f(bitstrings):
+      if isinstance(ops, tf.Tensor):
+        return self.q_inference.expectation(model.circuit, bitstrings, ops)
+      elif isinstance(ops.energy, energy_model.PauliMixin):
+        u_dagger_u = model.circuit + ops.circuit_dagger
+        expectation_shards = self.q_inference.expectation(
+            u_dagger_u, bitstrings, ops.operator_shards)
+        return tf.map_fn(
+            lambda x: tf.expand_dims(ops.energy.operator_expectation(x), 0),
+            expectation_shards)
+      else:
+        raise NotImplementedError(
+            "General `BitstringEnergy` models not yet supported.")

self.e_inference.infer(model.energy)
-    samples = self.e_inference.sample(num_samples)
-    bitstrings, _, counts = utils.unique_bitstrings_with_counts(samples)
-    if isinstance(ops, tf.Tensor):
-      return self.q_inference.expectation(
-          model.circuit, bitstrings, counts, ops, reduce=True)
-    elif isinstance(ops.energy, energy_model.PauliMixin):
-      u_dagger_u = model.circuit + ops.circuit_dagger
-      expectation_shards = self.q_inference.expectation(
-          u_dagger_u, bitstrings, counts, ops.operator_shards, reduce=True)
-      return tf.expand_dims(
-          ops.energy.operator_expectation(expectation_shards), 0)
-    else:
-      raise NotImplementedError(
-          "General `BitstringEnergy` models not yet supported.")
+    return self.e_inference.expectation(expectation_f)
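The rewritten method wraps the quantum expectation in the `expectation_f` closure and hands it to `e_inference.expectation`, which now owns sampling. A generic, runnable sketch of that delegation pattern; the class and function names here are illustrative, not qhbmlib API:

```python
import tensorflow as tf

class SampleAverager:
  """Illustrative stand-in for the role EnergyInference plays above."""

  def __init__(self, sampler, num_expectation_samples):
    self._sampler = sampler
    self.num_expectation_samples = num_expectation_samples

  def expectation(self, function):
    # The averager, not the caller, decides how many samples to draw.
    samples = self._sampler(self.num_expectation_samples)
    return tf.reduce_mean(function(samples), axis=0)

def bernoulli_sampler(n):
  """Draws n two-bit strings uniformly at random."""
  return tf.cast(tf.random.uniform([n, 2]) < 0.5, tf.int8)

averager = SampleAverager(bernoulli_sampler, num_expectation_samples=1000)
# Like expectation_f above, this maps [n, num_bits] bitstrings to values.
per_sample = lambda b: tf.reduce_sum(tf.cast(b, tf.float32), 1, keepdims=True)
estimate = averager.expectation(per_sample)  # close to [1.0] for fair bits
```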
21 changes: 6 additions & 15 deletions qhbmlib/vqt_loss.py
@@ -24,7 +24,7 @@


def vqt(qhbm_infer: hamiltonian_infer.QHBM,
-        model: hamiltonian_model.Hamiltonian, num_samples: tf.Tensor,
+        model: hamiltonian_model.Hamiltonian,
hamiltonian: Union[tf.Tensor,
hamiltonian_model.Hamiltonian], beta: tf.Tensor):
"""Computes the VQT loss of a given QHBM and Hamiltonian.
@@ -34,8 +34,6 @@ def vqt(qhbm_infer: hamiltonian_infer.QHBM,
Args:
qhbm_infer: Inference methods for the model.
model: The modular Hamiltonian being trained to model the thermal state.
-    num_samples: A scalar `tf.Tensor` specifying the number of samples to draw
-      from the EBM of `model` when estimating the loss and its gradients.
hamiltonian: The Hamiltonian whose thermal state is to be learned. If
it is a `tf.Tensor`, it is of type `tf.string` with shape [1], result of
calling `tfq.convert_to_tensor` on a list of `cirq.PauliSum`, `[op]`.
Expand All @@ -49,21 +47,14 @@ def vqt(qhbm_infer: hamiltonian_infer.QHBM,

# See equations B4 and B5 in appendix. TODO(#119): confirm equation number.
def f_vqt(bitstrings):
-    # TODO(#158): counts is required here, but not meaningful.
-    counts = tf.ones([tf.shape(bitstrings)[0]])
if isinstance(hamiltonian, tf.Tensor):
-      h_expectations = tf.reshape(
-          qhbm_infer.q_inference.expectation(
-              model.circuit, bitstrings, counts, hamiltonian, reduce=False),
-          tf.shape(counts))
+      h_expectations = tf.squeeze(
+          qhbm_infer.q_inference.expectation(model.circuit, bitstrings,
+                                             hamiltonian), 1)
elif isinstance(hamiltonian.energy, energy_model.PauliMixin):
u_dagger_u = model.circuit + hamiltonian.circuit_dagger
expectation_shards = qhbm_infer.q_inference.expectation(
-          u_dagger_u,
-          bitstrings,
-          counts,
-          hamiltonian.operator_shards,
-          reduce=False)
+          u_dagger_u, bitstrings, hamiltonian.operator_shards)
h_expectations = hamiltonian.energy.operator_expectation(
expectation_shards)
else:
@@ -74,6 +65,6 @@ def f_vqt(bitstrings):
return beta_h_expectations - energies

qhbm_infer.e_inference.infer(model.energy)
-  average_expectation = qhbm_infer.e_inference.expectation(f_vqt, num_samples)
+  average_expectation = qhbm_infer.e_inference.expectation(f_vqt)
current_partition = tf.stop_gradient(qhbm_infer.e_inference.log_partition())
return average_expectation - current_partition
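For reference, a hedged reading of what `f_vqt` and the returned loss compute, consistent with the B4/B5 comment above, writing $p_\theta(x) \propto e^{-E_\theta(x)}$ for the EBM distribution and $U$ for the model circuit:

```latex
\mathcal{L}_{\mathrm{VQT}}
  = \mathbb{E}_{x \sim p_\theta}\!\left[
      \beta \, \langle x | U^\dagger H U | x \rangle - E_\theta(x)
    \right]
  - \log Z_\theta
```

The Monte Carlo budget for the outer expectation is no longer a `vqt` argument; it comes from the `num_expectation_samples` stored on `qhbm_infer.e_inference`.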
69 changes: 23 additions & 46 deletions tests/circuit_infer_test.py
@@ -26,6 +26,7 @@

from qhbmlib import circuit_infer
from qhbmlib import circuit_model
+from qhbmlib import utils
from tests import test_util

# Global tolerance, set for float32.
@@ -108,10 +109,10 @@ def test_expectation(self):
exp_infer = circuit_infer.QuantumInference()

# Choose some bitstrings.
-    num_bitstrings = 10
-    bitstrings = tfp.distributions.Bernoulli(
+    num_bitstrings = int(1e6)
+    initial_states = tfp.distributions.Bernoulli(
         probs=[0.5] * self.num_qubits, dtype=tf.int8).sample(num_bitstrings)
-    counts = tf.random.uniform([num_bitstrings], 1, 1000, tf.int32)
+    bitstrings, _, counts = utils.unique_bitstrings_with_counts(initial_states)

# Get true expectation values based on the bitstrings.
expected_x_exps = []
@@ -151,56 +152,32 @@ def test_expectation(self):
for exps in expected_grad:
expected_grad_reduced.append(
tf.reduce_sum(exps * e_counts, 0) / total_counts)
+    expected_reduced = tf.stack(expected_reduced)
+    expected_grad_reduced = tf.stack(expected_grad_reduced)

# Measure operators on every qubit.
x_ops = tfq.convert_to_tensor([1 * cirq.X(q) for q in self.raw_qubits])
y_ops = tfq.convert_to_tensor([1 * cirq.Y(q) for q in self.raw_qubits])
z_ops = tfq.convert_to_tensor([1 * cirq.Z(q) for q in self.raw_qubits])
all_ops = [x_ops, y_ops, z_ops]

-    # Check with reduce True (this is the default)
-    # TODO(#71): Decoration yields an error seemingly coming from TFQ:
-    # LookupError: gradient registry has no entry for: TfqAdjointGradient
-
-    #@tf.function
-    def exp_infer_true(qnn, bitstrings, counts, op):
-      return exp_infer.expectation(qnn, bitstrings, counts, op)
-
-    with tf.GradientTape(persistent=True) as tape:
-      actual_exps = []
-      for op in all_ops:
-        actual_exps.append(exp_infer_true(self.p_qnn, bitstrings, counts, op))
-    actual_exps_grad = [
-        tf.squeeze(tape.jacobian(exps, self.p_qnn.trainable_variables))
-        for exps in actual_exps
-    ]
-    del tape
-    for a, e in zip(actual_exps, expected_reduced):
-      self.assertAllClose(a, e, atol=ATOL)
-    for a, e in zip(actual_exps_grad, expected_grad_reduced):
-      self.assertAllClose(a, e, atol=GRAD_ATOL)
-
-    # Check with reduce False
-    # TODO(#71): Decoration yields an error seemingly coming from TFQ:
-    # LookupError: gradient registry has no entry for: TfqAdjointGradient
-
-    #@tf.function
-    def exp_infer_false(qnn, bitstrings, counts, op):
-      return exp_infer.expectation(qnn, bitstrings, counts, op, False)
-
-    with tf.GradientTape(persistent=True) as tape:
-      actual_exps = []
-      for op in all_ops:
-        actual_exps.append(exp_infer_false(self.p_qnn, bitstrings, counts, op))
-    actual_exps_grad = [
-        tf.squeeze(tape.jacobian(exps, self.p_qnn.trainable_variables))
-        for exps in actual_exps
-    ]
-    del tape
-    for a, e in zip(actual_exps, expected):
-      self.assertAllClose(a, e, atol=ATOL)
-    for a, e in zip(actual_exps_grad, expected_grad):
-      self.assertAllClose(a, e, atol=GRAD_ATOL)
+    expectation_wrapper = tf.function(exp_infer.expectation)
+    actual_reduced = []
+    actual_grad_reduced = []
+    for op in all_ops:
+      with tf.GradientTape() as tape:
+        current_exp = expectation_wrapper(self.p_qnn, initial_states, op)
+        reduced_exp = tf.math.reduce_mean(current_exp, 0)
+      reduced_grad = tf.squeeze(
+          tape.jacobian(reduced_exp, self.p_qnn.trainable_variables))
+      actual_reduced.append(reduced_exp)
+      actual_grad_reduced.append(reduced_grad)
+    actual_reduced = tf.stack(actual_reduced)
+    actual_grad_reduced = tf.stack(actual_grad_reduced)
+
+    self.assertAllClose(actual_reduced, expected_reduced, atol=ATOL)
+    self.assertAllClose(
+        actual_grad_reduced, expected_grad_reduced, atol=GRAD_ATOL)

@test_util.eager_mode_toggle
def test_sample_basic(self):
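The rewritten test draws `int(1e6)` random initial states and checks a plain `reduce_mean` over the batch against expectations built from count-weighted unique bitstrings. A small standalone check, not from the repo, of the identity that makes those two equivalent:

```python
import numpy as np

rng = np.random.default_rng(0)
samples = rng.integers(0, 2, size=(1000, 4))  # [batch, num_bits]

def f(rows):
  # Stand-in for any per-bitstring expectation value.
  return rows.sum(axis=1).astype(np.float64)

unique_rows, counts = np.unique(samples, axis=0, return_counts=True)
weighted = (f(unique_rows) * counts).sum() / counts.sum()
plain = f(samples).mean()
assert np.isclose(weighted, plain)  # count-weighted average == plain mean
```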
4 more changed files not shown.