Commit: fix lint

donghufeng committed Jul 28, 2023
1 parent ffff36f commit 3f5c0e6
Showing 14 changed files with 67 additions and 25 deletions.
22 changes: 11 additions & 11 deletions ccsrc/include/math/tensor/ops_cpu/memory_operator.h
@@ -53,10 +53,10 @@ void destroy(Tensor* t);
 // -----------------------------------------------------------------------------
 
 template <TDtype src, TDtype des>
-Tensor cast_to(void* data, size_t len) {
+Tensor cast_to(const void* data, size_t len) {
     using d_src = to_device_t<src>;
     using d_des = to_device_t<des>;
-    auto c_data = reinterpret_cast<d_src*>(data);
+    auto c_data = reinterpret_cast<const d_src*>(data);
     auto out = cpu::init<des>(len);
     auto c_out = reinterpret_cast<d_des*>(out.data);
     auto caster = cast_value<to_device_t<src>, to_device_t<des>>();
@@ -71,13 +71,13 @@ Tensor cast_to(const Tensor& t, TDtype des);
 // -----------------------------------------------------------------------------
 
 template <TDtype dtype>
-std::string to_string(void* data, size_t dim, bool simplify = false) {
+std::string to_string(const void* data, size_t dim, bool simplify = false) {
     std::string out = "";
     if (!simplify) {
         out = "array(dtype: " + dtype_to_string(dtype) + ", device: " + device_to_string(TDevice::CPU) + ", data: [";
     }
     using calc_t = to_device_t<dtype>;
-    calc_t* data_ = reinterpret_cast<calc_t*>(data);
+    const calc_t* data_ = reinterpret_cast<const calc_t*>(data);
     for (size_t i = 0; i < dim; i++) {
         if constexpr (is_complex_v<calc_t>) {
             out += "(" + std::to_string(data_[i].real()) + ", " + std::to_string(data_[i].imag()) + ")";
@@ -118,7 +118,7 @@ Tensor init_with_vector(const std::vector<T>& a) {
 // -----------------------------------------------------------------------------
 
 template <TDtype dtype>
-Tensor copy(void* data, size_t len) {
+Tensor copy(const void* data, size_t len) {
     using calc_t = to_device_t<dtype>;
     auto out = init<dtype>(len);
     mindquantum::safe_copy(out.data, sizeof(calc_t) * len, data, sizeof(calc_t) * len);
@@ -128,7 +128,7 @@ Tensor copy(void* data, size_t len) {
 Tensor copy(const Tensor& t);
 
 template <TDtype dtype>
-void* copy_mem(void* data, size_t len) {
+void* copy_mem(const void* data, size_t len) {
     using calc_t = to_device_t<dtype>;
     auto res = reinterpret_cast<void*>(malloc(sizeof(calc_t) * len));
     if (res == nullptr) {
@@ -137,7 +137,7 @@ void* copy_mem(void* data, size_t len) {
     mindquantum::safe_copy(res, sizeof(calc_t) * len, data, sizeof(calc_t) * len);
     return res;
 }
-void* copy_mem(void* data, TDtype dtype, size_t len);
+void* copy_mem(const void* data, TDtype dtype, size_t len);
 
 // -----------------------------------------------------------------------------
 template <typename src, typename T>
@@ -185,8 +185,8 @@ Tensor get(const Tensor& t, size_t idx);
 // -----------------------------------------------------------------------------
 
 template <TDtype src_dtype>
-std::vector<to_device_t<src_dtype>> to_vector(void* data, size_t len) {
-    auto c_data = reinterpret_cast<to_device_t<src_dtype>*>(data);
+std::vector<to_device_t<src_dtype>> to_vector(const void* data, size_t len) {
+    auto c_data = reinterpret_cast<const to_device_t<src_dtype>*>(data);
     std::vector<to_device_t<src_dtype>> out;
     for (size_t i = 0; i < len; i++) {
         out.push_back(c_data[i]);
@@ -204,8 +204,8 @@ std::vector<T> to_vector(const Tensor& ori) {
 }
 
 template <TDtype src_dtype>
-std::vector<std::vector<to_device_t<src_dtype>>> to_vector(void* data, size_t n_row, size_t n_col) {
-    auto c_data = reinterpret_cast<to_device_t<src_dtype>*>(data);
+std::vector<std::vector<to_device_t<src_dtype>>> to_vector(const void* data, size_t n_row, size_t n_col) {
+    auto c_data = reinterpret_cast<const to_device_t<src_dtype>*>(data);
     std::vector<std::vector<to_device_t<src_dtype>>> out;
     for (size_t i = 0; i < n_row; i++) {
         std::vector<to_device_t<src_dtype>> tmp;
2 changes: 1 addition & 1 deletion ccsrc/lib/math/tensor/ops_cpu/memory_operator.cpp
@@ -111,7 +111,7 @@ Tensor copy(const Tensor& t) {
     return Tensor();
 }
 
-void* copy_mem(void* data, TDtype dtype, size_t len) {
+void* copy_mem(const void* data, TDtype dtype, size_t len) {
     switch (dtype) {
         case (TDtype::Float32):
             return copy_mem<TDtype::Float32>(data, len);
3 changes: 1 addition & 2 deletions ccsrc/lib/simulator/vector/detail/runtime/cmd.cpp
@@ -30,7 +30,7 @@
 #include "simulator/vector/runtime/rt_gate.h"
 #include "simulator/vector/runtime/utils.h"
 #include "simulator/vector/vector_state.h"
-#define MAX_SHOTS 100000
+constexpr int MAX_SHOTS = 100000;
 
 namespace mindquantum::sim::rt {
 int cmd(const std::vector<std::string> &args) {
@@ -216,7 +216,6 @@ int cmd_file(const char *filename) {
     file.open(filename);
     if (!file.is_open()) {
         throw std::runtime_error(fmt::format("Cannot open file {}", filename));
-        return 0;
     }
     std::vector<std::string> cmds = {"", "cmd"};
     std::string current_cmd = "";
7 changes: 6 additions & 1 deletion mindquantum/algorithm/compiler/decompose/utils.py
@@ -68,7 +68,10 @@ def kron_factor_4x4_to_2x2s(mat: np.ndarray):
     f2 /= np.sqrt(np.linalg.det(f2)) or 1
 
     # Determine global phase.
-    g = mat[a, b] / (f1[a >> 1, b >> 1] * f2[a & 1, b & 1])
+    denominator = f1[a >> 1, b >> 1] * f2[a & 1, b & 1]
+    if denominator == 0:
+        raise ZeroDivisionError("denominator cannot be zero.")
+    g = mat[a, b] / denominator
     if np.real(g) < 0:
         f1 *= -1
         g = -g
@@ -222,6 +225,8 @@ def glob_phase(mat: np.ndarray) -> float:
         Global phase rad, in range of (-pi, pi].
     """
     d = mat.shape[0]
+    if d == 0:
+        raise ZeroDivisionError("Dimension of mat can not be zero.")
     exp_alpha = linalg.det(mat) ** (1 / d)
     return np.angle(exp_alpha)
 
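
A note on the new glob_phase guard: for mat = e^{i*alpha} * U with det(U) = 1, det(mat) ** (1/d) recovers e^{i*alpha}, so the dimension check only rejects empty matrices. A minimal standalone sketch (glob_phase_demo and the test matrix are illustrative, not from the repository):

import numpy as np
from scipy import linalg


def glob_phase_demo(mat: np.ndarray) -> float:
    # Mirrors the guarded implementation: det(mat) ** (1 / d) carries the
    # global phase factor e^{i*alpha} of mat = e^{i*alpha} * U, det(U) = 1.
    d = mat.shape[0]
    if d == 0:
        raise ZeroDivisionError("Dimension of mat can not be zero.")
    return np.angle(linalg.det(mat) ** (1 / d))


# A pure global phase of pi/4 on the identity is recovered exactly.
print(glob_phase_demo(np.exp(1j * np.pi / 4) * np.eye(2)))  # ~0.785398
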
5 changes: 4 additions & 1 deletion mindquantum/algorithm/error_mitigation/mitigation.py
@@ -59,7 +59,10 @@ def zne(
         product = 1
         for i in range(0, len(y)):
             if k != i:
-                product = product * (scaling[i] / (scaling[i] - scaling[k]))
+                try:
+                    product = product * (scaling[i] / (scaling[i] - scaling[k]))
+                except ZeroDivisionError as exc:
+                    raise ZeroDivisionError(f"Error scaling: {scaling}") from exc
         mitigated = mitigated + y_k * product
     return mitigated
 if order is None:
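
The guarded product is the Lagrange basis weight for extrapolating the measured expectations to zero noise; duplicate scale factors make the weights undefined, which is what the re-raised error reports. A self-contained sketch of that step (the function name and data are made up for illustration):

def extrapolate_to_zero(scaling, y):
    # Lagrange interpolation of the points (scaling[k], y[k]), evaluated
    # at a noise scale of zero.
    mitigated = 0.0
    for k, y_k in enumerate(y):
        product = 1.0
        for i in range(len(y)):
            if k != i:
                try:
                    product *= scaling[i] / (scaling[i] - scaling[k])
                except ZeroDivisionError as exc:
                    raise ZeroDivisionError(f"Error scaling: {scaling}") from exc
        mitigated += y_k * product
    return mitigated


# Exact for data that is linear in the noise scale: y = 1.0 - 0.1 * s.
print(extrapolate_to_zero([1.0, 2.0, 3.0], [0.9, 0.8, 0.7]))  # ~1.0
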
7 changes: 5 additions & 2 deletions mindquantum/algorithm/library/amplitude_encoder.py
@@ -12,7 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ============================================================================
-
 """Amplitude encoder for quantum machine learning."""
 
 import math
@@ -41,6 +40,7 @@ def controlled_gate(circuit, gate, t_qubit, c_qubits, zero_qubit):
         circuit += X.on(abs(control))
 
 
+# pylint: disable=too-many-locals
 def amplitude_encoder(x, n_qubits):
     """
     Quantum circuit for amplitude encoding.
@@ -109,7 +109,10 @@ def amplitude_encoder(x, n_qubits):
             controls.append(tmp_j * j)
         theta = 0
         if tree[(i - 1) // 2] > 1e-10:
-            amp_0 = tree[i] / tree[(i - 1) // 2]
+            try:
+                amp_0 = tree[i] / tree[(i - 1) // 2]
+            except ZeroDivisionError as exc:
+                raise ZeroDivisionError("Failed to set amplitude encoding.") from exc
             theta = 2 * math.acos(amp_0)
         if tree[i + 1] < 0 < math.sin(theta / 2):
             theta = -theta
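
For orientation, the guarded division computes the RY rotation angle from a binary tree of subtree norms: cos(theta/2) is the ratio of a child amplitude to its parent. A standalone sketch of just that rule (ry_angle is an illustrative name, not the repository's API):

import math


def ry_angle(parent: float, left_child: float) -> float:
    # cos(theta / 2) = left_child / parent, as in the tree walk above.
    if parent <= 1e-10:
        return 0.0  # negligible amplitude, no rotation needed
    try:
        amp_0 = left_child / parent
    except ZeroDivisionError as exc:
        raise ZeroDivisionError("Failed to set amplitude encoding.") from exc
    return 2 * math.acos(amp_0)


# Splitting a weight of 1.0 evenly between children gives theta = pi/2.
print(ry_angle(1.0, math.sqrt(0.5)))  # ~1.570796
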
5 changes: 4 additions & 1 deletion mindquantum/algorithm/nisq/barren_plateau.py
@@ -99,7 +99,10 @@ def run(grad_ops, n_sampling):
         half_l = len(gradients) // 2
         ori_var = np.var(gradients[:half_l])
         this_var = np.var(gradients[half_l:])
-        var_i = np.abs(ori_var - this_var) / ori_var
+        try:
+            var_i = np.abs(ori_var - this_var) / ori_var
+        except ZeroDivisionError as exc:
+            raise ZeroDivisionError("ori_val cannot be zero.") from exc
         step += 1
 
     return np.var(gradients)
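
The guarded line is a relative-change convergence test on the sampled gradient variance. A standalone sketch of the criterion (variance_converged and the tolerance are illustrative):

import numpy as np


def variance_converged(gradients, tol):
    # Compare the variance of the two halves of the samples; sampling can
    # stop once the relative change falls below tol.
    half_l = len(gradients) // 2
    ori_var = np.var(gradients[:half_l])
    this_var = np.var(gradients[half_l:])
    if ori_var == 0:
        raise ZeroDivisionError("ori_var cannot be zero.")
    return np.abs(ori_var - this_var) / ori_var < tol


rng = np.random.default_rng(42)
print(variance_converged(rng.normal(size=2000), tol=0.5))  # True (loose tol)
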
4 changes: 4 additions & 0 deletions mindquantum/core/operators/fermion_operator.py
@@ -225,10 +225,14 @@ def __rmul__(self, other: typing.Union["FermionOperator", PRConvertible]) -> "FermionOperator":
 
     def __truediv__(self, other: PRConvertible) -> "FermionOperator":
         """Divide a number."""
+        if other == 0.0:
+            raise ZeroDivisionError("other cannot be zero.")
         return self * (1.0 / other)
 
     def __itruediv__(self, other: PRConvertible) -> "FermionOperator":
         """Divide a number."""
+        if other == 0.0:
+            raise ZeroDivisionError("other cannot be zero.")
         self.__imul__(1.0 / other)
         return self
 
4 changes: 4 additions & 0 deletions mindquantum/core/operators/qubit_operator.py
@@ -157,10 +157,14 @@ def __rmul__(self, other: typing.Union["QubitOperator", PRConvertible]) -> "QubitOperator":
 
     def __truediv__(self, other: PRConvertible) -> "QubitOperator":
         """Divide a number."""
+        if other == 0.0:
+            raise ZeroDivisionError("other cannot be zero.")
         return self * (1.0 / other)
 
     def __itruediv__(self, other: PRConvertible) -> "QubitOperator":
         """Divide a number."""
+        if other == 0.0:
+            raise ZeroDivisionError("other cannot be zero.")
         self.__imul__(1.0 / other)
         return self
 
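
Both operator classes now fail fast instead of propagating 1.0 / 0.0. A usage sketch of the intended semantics, assuming a MindQuantum installation (the printed form of the operator may differ by version):

from mindquantum.core.operators import QubitOperator

op = QubitOperator('X0 Y1', 2.0)
print(op / 2.0)  # division is multiplication by 1.0 / other
try:
    op / 0.0
except ZeroDivisionError as exc:
    print(exc)   # other cannot be zero.
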
4 changes: 3 additions & 1 deletion mindquantum/io/display/bloch_plt_drawer.py
@@ -345,8 +345,10 @@ def state_to_cor(amp: np.ndarray):
     _check_input_type('amp', np.ndarray, amp)
     if amp.shape != (2,):
         raise ValueError(f"amp requires shape (2, ), but get {amp.shape}")
-    if np.sqrt(np.vdot(amp, amp)) != 0:
+    try:
         amp = amp / np.sqrt(np.vdot(amp, amp))
+    except ZeroDivisionError as exc:
+        raise ZeroDivisionError("Mode of amp is zero.") from exc
     global_phase = np.angle(amp[0])
     amp = amp / np.exp(1j * global_phase)
     theta = 2 * np.arccos(np.real(amp[0]))
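
The try/except replaces the earlier silent skip when the norm is zero. For context, a standalone sketch of the full amplitude-to-Bloch conversion this method performs (state_to_bloch is an illustrative name):

import numpy as np


def state_to_bloch(amp: np.ndarray):
    # Normalize, strip the global phase, then read the Bloch angles from
    # |psi> = cos(theta/2)|0> + e^{i*phi} sin(theta/2)|1>.
    norm = np.sqrt(np.vdot(amp, amp).real)
    if norm == 0:
        raise ZeroDivisionError("Mode of amp is zero.")
    amp = amp / norm
    amp = amp / np.exp(1j * np.angle(amp[0]))
    theta = 2 * np.arccos(np.real(amp[0]))
    phi = np.angle(amp[1])
    return np.sin(theta) * np.cos(phi), np.sin(theta) * np.sin(phi), np.cos(theta)


print(state_to_bloch(np.array([1, 1j]) / np.sqrt(2)))  # ~(0, 1, 0)
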
4 changes: 4 additions & 0 deletions mindquantum/io/display/measure_res_drawer.py
@@ -21,6 +21,8 @@
 
 
 def _trans(v, k, m):  # pylint: disable=invalid-name
+    if m == 0:
+        raise ZeroDivisionError("m cannot be zero.")
     return math.ceil(v / m * k)
 
 
@@ -39,6 +41,8 @@ def measure_text_drawer(res):  # pylint: disable=too-many-locals
     max_shot = max(res.data.values())
     if res.shots != 0:
         max_prop = max_shot / res.shots
+    else:
+        raise ValueError("shots cannot be zero.")
     if max_prop == 0:
         max_prop = 1
     if max_prop / 0.8 > 1:
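
_trans scales a count v onto a bar of k ticks relative to the maximum m, so m == 0 would otherwise divide by zero. A quick usage sketch (values are arbitrary):

import math


def _trans(v, k, m):
    if m == 0:
        raise ZeroDivisionError("m cannot be zero.")
    return math.ceil(v / m * k)


print(_trans(25, 20, 100))  # 5 ticks for a count at 25% of the maximum
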
4 changes: 4 additions & 0 deletions mindquantum/io/display/measure_res_svg_drawer.py
@@ -99,8 +99,12 @@ def __init__(self, res, style):
         self.style = style
         self.res = res
         self.table = SVGContainer()
+        if self.res.shots == 0:
+            raise ValueError("shots cannot be zero.")
         self.max_val = max(res.data.values()) / res.shots
         self.max_val = min(self.max_val / 0.9, self.max_val)
+        if self.max_val == 0:
+            raise ValueError("Error measure result.")
         self.f = (self.style['n_stick'] - 1) / self.max_val * self.style['v_dis']  # pylint: disable=invalid-name
         main_box = SVGContainer()
         text = self.build_title()
6 changes: 4 additions & 2 deletions mindquantum/io/qasm/openqasm.py
@@ -12,7 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ============================================================================
-
 """OpenQASM support module."""
 
 import numpy as np
@@ -59,7 +58,10 @@ def _extr_parameter(cmd):
             tmp = expre.split('/')
             if len(tmp) != 2:
                 raise ValueError(f"cannot parse cmd {cmd}")
-            expre = str(float(tmp[0]) / float(tmp[1]))
+            try:
+                expre = str(float(tmp[0]) / float(tmp[1]))
+            except ZeroDivisionError as exc:
+                raise ZeroDivisionError(f"Wrong cmd: {cmd}") from exc
         out.append(float(expre))
     return out[0] if len(all_expre) == 1 else out
 
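
The guarded branch handles 'a/b' angle expressions in OpenQASM gate arguments. A self-contained sketch of that parsing step, assuming symbolic constants such as pi were already replaced by numbers (parse_angle is an illustrative name):

def parse_angle(expre: str, cmd: str = "rz(3.14159265/2) q[0];") -> float:
    if '/' not in expre:
        return float(expre)
    tmp = expre.split('/')
    if len(tmp) != 2:
        raise ValueError(f"cannot parse cmd {cmd}")
    try:
        return float(tmp[0]) / float(tmp[1])
    except ZeroDivisionError as exc:
        raise ZeroDivisionError(f"Wrong cmd: {cmd}") from exc


print(parse_angle("3.14159265/2"))  # ~1.570796
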
15 changes: 12 additions & 3 deletions mindquantum/simulator/mqsim.py
@@ -461,13 +461,19 @@ def set_qs(self, quantum_state: np.ndarray):
             raise ValueError(f"{n_qubits} qubits vec does not match with simulation qubits ({self.n_qubits})")
         if self.name == "mqmatrix":
             if len(quantum_state.shape) == 1:
-                self.sim.set_qs(quantum_state / np.sqrt(np.sum(np.abs(quantum_state) ** 2)))
+                norm_factor = np.sqrt(np.sum(np.abs(quantum_state) ** 2))
+                if norm_factor == 0.0:
+                    raise ValueError("Wrong quantum state.")
+                self.sim.set_qs(quantum_state / norm_factor)
             elif len(quantum_state.shape) == 2:
                 if not np.allclose(quantum_state, quantum_state.T.conj()):
                     raise ValueError("density matrix must be hermitian.")
                 if (quantum_state.diagonal() < 0).any():
                     raise ValueError("the diagonal terms in density matrix cannot be negative.")
-                self.sim.set_dm(quantum_state / np.real(np.trace(quantum_state)))
+                norm_factor = np.real(np.trace(quantum_state))
+                if norm_factor == 0.0:
+                    raise ValueError("Wrong quantum state.")
+                self.sim.set_dm(quantum_state / norm_factor)
             else:
                 raise ValueError(
                     f"vec requires a 1-dimensional array, density matrix requires \
@@ -476,4 +482,7 @@ def set_qs(self, quantum_state: np.ndarray):
         else:
             if len(quantum_state.shape) != 1:
                 raise ValueError(f"vec requires a 1-dimensional array, but get {quantum_state.shape}")
-            self.sim.set_qs(quantum_state / np.sqrt(np.sum(np.abs(quantum_state) ** 2)))
+            norm_factor = np.sqrt(np.sum(np.abs(quantum_state) ** 2))
+            if norm_factor == 0.0:
+                raise ValueError("Wrong quantum state.")
+            self.sim.set_qs(quantum_state / norm_factor)
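
All three branches now share the same pattern: compute the normalization factor, reject a zero norm, then scale. A condensed standalone sketch of that rule (normalize_state is an illustrative helper, not the simulator API):

import numpy as np


def normalize_state(quantum_state: np.ndarray) -> np.ndarray:
    # State vectors are scaled to unit 2-norm, density matrices to unit
    # trace, mirroring the guards added to set_qs above.
    if quantum_state.ndim == 1:
        norm_factor = np.sqrt(np.sum(np.abs(quantum_state) ** 2))
    elif quantum_state.ndim == 2:
        norm_factor = np.real(np.trace(quantum_state))
    else:
        raise ValueError(f"unsupported shape {quantum_state.shape}")
    if norm_factor == 0.0:
        raise ValueError("Wrong quantum state.")
    return quantum_state / norm_factor


print(normalize_state(np.array([1.0, 1.0])))  # [0.70710678 0.70710678]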
