diff --git a/libs/qec/include/cudaq/qec/decoder.h b/libs/qec/include/cudaq/qec/decoder.h index 1a246b6f..77099d3d 100644 --- a/libs/qec/include/cudaq/qec/decoder.h +++ b/libs/qec/include/cudaq/qec/decoder.h @@ -1,5 +1,5 @@ /****************************************************************-*- C++ -*-**** - * Copyright (c) 2024 - 2025 NVIDIA Corporation & Affiliates. * + * Copyright (c) 2024 - 2026 NVIDIA Corporation & Affiliates. * * All rights reserved. * * * * This source code and the accompanying materials are made available under * @@ -11,6 +11,7 @@ #include "cuda-qx/core/extension_point.h" #include "cuda-qx/core/heterogeneous_map.h" #include "cuda-qx/core/tensor.h" +#include "sparse_binary_matrix.h" #include #include #include @@ -121,7 +122,8 @@ class async_decoder_result { /// arbitrary constructor parameters that can be unique to each specific /// decoder. class decoder - : public cudaqx::extension_point &, + : public cudaqx::extension_point { private: struct rt_impl; @@ -134,11 +136,9 @@ class decoder decoder() = delete; /// @brief Constructor - /// @param H Decoder's parity check matrix represented as a tensor. The tensor - /// is required be rank 2 and must be of dimensions \p syndrome_size x - /// \p block_size. - /// will use the same \p H. - decoder(const cudaqx::tensor &H); + /// @param H Decoder's parity check matrix represented as a sparse binary + /// matrix. + decoder(const cudaq::qec::sparse_binary_matrix &H); /// @brief Decode a single syndrome /// @param syndrome A vector of syndrome measurements where the floating point @@ -174,7 +174,7 @@ class decoder /// @brief This `get` overload supports default values. 
static std::unique_ptr - get(const std::string &name, const cudaqx::tensor &H, + get(const std::string &name, const cudaq::qec::sparse_binary_matrix &H, const cudaqx::heterogeneous_map ¶m_map = cudaqx::heterogeneous_map()); std::size_t get_block_size() { return block_size; } @@ -248,7 +248,7 @@ class decoder std::size_t syndrome_size = 0; /// @brief The decoder's parity check matrix - cudaqx::tensor H; + sparse_binary_matrix H; /// @brief The decoder's observable matrix in sparse format std::vector> O_sparse; @@ -400,6 +400,6 @@ inline void convert_vec_hard_to_soft(const std::vector> &in, } std::unique_ptr -get_decoder(const std::string &name, const cudaqx::tensor &H, +get_decoder(const std::string &name, const sparse_binary_matrix &H, const cudaqx::heterogeneous_map options = {}); } // namespace cudaq::qec diff --git a/libs/qec/include/cudaq/qec/pcm_utils.h b/libs/qec/include/cudaq/qec/pcm_utils.h index c9387450..7387f5b3 100644 --- a/libs/qec/include/cudaq/qec/pcm_utils.h +++ b/libs/qec/include/cudaq/qec/pcm_utils.h @@ -1,5 +1,5 @@ /****************************************************************-*- C++ -*-**** - * Copyright (c) 2025 NVIDIA Corporation & Affiliates. * + * Copyright (c) 2025 - 2026 NVIDIA Corporation & Affiliates. * * All rights reserved. * * * * This source code and the accompanying materials are made available under * @@ -111,6 +111,14 @@ get_sorted_pcm_column_indices(const cudaqx::tensor &pcm, bool pcm_is_sorted(const cudaqx::tensor &pcm, std::uint32_t num_syndromes_per_round = 0); +/// @brief Check if a PCM is sorted. +/// @param sparse_pcm The sparse PCM to check (in the same format as +/// get_sparse_pcm()) +/// @param num_syndromes_per_round The number of syndromes per round. +/// @return True if the PCM is sorted, false otherwise. +bool pcm_is_sorted(const std::vector> &sparse_pcm, + std::uint32_t num_syndromes_per_round = 0); + /// @brief Reorder the columns of a PCM according to the given column order. 
/// Note: this may return a subset of the columns in the original PCM if the /// \p column_order does not contain all of the columns in the original PCM. diff --git a/libs/qec/include/cudaq/qec/sparse_binary_matrix.h b/libs/qec/include/cudaq/qec/sparse_binary_matrix.h new file mode 100644 index 00000000..7bb1f862 --- /dev/null +++ b/libs/qec/include/cudaq/qec/sparse_binary_matrix.h @@ -0,0 +1,124 @@ +/****************************************************************-*- C++ -*-**** + * Copyright (c) 2026 NVIDIA Corporation & Affiliates. * + * All rights reserved. * + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. * + ******************************************************************************/ +#pragma once + +#include "cuda-qx/core/tensor.h" +#include +#include + +namespace cudaq::qec { + +/// @brief Storage layout for the sparse PCM: column-major (CSC) or row-major +/// (CSR). All non-zero entries are assumed to be 1; values are not stored. +enum class sparse_binary_matrix_layout { csc, csr }; + +/// @brief Sparse parity-check matrix in either CSC or CSR form. +/// Index types are uint32_t; non-zero values are always 1 and are not stored. +class sparse_binary_matrix { +public: + using index_type = std::uint32_t; + + /// @brief Construct a sparse PCM in CSC form. + /// @param num_rows Number of rows. + /// @param num_cols Number of columns. + /// @param col_ptrs Column pointer array (length num_cols + 1); column j has + /// indices in \p row_indices[col_ptrs[j] .. col_ptrs[j+1]-1]. + /// @param row_indices Row indices of non-zeros (length nnz). + static sparse_binary_matrix from_csc(index_type num_rows, index_type num_cols, + std::vector col_ptrs, + std::vector row_indices); + + /// @brief Construct a sparse PCM in CSR form. + /// @param num_rows Number of rows. + /// @param num_cols Number of columns. 
+ /// @param row_ptrs Row pointer array (length num_rows + 1); row i has + /// indices in \p col_indices[row_ptrs[i] .. row_ptrs[i+1]-1]. + /// @param col_indices Column indices of non-zeros (length nnz). + static sparse_binary_matrix from_csr(index_type num_rows, index_type num_cols, + std::vector row_ptrs, + std::vector col_indices); + + /// @brief Construct from nested CSC: \p nested[j] is the list of row indices + /// for column j; \p nested.size() must equal \p num_cols. + static sparse_binary_matrix + from_nested_csc(index_type num_rows, index_type num_cols, + const std::vector> &nested); + + /// @brief Construct from nested CSR: \p nested[i] is the list of column + /// indices for row i; \p nested.size() must equal \p num_rows. + static sparse_binary_matrix + from_nested_csr(index_type num_rows, index_type num_cols, + const std::vector> &nested); + + /// @brief Construct a sparse PCM from a dense PCM tensor (rows x columns). + /// Any non-zero entry is treated as 1. + /// @param dense Dense parity-check matrix; must have rank 2. + /// @param layout Storage layout for the sparse representation (default CSC). + sparse_binary_matrix( + const cudaqx::tensor &dense, + sparse_binary_matrix_layout layout = sparse_binary_matrix_layout::csc); + + // Default constructor + sparse_binary_matrix() = default; + + // Copy constructor + sparse_binary_matrix(const sparse_binary_matrix &) = default; + + // Move constructor + sparse_binary_matrix(sparse_binary_matrix &&) noexcept = default; + + // Copy assignment operator + sparse_binary_matrix &operator=(const sparse_binary_matrix &) = default; + + // Move assignment operator + sparse_binary_matrix &operator=(sparse_binary_matrix &&) noexcept = default; + + sparse_binary_matrix_layout layout() const { return layout_; } + index_type num_rows() const { return num_rows_; } + index_type num_cols() const { return num_cols_; } + index_type num_nnz() const { + return indices_.empty() ? 
0 : static_cast(indices_.size()); + } + + /// @brief For CSC: ptr has length num_cols+1; for CSR: ptr has length + /// num_rows+1. + const std::vector &ptr() const { return ptr_; } + /// @brief For CSC: row indices; for CSR: column indices. + const std::vector &indices() const { return indices_; } + + /// @brief Return a copy of this matrix in CSC layout. No-op if already CSC. + sparse_binary_matrix to_csc() const; + + /// @brief Return a copy of this matrix in CSR layout. No-op if already CSR. + sparse_binary_matrix to_csr() const; + + /// @brief Convert to a dense PCM tensor (rows x columns). Non-zero entries + /// are set to 1. + cudaqx::tensor to_dense() const; + + /// @brief Nested CSC: outer vector has size num_cols; inner vector for + /// column j lists row indices of non-zeros in that column. + std::vector> to_nested_csc() const; + + /// @brief Nested CSR: outer vector has size num_rows; inner vector for row i + /// lists column indices of non-zeros in that row. + std::vector> to_nested_csr() const; + +private: + sparse_binary_matrix(sparse_binary_matrix_layout layout, index_type num_rows, + index_type num_cols, std::vector ptr, + std::vector indices); + + sparse_binary_matrix_layout layout_ = sparse_binary_matrix_layout::csc; + index_type num_rows_ = 0; + index_type num_cols_ = 0; + std::vector ptr_; + std::vector indices_; +}; + +} // namespace cudaq::qec diff --git a/libs/qec/lib/CMakeLists.txt b/libs/qec/lib/CMakeLists.txt index 86b4e5dd..08e3c97a 100644 --- a/libs/qec/lib/CMakeLists.txt +++ b/libs/qec/lib/CMakeLists.txt @@ -1,5 +1,5 @@ # ============================================================================ # -# Copyright (c) 2024 - 2025 NVIDIA Corporation & Affiliates. # +# Copyright (c) 2024 - 2026 NVIDIA Corporation & Affiliates. # # All rights reserved. 
# # # # This source code and the accompanying materials are made available under # @@ -18,6 +18,7 @@ add_library(${LIBRARY_NAME} SHARED experiments.cpp pcm_utils.cpp plugin_loader.cpp + sparse_binary_matrix.cpp stabilizer_utils.cpp decoders/lut.cpp decoders/sliding_window.cpp diff --git a/libs/qec/lib/decoder.cpp b/libs/qec/lib/decoder.cpp index a7377cc3..afac33bf 100644 --- a/libs/qec/lib/decoder.cpp +++ b/libs/qec/lib/decoder.cpp @@ -1,5 +1,5 @@ /******************************************************************************* - * Copyright (c) 2022 - 2025 NVIDIA Corporation & Affiliates. * + * Copyright (c) 2022 - 2026 NVIDIA Corporation & Affiliates. * * All rights reserved. * * * * This source code and the accompanying materials are made available under * @@ -17,8 +17,10 @@ #include #include -INSTANTIATE_REGISTRY(cudaq::qec::decoder, const cudaqx::tensor &) -INSTANTIATE_REGISTRY(cudaq::qec::decoder, const cudaqx::tensor &, +INSTANTIATE_REGISTRY(cudaq::qec::decoder, + const cudaq::qec::sparse_binary_matrix &) +INSTANTIATE_REGISTRY(cudaq::qec::decoder, + const cudaq::qec::sparse_binary_matrix &, const cudaqx::heterogeneous_map &) // Include decoder implementations AFTER registry instantiation @@ -70,12 +72,10 @@ struct decoder::rt_impl { void decoder::rt_impl_deleter::operator()(rt_impl *p) const { delete p; } -decoder::decoder(const cudaqx::tensor &H) +decoder::decoder(const cudaq::qec::sparse_binary_matrix &H) : H(H), pimpl(std::unique_ptr(new rt_impl())) { - const auto H_shape = H.shape(); - assert(H_shape.size() == 2 && "H tensor must be of rank 2"); - syndrome_size = H_shape[0]; - block_size = H_shape[1]; + syndrome_size = H.num_rows(); + block_size = H.num_cols(); reset_decoder(); pimpl->persistent_detector_buffer.resize(this->syndrome_size); pimpl->persistent_soft_detector_buffer.resize(this->syndrome_size); @@ -130,7 +130,7 @@ decoder::decode_async(const std::vector &syndrome) { } std::unique_ptr -decoder::get(const std::string &name, const cudaqx::tensor 
&H, +decoder::get(const std::string &name, const cudaq::qec::sparse_binary_matrix &H, const cudaqx::heterogeneous_map ¶m_map) { auto [mutex, registry] = get_registry(); std::lock_guard lock(mutex); @@ -479,7 +479,7 @@ void decoder::reset_decoder() { } std::unique_ptr get_decoder(const std::string &name, - const cudaqx::tensor &H, + const sparse_binary_matrix &H, const cudaqx::heterogeneous_map options) { return decoder::get(name, H, options); } diff --git a/libs/qec/lib/decoders/lut.cpp b/libs/qec/lib/decoders/lut.cpp index 2501587a..16e98374 100644 --- a/libs/qec/lib/decoders/lut.cpp +++ b/libs/qec/lib/decoders/lut.cpp @@ -1,5 +1,5 @@ /******************************************************************************* - * Copyright (c) 2022 - 2025 NVIDIA Corporation & Affiliates. * + * Copyright (c) 2022 - 2026 NVIDIA Corporation & Affiliates. * * All rights reserved. * * * * This source code and the accompanying materials are made available under * @@ -48,7 +48,7 @@ class multi_error_lut : public decoder { bool decoding_time = false; public: - multi_error_lut(const cudaqx::tensor &H, + multi_error_lut(const cudaq::qec::sparse_binary_matrix &H, const cudaqx::heterogeneous_map ¶ms) : decoder(H) { if (params.contains("lut_error_depth")) { @@ -119,12 +119,7 @@ class multi_error_lut : public decoder { // For each error e, build a list of detectors that are set if the error // occurs. - std::vector> H_e2d(block_size); - for (std::size_t c = 0; c < block_size; c++) - for (std::size_t r = 0; r < syndrome_size; r++) - if (H.at({r, c}) != 0) - H_e2d[c].push_back(r); - + std::vector> H_e2d = H.to_nested_csc(); auto toggleSynForError = [&H_e2d](std::string &err_sig, std::size_t qErr) { for (std::size_t r : H_e2d[qErr]) err_sig[r] = err_sig[r] == '1' ? 
'0' : '1'; @@ -233,7 +228,7 @@ class multi_error_lut : public decoder { CUDAQ_EXTENSION_CUSTOM_CREATOR_FUNCTION( multi_error_lut, static std::unique_ptr create( - const cudaqx::tensor &H, + const cudaq::qec::sparse_binary_matrix &H, const cudaqx::heterogeneous_map ¶ms) { return std::make_unique(H, params); }) @@ -243,7 +238,7 @@ CUDAQ_REGISTER_TYPE(multi_error_lut) class single_error_lut : public multi_error_lut { public: - single_error_lut(const cudaqx::tensor &H, + single_error_lut(const cudaq::qec::sparse_binary_matrix &H, const cudaqx::heterogeneous_map ¶ms) : multi_error_lut(H, params) {} @@ -251,7 +246,7 @@ class single_error_lut : public multi_error_lut { CUDAQ_EXTENSION_CUSTOM_CREATOR_FUNCTION( single_error_lut, static std::unique_ptr create( - const cudaqx::tensor &H, + const cudaq::qec::sparse_binary_matrix &H, const cudaqx::heterogeneous_map ¶ms) { return std::make_unique(H, params); }) diff --git a/libs/qec/lib/decoders/plugins/example/single_error_lut_example.cpp b/libs/qec/lib/decoders/plugins/example/single_error_lut_example.cpp index 1df26ef9..f17b92df 100644 --- a/libs/qec/lib/decoders/plugins/example/single_error_lut_example.cpp +++ b/libs/qec/lib/decoders/plugins/example/single_error_lut_example.cpp @@ -1,5 +1,5 @@ /******************************************************************************* - * Copyright (c) 2022 - 2024 NVIDIA Corporation & Affiliates. * + * Copyright (c) 2022 - 2026 NVIDIA Corporation & Affiliates. * * All rights reserved. * * * * This source code and the accompanying materials are made available under * @@ -21,12 +21,13 @@ class single_error_lut_example : public decoder { std::map single_qubit_err_signatures; public: - single_error_lut_example(const cudaqx::tensor &H, + single_error_lut_example(const cudaq::qec::sparse_binary_matrix &H, const cudaqx::heterogeneous_map ¶ms) : decoder(H) { // Decoder-specific constructor arguments can be placed in `params`. 
// Build a lookup table for an error on each possible qubit + std::vector> H_d2e = H.to_nested_csr(); // For each qubit with a possible error, calculate an error signature. for (std::size_t qErr = 0; qErr < block_size; qErr++) { @@ -34,9 +35,8 @@ class single_error_lut_example : public decoder { for (std::size_t r = 0; r < syndrome_size; r++) { bool syndrome = 0; // Toggle syndrome on every "1" entry in the row. - // Except if there is an error on this qubit (c == qErr). - for (std::size_t c = 0; c < block_size; c++) - syndrome ^= (c != qErr) && H.at({r, c}); + for (std::uint32_t c : H_d2e[r]) + syndrome ^= (c != qErr); err_sig[r] = syndrome ? '1' : '0'; } // printf("Adding err_sig=%s for qErr=%lu\n", err_sig.c_str(), qErr); @@ -80,7 +80,7 @@ class single_error_lut_example : public decoder { CUDAQ_EXTENSION_CUSTOM_CREATOR_FUNCTION( single_error_lut_example, static std::unique_ptr create( - const cudaqx::tensor &H, + const cudaq::qec::sparse_binary_matrix &H, const cudaqx::heterogeneous_map ¶ms) { return std::make_unique(H, params); }) diff --git a/libs/qec/lib/decoders/plugins/pymatching/pymatching.cpp b/libs/qec/lib/decoders/plugins/pymatching/pymatching.cpp index 514adfb6..e793acb8 100644 --- a/libs/qec/lib/decoders/plugins/pymatching/pymatching.cpp +++ b/libs/qec/lib/decoders/plugins/pymatching/pymatching.cpp @@ -38,7 +38,7 @@ class pymatching : public decoder { } public: - pymatching(const cudaqx::tensor &H, + pymatching(const cudaq::qec::sparse_binary_matrix &H, const cudaqx::heterogeneous_map ¶ms) : decoder(H) { @@ -77,29 +77,30 @@ class pymatching : public decoder { } } - user_graph = pm::UserGraph(H.shape()[0]); + user_graph = pm::UserGraph(H.num_rows()); - auto sparse = cudaq::qec::dense_to_sparse(H); + std::vector> H_e2d = H.to_nested_csc(); std::vector observables; std::size_t col_idx = 0; - for (auto &col : sparse) { + for (std::size_t col = 0; col < block_size; col++) { double weight = 1.0; if (col_idx < error_rate_vec.size()) { weight =
-std::log(error_rate_vec[col_idx] / (1.0 - error_rate_vec[col_idx])); } - if (col.size() == 2) { - edge2col_idx[make_canonical_edge(col[0], col[1])] = col_idx; - user_graph.add_or_merge_edge(col[0], col[1], observables, weight, 0.0, - merge_strategy_enum); - } else if (col.size() == 1) { - edge2col_idx[make_canonical_edge(col[0], -1)] = col_idx; - user_graph.add_or_merge_boundary_edge(col[0], observables, weight, 0.0, - merge_strategy_enum); + if (H_e2d[col].size() == 2) { + edge2col_idx[make_canonical_edge(H_e2d[col][0], H_e2d[col][1])] = + col_idx; + user_graph.add_or_merge_edge(H_e2d[col][0], H_e2d[col][1], observables, + weight, 0.0, merge_strategy_enum); + } else if (H_e2d[col].size() == 1) { + edge2col_idx[make_canonical_edge(H_e2d[col][0], -1)] = col_idx; + user_graph.add_or_merge_boundary_edge(H_e2d[col][0], observables, + weight, 0.0, merge_strategy_enum); } else { - throw std::runtime_error( - "Invalid column in H: " + std::to_string(col_idx) + " has " + - std::to_string(col.size()) + " ones. Must have 1 or 2 ones."); + throw std::runtime_error("Invalid column in H: " + std::to_string(col) + + " has " + std::to_string(H_e2d[col].size()) + + " ones. Must have 1 or 2 ones."); } col_idx++; } @@ -137,7 +138,7 @@ class pymatching : public decoder { CUDAQ_EXTENSION_CUSTOM_CREATOR_FUNCTION( pymatching, static std::unique_ptr create( - const cudaqx::tensor &H, + const cudaq::qec::sparse_binary_matrix &H, const cudaqx::heterogeneous_map ¶ms) { return std::make_unique(H, params); }) diff --git a/libs/qec/lib/decoders/plugins/trt_decoder/trt_decoder.cpp b/libs/qec/lib/decoders/plugins/trt_decoder/trt_decoder.cpp index 17bbe59d..0a6163b0 100644 --- a/libs/qec/lib/decoders/plugins/trt_decoder/trt_decoder.cpp +++ b/libs/qec/lib/decoders/plugins/trt_decoder/trt_decoder.cpp @@ -1,5 +1,5 @@ /******************************************************************************* - * Copyright (c) 2025 NVIDIA Corporation & Affiliates. 
* + * Copyright (c) 2025 - 2026 NVIDIA Corporation & Affiliates. * * All rights reserved. * * * * This source code and the accompanying materials are made available under * @@ -325,7 +325,7 @@ class trt_decoder : public decoder { size_t output_size_per_sample_ = 0; public: - trt_decoder(const cudaqx::tensor &H, + trt_decoder(const cudaq::qec::sparse_binary_matrix &H, const cudaqx::heterogeneous_map ¶ms); virtual decoder_result decode(const std::vector &syndrome) override; @@ -337,7 +337,7 @@ class trt_decoder : public decoder { CUDAQ_EXTENSION_CUSTOM_CREATOR_FUNCTION( trt_decoder, static std::unique_ptr create( - const cudaqx::tensor &H, + const cudaq::qec::sparse_binary_matrix &H, const cudaqx::heterogeneous_map ¶ms) { return std::make_unique(H, params); }) @@ -390,7 +390,7 @@ struct trt_decoder::Impl { // trt_decoder method implementations // ============================================================================ -trt_decoder::trt_decoder(const cudaqx::tensor &H, +trt_decoder::trt_decoder(const cudaq::qec::sparse_binary_matrix &H, const cudaqx::heterogeneous_map ¶ms) : decoder(H), decoder_ready_(false) { diff --git a/libs/qec/lib/decoders/sliding_window.cpp b/libs/qec/lib/decoders/sliding_window.cpp index d936b923..edded6e9 100644 --- a/libs/qec/lib/decoders/sliding_window.cpp +++ b/libs/qec/lib/decoders/sliding_window.cpp @@ -1,5 +1,5 @@ /******************************************************************************* - * Copyright (c) 2025 NVIDIA Corporation & Affiliates. * + * Copyright (c) 2025 - 2026 NVIDIA Corporation & Affiliates. * * All rights reserved. 
* * * * This source code and the accompanying materials are made available under * @@ -16,6 +16,8 @@ namespace cudaq::qec { void sliding_window::validate_inputs() { + uint32_t num_rows = H.num_rows(); + uint32_t num_cols = H.num_cols(); if (window_size < 1 || window_size > num_rounds) { throw std::invalid_argument( fmt::format("sliding_window constructor: window_size ({}) must " @@ -38,7 +40,7 @@ void sliding_window::validate_inputs() { throw std::invalid_argument("sliding_window constructor: " "num_syndromes_per_round must be non-zero"); } - if (H.shape()[0] % num_syndromes_per_round != 0) { + if (num_rows % num_syndromes_per_round != 0) { throw std::invalid_argument( "sliding_window constructor: Number of rows in H must be divisible " "by num_syndromes_per_round"); @@ -57,7 +59,7 @@ void sliding_window::validate_inputs() { } // Enforce that H is already sorted. - if (!cudaq::qec::pcm_is_sorted(H, num_syndromes_per_round)) { + if (!cudaq::qec::pcm_is_sorted(H.to_nested_csc(), num_syndromes_per_round)) { throw std::invalid_argument("sliding_window constructor: PCM must be " "sorted. See cudaq::qec::simplify_pcm."); } @@ -176,9 +178,9 @@ void sliding_window::update_rw_next_read_index() { rw_next_read_index -= num_syndromes_per_window; } -sliding_window::sliding_window(const cudaqx::tensor &H, +sliding_window::sliding_window(const cudaq::qec::sparse_binary_matrix &H, const cudaqx::heterogeneous_map ¶ms) - : decoder(H), full_pcm(H) { + : decoder(H), full_pcm(H.to_dense()) { full_pcm_T = full_pcm.transpose(); // Fetch parameters from the params map.
window_size = params.get("window_size", window_size); @@ -196,18 +198,21 @@ sliding_window::sliding_window(const cudaqx::tensor &H, inner_decoder_params = params.get( "inner_decoder_params", inner_decoder_params); - num_rounds = H.shape()[0] / num_syndromes_per_round; + num_rounds = H.num_rows() / num_syndromes_per_round; num_windows = (num_rounds - window_size) / step_size + 1; num_syndromes_per_window = num_syndromes_per_round * window_size; validate_inputs(); + // FIXME - update downstream code to support sparse matrices. + auto H_dense = H.to_dense(); + // Create the inner decoders. for (std::size_t w = 0; w < num_windows; ++w) { std::size_t start_round = w * step_size; std::size_t end_round = start_round + window_size - 1; auto [H_round, first_column, last_column] = cudaq::qec::get_pcm_for_rounds( - H, num_syndromes_per_round, start_round, end_round, + H_dense, num_syndromes_per_round, start_round, end_round, straddle_start_round, straddle_end_round); first_columns.push_back(first_column); @@ -222,6 +227,14 @@ sliding_window::sliding_window(const cudaqx::tensor &H, "first_column = {}, last_column = {}", start_round, end_round, H_round.shape()[0], H_round.shape()[1], first_column, last_column); + + if (last_column - first_column + 1 != H_round.shape()[1]) { + throw std::invalid_argument( + fmt::format("last_column - first_column + 1 ({}) must be equal to " + "the number of columns in H_round ({})", + last_column - first_column + 1, H_round.shape()[1])); + } + auto inner_decoder = decoder::get(inner_decoder_name, H_round, inner_decoder_params_mod); inner_decoders.push_back(std::move(inner_decoder)); diff --git a/libs/qec/lib/decoders/sliding_window.h b/libs/qec/lib/decoders/sliding_window.h index 74513c8e..cfc87c43 100644 --- a/libs/qec/lib/decoders/sliding_window.h +++ b/libs/qec/lib/decoders/sliding_window.h @@ -1,5 +1,5 @@ /****************************************************************-*- C++ -*-**** - * Copyright (c) 2025 NVIDIA Corporation & Affiliates. 
* + * Copyright (c) 2025 - 2026 NVIDIA Corporation & Affiliates. * * All rights reserved. * * * * This source code and the accompanying materials are made available under * @@ -120,7 +120,7 @@ class sliding_window : public decoder { /// - num_syndromes_per_round: Number of syndromes per round /// - inner_decoder: Name of the inner decoder to use /// - inner_decoder_params: Parameters for the inner decoder (optional) - sliding_window(const cudaqx::tensor &H, + sliding_window(const cudaq::qec::sparse_binary_matrix &H, const cudaqx::heterogeneous_map ¶ms); /// @brief Decode a syndrome vector @@ -144,7 +144,7 @@ class sliding_window : public decoder { // Plugin registration macros CUDAQ_EXTENSION_CUSTOM_CREATOR_FUNCTION( sliding_window, static std::unique_ptr create( - const cudaqx::tensor &H, + const cudaq::qec::sparse_binary_matrix &H, const cudaqx::heterogeneous_map ¶ms) { return std::make_unique(H, params); }) diff --git a/libs/qec/lib/pcm_utils.cpp b/libs/qec/lib/pcm_utils.cpp index ea2faf87..f76ec20e 100644 --- a/libs/qec/lib/pcm_utils.cpp +++ b/libs/qec/lib/pcm_utils.cpp @@ -1,5 +1,5 @@ /******************************************************************************* - * Copyright (c) 2025 NVIDIA Corporation & Affiliates. * + * Copyright (c) 2025 - 2026 NVIDIA Corporation & Affiliates. * * All rights reserved. * * * * This source code and the accompanying materials are made available under * @@ -118,6 +118,17 @@ std::vector get_sorted_pcm_column_indices( return column_order; } +bool pcm_is_sorted(const std::vector> &sparse_pcm, + std::uint32_t num_syndromes_per_round) { + auto column_indices = + get_sorted_pcm_column_indices(sparse_pcm, num_syndromes_per_round); + auto num_cols = sparse_pcm.size(); + for (std::size_t c = 0; c < num_cols; c++) + if (column_indices[c] != c) + return false; + return true; +} + /// @brief Check if a PCM is sorted. /// @param pcm The PCM to check. /// @param num_syndromes_per_round The number of syndromes per round. 
diff --git a/libs/qec/lib/sparse_binary_matrix.cpp b/libs/qec/lib/sparse_binary_matrix.cpp new file mode 100644 index 00000000..142ecebb --- /dev/null +++ b/libs/qec/lib/sparse_binary_matrix.cpp @@ -0,0 +1,278 @@ +/******************************************************************************* + * Copyright (c) 2026 NVIDIA Corporation & Affiliates. * + * All rights reserved. * + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. * + ******************************************************************************/ + +#include "cudaq/qec/sparse_binary_matrix.h" +#include +#include +#include +#include + +namespace cudaq::qec { + +sparse_binary_matrix::sparse_binary_matrix(sparse_binary_matrix_layout layout, + index_type num_rows, + index_type num_cols, + std::vector ptr, + std::vector indices) + : layout_(layout), num_rows_(num_rows), num_cols_(num_cols), + ptr_(std::move(ptr)), indices_(std::move(indices)) {} + +sparse_binary_matrix +sparse_binary_matrix::from_csc(index_type num_rows, index_type num_cols, + std::vector col_ptrs, + std::vector row_indices) { + assert(col_ptrs.size() == static_cast(num_cols) + 1); + assert(col_ptrs.back() == row_indices.size()); + return sparse_binary_matrix(sparse_binary_matrix_layout::csc, num_rows, + num_cols, std::move(col_ptrs), + std::move(row_indices)); +} + +sparse_binary_matrix +sparse_binary_matrix::from_csr(index_type num_rows, index_type num_cols, + std::vector row_ptrs, + std::vector col_indices) { + assert(row_ptrs.size() == static_cast(num_rows) + 1); + assert(row_ptrs.back() == col_indices.size()); + return sparse_binary_matrix(sparse_binary_matrix_layout::csr, num_rows, + num_cols, std::move(row_ptrs), + std::move(col_indices)); +} + +sparse_binary_matrix sparse_binary_matrix::from_nested_csc( + index_type num_rows, index_type num_cols, + const std::vector> &nested) { + if (nested.size() != static_cast(num_cols)) 
{ + throw std::invalid_argument( + "sparse_pcm::from_nested_csc: nested.size() must equal num_cols"); + } + std::vector col_ptrs(num_cols + 1); + col_ptrs[0] = 0; + std::vector row_indices; + row_indices.reserve(nested.size() * 2); + for (index_type j = 0; j < num_cols; ++j) { + for (index_type r : nested[j]) { + if (r >= num_rows) { + throw std::invalid_argument( + "sparse_pcm::from_nested_csc: row index out of range"); + } + row_indices.push_back(r); + } + col_ptrs[j + 1] = static_cast(row_indices.size()); + } + return sparse_binary_matrix(sparse_binary_matrix_layout::csc, num_rows, + num_cols, std::move(col_ptrs), + std::move(row_indices)); +} + +sparse_binary_matrix sparse_binary_matrix::from_nested_csr( + index_type num_rows, index_type num_cols, + const std::vector> &nested) { + if (nested.size() != static_cast(num_rows)) { + throw std::invalid_argument( + "sparse_pcm::from_nested_csr: nested.size() must equal num_rows"); + } + std::vector row_ptrs(num_rows + 1); + row_ptrs[0] = 0; + std::vector col_indices; + col_indices.reserve(nested.size() * 2); + for (index_type i = 0; i < num_rows; ++i) { + for (index_type c : nested[i]) { + if (c >= num_cols) { + throw std::invalid_argument( + "sparse_pcm::from_nested_csr: column index out of range"); + } + col_indices.push_back(c); + } + row_ptrs[i + 1] = static_cast(col_indices.size()); + } + return sparse_binary_matrix(sparse_binary_matrix_layout::csr, num_rows, + num_cols, std::move(row_ptrs), + std::move(col_indices)); +} + +sparse_binary_matrix::sparse_binary_matrix( + const cudaqx::tensor &dense, + sparse_binary_matrix_layout layout) { + if (dense.rank() != 2) { + throw std::invalid_argument( + "sparse_pcm: dense PCM tensor must have rank 2"); + } + const std::size_t nrows = dense.shape()[0]; + const std::size_t ncols = dense.shape()[1]; + if (nrows > + static_cast(std::numeric_limits::max()) || + ncols > + static_cast(std::numeric_limits::max())) { + throw std::invalid_argument( + "sparse_pcm: dense PCM 
dimensions exceed index_type range"); + } + num_rows_ = static_cast(nrows); + num_cols_ = static_cast(ncols); + layout_ = layout; + + if (layout_ == sparse_binary_matrix_layout::csc) { + std::vector row_indices; + // row_indices.reserve(nrows * ncols / 2); + ptr_.resize(num_cols_ + 1); + ptr_[0] = 0; + for (index_type c = 0; c < num_cols_; ++c) { + for (index_type r = 0; r < num_rows_; ++r) { + if (dense.at( + {static_cast(r), static_cast(c)})) + row_indices.push_back(r); + } + ptr_[c + 1] = static_cast(row_indices.size()); + } + indices_ = std::move(row_indices); + } else { + std::vector col_indices; + // col_indices.reserve(nrows * ncols / 2); + ptr_.resize(num_rows_ + 1); + ptr_[0] = 0; + for (index_type r = 0; r < num_rows_; ++r) { + for (index_type c = 0; c < num_cols_; ++c) { + if (dense.at( + {static_cast(r), static_cast(c)})) + col_indices.push_back(c); + } + ptr_[r + 1] = static_cast(col_indices.size()); + } + indices_ = std::move(col_indices); + } +} + +sparse_binary_matrix sparse_binary_matrix::to_csc() const { + if (layout_ == sparse_binary_matrix_layout::csc) { + return sparse_binary_matrix(sparse_binary_matrix_layout::csc, num_rows_, + num_cols_, ptr_, indices_); + } + // CSR -> CSC: for each column j, gather row indices i where (i,j) is stored + // In CSR, row i has col indices in indices_[row_ptrs[i] .. 
row_ptrs[i+1]-1] + std::vector col_nnz(num_cols_, 0); + for (index_type i = 0; i < num_rows_; ++i) { + for (index_type p = ptr_[i]; p < ptr_[i + 1]; ++p) { + index_type j = indices_[p]; + ++col_nnz[j]; + } + } + std::vector col_ptrs(num_cols_ + 1); + col_ptrs[0] = 0; + for (index_type j = 0; j < num_cols_; ++j) { + col_ptrs[j + 1] = col_ptrs[j] + col_nnz[j]; + } + std::fill(col_nnz.begin(), col_nnz.end(), 0); + std::vector row_indices(indices_.size()); + for (index_type i = 0; i < num_rows_; ++i) { + for (index_type p = ptr_[i]; p < ptr_[i + 1]; ++p) { + index_type j = indices_[p]; + index_type q = col_ptrs[j] + col_nnz[j]; + row_indices[q] = i; + ++col_nnz[j]; + } + } + return sparse_binary_matrix(sparse_binary_matrix_layout::csc, num_rows_, + num_cols_, std::move(col_ptrs), + std::move(row_indices)); +} + +sparse_binary_matrix sparse_binary_matrix::to_csr() const { + if (layout_ == sparse_binary_matrix_layout::csr) { + return sparse_binary_matrix(sparse_binary_matrix_layout::csr, num_rows_, + num_cols_, ptr_, indices_); + } + // CSC -> CSR: for each row i, gather column indices j where (i,j) is stored + // In CSC, col j has row indices in indices_[col_ptrs[j] .. 
col_ptrs[j+1]-1] + std::vector row_nnz(num_rows_, 0); + for (index_type j = 0; j < num_cols_; ++j) { + for (index_type p = ptr_[j]; p < ptr_[j + 1]; ++p) { + index_type i = indices_[p]; + ++row_nnz[i]; + } + } + std::vector row_ptrs(num_rows_ + 1); + row_ptrs[0] = 0; + for (index_type i = 0; i < num_rows_; ++i) { + row_ptrs[i + 1] = row_ptrs[i] + row_nnz[i]; + } + std::fill(row_nnz.begin(), row_nnz.end(), 0); + std::vector col_indices(indices_.size()); + for (index_type j = 0; j < num_cols_; ++j) { + for (index_type p = ptr_[j]; p < ptr_[j + 1]; ++p) { + index_type i = indices_[p]; + index_type q = row_ptrs[i] + row_nnz[i]; + col_indices[q] = j; + ++row_nnz[i]; + } + } + return sparse_binary_matrix(sparse_binary_matrix_layout::csr, num_rows_, + num_cols_, std::move(row_ptrs), + std::move(col_indices)); +} + +cudaqx::tensor sparse_binary_matrix::to_dense() const { + cudaqx::tensor dense( + std::vector{num_rows_, num_cols_}); + for (std::size_t r = 0; r < num_rows_; ++r) { + std::memset(&dense.at({r, 0}), 0, num_cols_ * sizeof(std::uint8_t)); + } + if (layout_ == sparse_binary_matrix_layout::csc) { + for (index_type j = 0; j < num_cols_; ++j) { + for (index_type p = ptr_[j]; p < ptr_[j + 1]; ++p) { + index_type i = indices_[p]; + dense.at({i, j}) = 1; + } + } + } else { + for (index_type i = 0; i < num_rows_; ++i) { + for (index_type p = ptr_[i]; p < ptr_[i + 1]; ++p) { + index_type j = indices_[p]; + dense.at({i, j}) = 1; + } + } + } + return dense; +} + +std::vector> +sparse_binary_matrix::to_nested_csc() const { + std::vector> out(num_cols_); + if (layout_ == sparse_binary_matrix_layout::csc) { + for (index_type j = 0; j < num_cols_; ++j) { + out[j].assign(indices_.begin() + ptr_[j], indices_.begin() + ptr_[j + 1]); + } + } else { + for (index_type i = 0; i < num_rows_; ++i) { + for (index_type p = ptr_[i]; p < ptr_[i + 1]; ++p) { + index_type j = indices_[p]; + out[j].push_back(i); + } + } + } + return out; +} + +std::vector> 
+sparse_binary_matrix::to_nested_csr() const { + std::vector> out(num_rows_); + if (layout_ == sparse_binary_matrix_layout::csr) { + for (index_type i = 0; i < num_rows_; ++i) { + out[i].assign(indices_.begin() + ptr_[i], indices_.begin() + ptr_[i + 1]); + } + } else { + for (index_type j = 0; j < num_cols_; ++j) { + for (index_type p = ptr_[j]; p < ptr_[j + 1]; ++p) { + index_type i = indices_[p]; + out[i].push_back(j); + } + } + } + return out; +} + +} // namespace cudaq::qec diff --git a/libs/qec/python/bindings/py_decoder.cpp b/libs/qec/python/bindings/py_decoder.cpp index 55fbf849..4a164c74 100644 --- a/libs/qec/python/bindings/py_decoder.cpp +++ b/libs/qec/python/bindings/py_decoder.cpp @@ -1,5 +1,5 @@ /******************************************************************************* - * Copyright (c) 2022 - 2025 NVIDIA Corporation & Affiliates. * + * Copyright (c) 2022 - 2026 NVIDIA Corporation & Affiliates. * * All rights reserved. * * * * This source code and the accompanying materials are made available under * @@ -12,6 +12,7 @@ #include "cudaq/qec/detector_error_model.h" #include "cudaq/qec/pcm_utils.h" #include "cudaq/qec/plugin_loader.h" +#include "cudaq/qec/sparse_binary_matrix.h" #include "cudaq/runtime/logger/logger.h" #include #include @@ -31,9 +32,47 @@ using namespace cudaqx; namespace cudaq::qec { +/// Build sparse_binary_matrix from Python dict with keys layout, num_rows, +/// num_cols, nested (nested_csc or nested_csr format). +static sparse_binary_matrix +sparse_binary_matrix_from_py_dict(const py::dict &d) { + if (!d.contains("layout") || !d.contains("num_rows") || + !d.contains("num_cols") || !d.contains("nested")) + throw std::runtime_error( + "Sparse H dict must have keys: layout, num_rows, num_cols, " + "nested. 
Use layout \"nested_csc\" or \"nested_csr\"; nested " + "is a list of lists (row indices per column for csc, column " + "indices per row for csr)."); + std::string layout = d["layout"].cast(); + auto num_rows = static_cast( + d["num_rows"].cast()); + auto num_cols = static_cast( + d["num_cols"].cast()); + py::list nested_py = d["nested"].cast(); + std::vector> nested; + nested.reserve(nested_py.size()); + for (py::handle item : nested_py) { + py::list inner = item.cast(); + std::vector row; + row.reserve(inner.size()); + for (py::handle v : inner) + row.push_back( + static_cast(v.cast())); + nested.push_back(std::move(row)); + } + if (layout == "nested_csc") + return sparse_binary_matrix::from_nested_csc(num_rows, num_cols, nested); + if (layout == "nested_csr") + return sparse_binary_matrix::from_nested_csr(num_rows, num_cols, nested); + throw std::runtime_error( + "Sparse H dict layout must be \"nested_csc\" or \"nested_csr\"."); +} + class PyDecoder : public decoder { public: - PyDecoder(const py::array_t &H) : decoder(toTensor(H)) {} + PyDecoder(const py::array_t &H) + : decoder(cudaq::qec::sparse_binary_matrix( + toTensor(H), cudaq::qec::sparse_binary_matrix_layout::csr)) {} decoder_result decode(const std::vector &syndrome) override { PYBIND11_OVERRIDE_PURE(decoder_result, decoder, decode, syndrome); @@ -297,11 +336,49 @@ void bindDecoder(py::module &mod) { qecmod.def( "get_decoder", - [](const std::string &name, const py::array_t H, - const py::kwargs options) + [](const std::string &name, py::object H, const py::kwargs options) -> std::variant> { - if (PyDecoderRegistry::contains(name)) - return PyDecoderRegistry::get_decoder(name, H, options); + // Use sparse format internally; convert dense input to sparse. 
+ cudaq::qec::sparse_binary_matrix H_sparse; + + auto make_sparse_from_dense = [](py::array_t arr) { + py::buffer_info buf = arr.request(); + if (buf.ndim != 2 || buf.itemsize != sizeof(uint8_t)) + throw std::runtime_error( + "Parity check matrix must be 2-dimensional uint8."); + if (buf.strides[0] == buf.itemsize) + throw std::runtime_error( + "Parity check matrix must be in row-major order."); + std::vector shape = { + static_cast(buf.shape[0]), + static_cast(buf.shape[1])}; + cudaqx::tensor tensor_H(shape); + tensor_H.borrow(static_cast(buf.ptr), shape); + return cudaq::qec::sparse_binary_matrix( + tensor_H, cudaq::qec::sparse_binary_matrix_layout::csc); + }; + + if (py::isinstance(H)) + H_sparse = sparse_binary_matrix_from_py_dict(H.cast()); + else { + py::array_t arr = H.cast>(); + H_sparse = make_sparse_from_dense(arr); + } + + // Python-registered decoders expect dense numpy H; convert sparse -> + // dense only here. + // TODO: Update the Python-registered decoders to accept sparse H. 
+ if (PyDecoderRegistry::contains(name)) { + cudaqx::tensor dense_t = H_sparse.to_dense(); + std::vector sh = dense_t.shape(); + py::array_t H_dense({sh[0], sh[1]}); + py::buffer_info info = H_dense.request(); + uint8_t *out = static_cast(info.ptr); + for (std::size_t r = 0; r < sh[0]; ++r) + for (std::size_t c = 0; c < sh[1]; ++c) + out[r * sh[1] + c] = dense_t.at({r, c}); + return PyDecoderRegistry::get_decoder(name, H_dense, options); + } if (name == "tensor_network_decoder") { throw std::runtime_error( @@ -310,39 +387,13 @@ void bindDecoder(py::module &mod) { " pip install cudaq-qec[tensor-network-decoder]\n"); } - py::buffer_info buf = H.request(); - - if (buf.ndim != 2) { - throw std::runtime_error( - "Parity check matrix must be 2-dimensional."); - } - - if (buf.itemsize != sizeof(uint8_t)) { - throw std::runtime_error( - "Parity check matrix must be an array of uint8_t."); - } - - if (buf.strides[0] == buf.itemsize) { - throw std::runtime_error( - "Parity check matrix must be in row-major order, but " - "column-major order was detected."); - } - - // Create a vector of the array dimensions - std::vector shape; - for (py::ssize_t d : buf.shape) { - shape.push_back(static_cast(d)); - } - - // Create a tensor and borrow the NumPy array data - cudaqx::tensor tensor_H(shape); - tensor_H.borrow(static_cast(buf.ptr), shape); - - return get_decoder(name, tensor_H, hetMapFromKwargs(options)); + return get_decoder(name, H_sparse, hetMapFromKwargs(options)); }, - "Get a decoder by name with a given parity check matrix" - "and optional decoder-specific parameters. Note: the parity check matrix " - "must be in row-major order."); + py::arg("name"), py::arg("H"), + "Get a decoder by name. 
H can be a dense 2D NumPy array (uint8, " + "row-major) or a sparse dict: {\"layout\": \"nested_csc\" or " + "\"nested_csr\", \"num_rows\": int, \"num_cols\": int, \"nested\": list " + "of lists}."); qecmod.def( "get_sorted_pcm_column_indices", diff --git a/libs/qec/python/tests/test_decoder.py b/libs/qec/python/tests/test_decoder.py index 7c92f138..31bc015e 100644 --- a/libs/qec/python/tests/test_decoder.py +++ b/libs/qec/python/tests/test_decoder.py @@ -360,5 +360,209 @@ def test_decoder_pymatching_results(): assert np.array_equal(result.result, actual_errors) +# --- Sparse matrix (sparse_binary_matrix) tests --- + + +def dense_to_nested_csc(H): + """Convert dense binary matrix (numpy) to sparse dict with nested_csc layout.""" + rows, cols = H.shape + nested = [] + for j in range(cols): + nested.append([i for i in range(rows) if H[i, j] != 0]) + return { + "layout": "nested_csc", + "num_rows": int(rows), + "num_cols": int(cols), + "nested": nested, + } + + +def dense_to_nested_csr(H): + """Convert dense binary matrix (numpy) to sparse dict with nested_csr layout.""" + rows, cols = H.shape + nested = [] + for i in range(rows): + nested.append([j for j in range(cols) if H[i, j] != 0]) + return { + "layout": "nested_csr", + "num_rows": int(rows), + "num_cols": int(cols), + "nested": nested, + } + + +def scipy_sparse_to_nested_csc(sp): + """Convert scipy.sparse (csc or csr) to our sparse dict (nested_csc). Binary only.""" + sp = sp.tocsc() + num_rows, num_cols = sp.shape + nested = [] + for j in range(num_cols): + start, end = sp.indptr[j], sp.indptr[j + 1] + # scipy allows any dtype; we treat non-zero as 1 + nested.append(sp.indices[start:end].tolist()) + return { + "layout": "nested_csc", + "num_rows": int(num_rows), + "num_cols": int(num_cols), + "nested": nested, + } + + +def scipy_sparse_to_nested_csr(sp): + """Convert scipy.sparse (csr or csc) to our sparse dict (nested_csr). 
Binary only.""" + sp = sp.tocsr() + num_rows, num_cols = sp.shape + nested = [] + for i in range(num_rows): + start, end = sp.indptr[i], sp.indptr[i + 1] + nested.append(sp.indices[start:end].tolist()) + return { + "layout": "nested_csr", + "num_rows": int(num_rows), + "num_cols": int(num_cols), + "nested": nested, + } + + +def test_get_decoder_sparse_nested_csc(): + """get_decoder with sparse H as dict (nested_csc) produces a working decoder.""" + H_dense = create_test_matrix() + H_sparse = dense_to_nested_csc(H_dense) + decoder = qec.get_decoder("example_byod", H_sparse) + assert decoder is not None + assert hasattr(decoder, "decode") + syndrome = create_test_syndrome() + result = decoder.decode(syndrome) + assert hasattr(result, "converged") + assert hasattr(result, "result") + # example_byod returns result length = num_rows (syndrome size) + assert len(result.result) == H_dense.shape[0] + + +def test_get_decoder_sparse_nested_csr(): + """get_decoder with sparse H as dict (nested_csr) produces a working decoder.""" + H_dense = create_test_matrix() + H_sparse = dense_to_nested_csr(H_dense) + decoder = qec.get_decoder("example_byod", H_sparse) + assert decoder is not None + assert hasattr(decoder, "decode") + syndrome = create_test_syndrome() + result = decoder.decode(syndrome) + assert hasattr(result, "converged") + assert hasattr(result, "result") + # example_byod returns result length = num_rows (syndrome size) + assert len(result.result) == H_dense.shape[0] + + +def test_get_decoder_sparse_vs_dense_same_results(): + """Decoder from sparse H (nested_csc) behaves like dense H: same shape and validity.""" + np.random.seed(123) + H_dense = np.random.randint(0, 2, (8, 16)).astype(np.uint8) + syndrome = np.random.random(8).tolist() + + dec_dense = qec.get_decoder("example_byod", H_dense) + dec_sparse = qec.get_decoder("example_byod", dense_to_nested_csc(H_dense)) + + r_dense = dec_dense.decode(syndrome) + r_sparse = dec_sparse.decode(syndrome) + + assert 
r_dense.converged == r_sparse.converged + assert len(r_dense.result) == len(r_sparse.result) + assert all(0 <= x <= 1 for x in r_dense.result) + assert all(0 <= x <= 1 for x in r_sparse.result) + # example_byod may be non-deterministic; same H and syndrome can yield different floats + + +def test_get_decoder_sparse_nested_csr_same_as_csc(): + """Decoders from nested_csc and nested_csr (same matrix) produce valid results.""" + H_dense = create_test_matrix() + syndrome = create_test_syndrome() + + dec_csc = qec.get_decoder("example_byod", dense_to_nested_csc(H_dense)) + dec_csr = qec.get_decoder("example_byod", dense_to_nested_csr(H_dense)) + + r_csc = dec_csc.decode(syndrome) + r_csr = dec_csr.decode(syndrome) + + assert r_csc.converged == r_csr.converged + assert len(r_csc.result) == len(r_csr.result) == H_dense.shape[0] + assert all(0 <= x <= 1 for x in r_csc.result) + assert all(0 <= x <= 1 for x in r_csr.result) + # example_byod may be non-deterministic; CSC vs CSR path can yield different floats + + +def test_get_decoder_sparse_pymatching(): + """Native pymatching decoder accepts sparse H dict and returns valid result.""" + pcm = qec.generate_random_pcm( + n_rounds=2, + n_errs_per_round=10, + n_syndromes_per_round=5, + weight=2, + seed=7, + ) + pcm, _ = qec.simplify_pcm(pcm, np.ones(pcm.shape[1]), 10) + H_sparse = dense_to_nested_csc(pcm) + + columns = np.random.choice(pcm.shape[1], 3, replace=False) + syndrome = (np.sum(pcm[:, columns], axis=1) % 2).tolist() + + decoder = qec.get_decoder("pymatching", H_sparse) + result = decoder.decode(syndrome) + assert result.converged is True + assert len(result.result) == pcm.shape[1] + assert all(isinstance(x, float) for x in result.result) + assert all(0 <= x <= 1 for x in result.result) + + +def test_get_decoder_sparse_dict_missing_keys(): + """Sparse dict missing required keys raises RuntimeError.""" + with pytest.raises(RuntimeError) as exc_info: + qec.get_decoder("example_byod", {"layout": "nested_csc"}) + assert 
"layout" in str(exc_info.value) or "num_rows" in str( + exc_info.value) or "nested" in str(exc_info.value) + + +def test_get_decoder_sparse_dict_invalid_layout(): + """Sparse dict with invalid layout string raises RuntimeError.""" + H_sparse = dense_to_nested_csc(create_test_matrix()) + H_sparse["layout"] = "invalid_layout" + with pytest.raises(RuntimeError) as exc_info: + qec.get_decoder("example_byod", H_sparse) + assert "nested_csc" in str(exc_info.value) or "nested_csr" in str( + exc_info.value) + + +def test_get_decoder_sparse_from_scipy(): + """get_decoder accepts sparse H built from scipy.sparse (converted to our dict).""" + scipy_sparse = pytest.importorskip("scipy.sparse") + np.random.seed(42) + # Build a small binary matrix via scipy.sparse (e.g. random sparse) + H_dense = np.random.randint(0, 2, (6, 12)).astype(np.uint8) + H_scipy = scipy_sparse.csr_matrix(H_dense) + H_sparse_dict = scipy_sparse_to_nested_csc(H_scipy) + decoder = qec.get_decoder("example_byod", H_sparse_dict) + assert decoder is not None + syndrome = np.random.random(6).tolist() + result = decoder.decode(syndrome) + assert result.converged is True + assert len(result.result) == H_dense.shape[0] + assert all(0 <= x <= 1 for x in result.result) + + +def test_get_decoder_sparse_python_registered_decoder(): + """Python-registered decoder (e.g. example_byod) receives dense H when given sparse.""" + # When we pass sparse dict, backend converts to sparse_binary_matrix then for + # Python registry builds H_dense from to_dense() and passes to the Python + # factory. So the Python decoder still gets a numpy array (dense). 
+ H_dense = create_test_matrix() + H_sparse = dense_to_nested_csc(H_dense) + decoder = qec.get_decoder("example_byod", H_sparse) + # Decoder works; internal contract is that Python decoders get dense array + result = decoder.decode(create_test_syndrome()) + assert result.converged is True + # example_byod returns result length = num_rows + assert len(result.result) == H_dense.shape[0] + + if __name__ == "__main__": pytest.main() diff --git a/libs/qec/unittests/CMakeLists.txt b/libs/qec/unittests/CMakeLists.txt index 90ae5882..3a3092eb 100644 --- a/libs/qec/unittests/CMakeLists.txt +++ b/libs/qec/unittests/CMakeLists.txt @@ -48,6 +48,11 @@ target_link_libraries(test_qec PRIVATE GTest::gtest_main cudaq-qec cudaq::cudaq- add_dependencies(CUDAQXQECUnitTests test_qec) gtest_discover_tests(test_qec) +add_executable(test_sparse_binary_matrix test_sparse_binary_matrix.cpp) +target_link_libraries(test_sparse_binary_matrix PRIVATE GTest::gtest_main cudaq-qec + cudaq::cudaq) +add_dependencies(CUDAQXQECUnitTests test_sparse_binary_matrix) +gtest_discover_tests(test_sparse_binary_matrix) # TensorRT decoder test is only built for x86 architectures if(CUDAQ_QEC_BUILD_TRT_DECODER AND CMAKE_SYSTEM_PROCESSOR MATCHES "(x86_64)|(AMD64|amd64)|(^i.86$)") diff --git a/libs/qec/unittests/decoders/sample_decoder.cpp b/libs/qec/unittests/decoders/sample_decoder.cpp index bd7ae7b5..408a18d0 100644 --- a/libs/qec/unittests/decoders/sample_decoder.cpp +++ b/libs/qec/unittests/decoders/sample_decoder.cpp @@ -1,5 +1,5 @@ /******************************************************************************* - * Copyright (c) 2022 - 2024 NVIDIA Corporation & Affiliates. * + * Copyright (c) 2022 - 2026 NVIDIA Corporation & Affiliates. * * All rights reserved. * * * * This source code and the accompanying materials are made available under * @@ -17,7 +17,7 @@ namespace cudaq::qec { /// bare bones custom decoder based on the `cudaqx::qec::decoder` interface. 
class sample_decoder : public decoder { public: - sample_decoder(const cudaqx::tensor &H, + sample_decoder(const cudaq::qec::sparse_binary_matrix &H, const cudaqx::heterogeneous_map ¶ms) : decoder(H) { // Decoder-specific constructor arguments can be placed in `params`. @@ -35,7 +35,7 @@ class sample_decoder : public decoder { CUDAQ_EXTENSION_CUSTOM_CREATOR_FUNCTION( sample_decoder, static std::unique_ptr create( - const cudaqx::tensor &H, + const cudaq::qec::sparse_binary_matrix &H, const cudaqx::heterogeneous_map ¶ms) { return std::make_unique(H, params); }) diff --git a/libs/qec/unittests/test_decoders.cpp b/libs/qec/unittests/test_decoders.cpp index 2c8cd709..b6e9424a 100644 --- a/libs/qec/unittests/test_decoders.cpp +++ b/libs/qec/unittests/test_decoders.cpp @@ -1,5 +1,5 @@ /******************************************************************************* - * Copyright (c) 2022 - 2025 NVIDIA Corporation & Affiliates. * + * Copyright (c) 2022 - 2026 NVIDIA Corporation & Affiliates. * * All rights reserved. 
* * * * This source code and the accompanying materials are made available under * @@ -104,7 +104,8 @@ TEST(SampleDecoder, checkAPI) { std::size_t block_size = 10; std::size_t syndrome_size = 4; cudaqx::tensor H({syndrome_size, block_size}); - auto d = cudaq::qec::decoder::get("sample_decoder", H); + auto H_sparse = cudaq::qec::sparse_binary_matrix(H); + auto d = cudaq::qec::decoder::get("sample_decoder", H_sparse); std::vector syndromes(syndrome_size); auto dec_result = d->decode(syndromes); ASSERT_EQ(dec_result.result.size(), block_size); @@ -651,7 +652,8 @@ TEST(DecoderTest, GetBlockSizeAndSyndromeSize) { } // Create a decoder instance - auto decoder = cudaq::qec::decoder::get("sample_decoder", H); + auto H_sparse = cudaq::qec::sparse_binary_matrix(H); + auto decoder = cudaq::qec::decoder::get("sample_decoder", H_sparse); ASSERT_NE(decoder, nullptr); // Test get_block_size() returns the correct block size @@ -665,7 +667,8 @@ TEST(DecoderTest, GetBlockSizeAndSyndromeSize) { std::size_t new_syndrome_size = 12; cudaqx::tensor H2({new_syndrome_size, new_block_size}); - auto decoder2 = cudaq::qec::decoder::get("sample_decoder", H2); + auto H_sparse2 = cudaq::qec::sparse_binary_matrix(H2); + auto decoder2 = cudaq::qec::decoder::get("sample_decoder", H_sparse2); ASSERT_NE(decoder2, nullptr); EXPECT_EQ(decoder2->get_block_size(), new_block_size); @@ -689,6 +692,8 @@ TEST(DecoderRegistryTest, SingleParameterRegistryDirect) { } } + auto H_sparse = cudaq::qec::sparse_binary_matrix(H); + // Test that the single-parameter registry exists and can be accessed // This directly tests line 18: INSTANTIATE_REGISTRY(cudaq::qec::decoder, // const cudaqx::tensor &) @@ -698,7 +703,8 @@ TEST(DecoderRegistryTest, SingleParameterRegistryDirect) { // registry auto single_param_decoder = cudaqx::extension_point< cudaq::qec::decoder, - const cudaqx::tensor &>::get("sample_decoder", H); + const cudaq::qec::sparse_binary_matrix &>::get("sample_decoder", + H_sparse); 
ASSERT_NE(single_param_decoder, nullptr); @@ -722,7 +728,8 @@ TEST(DecoderRegistryTest, SingleParameterRegistryDirect) { // Test that we can check if extensions are registered in the single-parameter // registry auto registered_single = cudaqx::extension_point< - cudaq::qec::decoder, const cudaqx::tensor &>::get_registered(); + cudaq::qec::decoder, + const cudaq::qec::sparse_binary_matrix &>::get_registered(); // The registry should exist (even if empty), proving line 18 instantiation // works This test passes if no exceptions are thrown, proving the diff --git a/libs/qec/unittests/test_sparse_binary_matrix.cpp b/libs/qec/unittests/test_sparse_binary_matrix.cpp new file mode 100644 index 00000000..ec1d8ae2 --- /dev/null +++ b/libs/qec/unittests/test_sparse_binary_matrix.cpp @@ -0,0 +1,384 @@ +/******************************************************************************* + * Copyright (c) 2025 - 2026 NVIDIA Corporation & Affiliates. * + * All rights reserved. * + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. 
* + ******************************************************************************/ + +#include "cudaq/qec/pcm_utils.h" +#include "cudaq/qec/sparse_binary_matrix.h" +#include +#include +#include + +namespace cudaq::qec { +namespace { + +using index_type = sparse_binary_matrix::index_type; + +bool dense_pcm_equal(const cudaqx::tensor &a, + const cudaqx::tensor &b) { + if (a.rank() != 2 || b.rank() != 2) + return false; + if (a.shape()[0] != b.shape()[0] || a.shape()[1] != b.shape()[1]) + return false; + for (std::size_t r = 0; r < a.shape()[0]; ++r) + for (std::size_t c = 0; c < a.shape()[1]; ++c) + if (a.at({r, c}) != b.at({r, c})) + return false; + return true; +} + +// ----------------------------------------------------------------------------- +// Dense <-> sparse_binary_matrix (CSC) +// ----------------------------------------------------------------------------- + +TEST(SparseBinaryMatrix, DenseToCscToDense_Small) { + // 3x4 matrix: rows x cols + std::vector data = { + 1, 0, 1, 0, // row 0 + 0, 1, 1, 0, // row 1 + 1, 1, 0, 1 // row 2 + }; + cudaqx::tensor dense({3, 4}); + dense.copy(data.data(), {3, 4}); + + sparse_binary_matrix sp(dense, sparse_binary_matrix_layout::csc); + EXPECT_EQ(sp.layout(), sparse_binary_matrix_layout::csc); + EXPECT_EQ(sp.num_rows(), 3); + EXPECT_EQ(sp.num_cols(), 4); + EXPECT_EQ(sp.num_nnz(), 7); + + auto back = sp.to_dense(); + EXPECT_TRUE(dense_pcm_equal(dense, back)); +} + +TEST(SparseBinaryMatrix, DenseToCsrToDense_Small) { + std::vector data = {1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1}; + cudaqx::tensor dense({3, 4}); + dense.copy(data.data(), {3, 4}); + + sparse_binary_matrix sp(dense, sparse_binary_matrix_layout::csr); + EXPECT_EQ(sp.layout(), sparse_binary_matrix_layout::csr); + EXPECT_EQ(sp.num_rows(), 3); + EXPECT_EQ(sp.num_cols(), 4); + EXPECT_EQ(sp.num_nnz(), 7); + + auto back = sp.to_dense(); + EXPECT_TRUE(dense_pcm_equal(dense, back)); +} + +TEST(SparseBinaryMatrix, FromCscToDense) { + // 2x3 matrix: col0 has rows {0,1}, col1 
has {}, col2 has {1} + index_type num_rows = 2, num_cols = 3; + std::vector col_ptrs = {0, 2, 2, 3}; + std::vector row_indices = {0, 1, 1}; + + auto sp = + sparse_binary_matrix::from_csc(num_rows, num_cols, col_ptrs, row_indices); + auto dense = sp.to_dense(); + + EXPECT_EQ(dense.shape()[0], 2); + EXPECT_EQ(dense.shape()[1], 3); + EXPECT_EQ(dense.at({0, 0}), 1); + EXPECT_EQ(dense.at({1, 0}), 1); + EXPECT_EQ(dense.at({0, 1}), 0); + EXPECT_EQ(dense.at({1, 1}), 0); + EXPECT_EQ(dense.at({0, 2}), 0); + EXPECT_EQ(dense.at({1, 2}), 1); +} + +TEST(SparseBinaryMatrix, FromCsrToDense) { + // 2x3 matrix: row0 has cols {0}, row1 has cols {0, 2} + index_type num_rows = 2, num_cols = 3; + std::vector row_ptrs = {0, 1, 3}; + std::vector col_indices = {0, 0, 2}; + + auto sp = + sparse_binary_matrix::from_csr(num_rows, num_cols, row_ptrs, col_indices); + auto dense = sp.to_dense(); + + EXPECT_EQ(dense.shape()[0], 2); + EXPECT_EQ(dense.shape()[1], 3); + EXPECT_EQ(dense.at({0, 0}), 1); + EXPECT_EQ(dense.at({0, 1}), 0); + EXPECT_EQ(dense.at({0, 2}), 0); + EXPECT_EQ(dense.at({1, 0}), 1); + EXPECT_EQ(dense.at({1, 1}), 0); + EXPECT_EQ(dense.at({1, 2}), 1); +} + +// ----------------------------------------------------------------------------- +// CSC <-> CSR conversion round-trip +// ----------------------------------------------------------------------------- + +TEST(SparseBinaryMatrix, CscToCsrToCsc_RoundTrip) { + std::vector data = {1, 0, 1, 0, 1, 1, 0, 1, 0, 1}; + cudaqx::tensor dense({2, 5}); + dense.copy(data.data(), {2, 5}); + + sparse_binary_matrix csc(dense, sparse_binary_matrix_layout::csc); + sparse_binary_matrix csr = csc.to_csr(); + sparse_binary_matrix csc2 = csr.to_csc(); + + EXPECT_EQ(csc2.layout(), sparse_binary_matrix_layout::csc); + auto back = csc2.to_dense(); + EXPECT_TRUE(dense_pcm_equal(dense, back)); +} + +TEST(SparseBinaryMatrix, CsrToCscToCsr_RoundTrip) { + std::vector data = {1, 1, 0, 0, 1, 0, 1, 1}; + cudaqx::tensor dense({2, 4}); + dense.copy(data.data(), {2, 
4}); + + sparse_binary_matrix csr(dense, sparse_binary_matrix_layout::csr); + sparse_binary_matrix csc = csr.to_csc(); + sparse_binary_matrix csr2 = csc.to_csr(); + + EXPECT_EQ(csr2.layout(), sparse_binary_matrix_layout::csr); + auto back = csr2.to_dense(); + EXPECT_TRUE(dense_pcm_equal(dense, back)); +} + +// ----------------------------------------------------------------------------- +// Edge cases: empty, 1x1, all zeros +// ----------------------------------------------------------------------------- + +TEST(SparseBinaryMatrix, EmptyMatrix) { + cudaqx::tensor dense({0, 0}); + sparse_binary_matrix sp(dense, sparse_binary_matrix_layout::csc); + EXPECT_EQ(sp.num_rows(), 0); + EXPECT_EQ(sp.num_cols(), 0); + EXPECT_EQ(sp.num_nnz(), 0); + auto back = sp.to_dense(); + EXPECT_EQ(back.shape()[0], 0); + EXPECT_EQ(back.shape()[1], 0); +} + +TEST(SparseBinaryMatrix, SingleElementZero) { + cudaqx::tensor dense({1, 1}); + dense.at({0, 0}) = 0; + sparse_binary_matrix sp(dense, sparse_binary_matrix_layout::csc); + EXPECT_EQ(sp.num_nnz(), 0); + auto back = sp.to_dense(); + EXPECT_EQ(back.at({0, 0}), 0); +} + +TEST(SparseBinaryMatrix, SingleElementOne) { + cudaqx::tensor dense({1, 1}); + dense.at({0, 0}) = 1; + sparse_binary_matrix sp(dense, sparse_binary_matrix_layout::csc); + EXPECT_EQ(sp.num_nnz(), 1); + auto back = sp.to_dense(); + EXPECT_EQ(back.at({0, 0}), 1); +} + +TEST(SparseBinaryMatrix, Small2x2) { + std::vector data = {1, 1, 1, 0}; + cudaqx::tensor dense({2, 2}); + dense.copy(data.data(), {2, 2}); + + for (auto layout : + {sparse_binary_matrix_layout::csc, sparse_binary_matrix_layout::csr}) { + sparse_binary_matrix sp(dense, layout); + auto back = sp.to_dense(); + EXPECT_TRUE(dense_pcm_equal(dense, back)); + } +} + +// ----------------------------------------------------------------------------- +// Random PCM via generate_random_pcm +// ----------------------------------------------------------------------------- + +TEST(SparseBinaryMatrix, RandomPcm_CscRoundTrip) { 
+ std::mt19937_64 rng(12345); + auto dense = generate_random_pcm(2, 3, 4, 2, std::move(rng)); + ASSERT_EQ(dense.rank(), 2); + + sparse_binary_matrix sp(dense, sparse_binary_matrix_layout::csc); + auto back = sp.to_dense(); + EXPECT_TRUE(dense_pcm_equal(dense, back)); +} + +TEST(SparseBinaryMatrix, RandomPcm_CsrRoundTrip) { + std::mt19937_64 rng(67890); + auto dense = generate_random_pcm(3, 2, 3, 2, std::move(rng)); + ASSERT_EQ(dense.rank(), 2); + + sparse_binary_matrix sp(dense, sparse_binary_matrix_layout::csr); + auto back = sp.to_dense(); + EXPECT_TRUE(dense_pcm_equal(dense, back)); +} + +TEST(SparseBinaryMatrix, RandomPcm_CscToCsrToDense) { + std::mt19937_64 rng(42); + auto dense = generate_random_pcm(2, 4, 3, 2, std::move(rng)); + sparse_binary_matrix csc(dense, sparse_binary_matrix_layout::csc); + sparse_binary_matrix csr = csc.to_csr(); + auto back = csr.to_dense(); + EXPECT_TRUE(dense_pcm_equal(dense, back)); +} + +TEST(SparseBinaryMatrix, RandomPcm_CsrToCscToDense) { + std::mt19937_64 rng(99); + auto dense = generate_random_pcm(4, 3, 2, 2, std::move(rng)); + sparse_binary_matrix csr(dense, sparse_binary_matrix_layout::csr); + sparse_binary_matrix csc = csr.to_csc(); + auto back = csc.to_dense(); + EXPECT_TRUE(dense_pcm_equal(dense, back)); +} + +// ----------------------------------------------------------------------------- +// Nested CSC / CSR +// ----------------------------------------------------------------------------- + +TEST(SparseBinaryMatrix, ToNestedCsc_FromCsc) { + // 2x3 matrix: col0 rows {0,1}, col1 rows {}, col2 rows {1} + index_type num_rows = 2, num_cols = 3; + std::vector col_ptrs = {0, 2, 2, 3}; + std::vector row_indices = {0, 1, 1}; + + auto sp = + sparse_binary_matrix::from_csc(num_rows, num_cols, col_ptrs, row_indices); + auto nested = sp.to_nested_csc(); + + ASSERT_EQ(nested.size(), 3); + EXPECT_EQ(nested[0], (std::vector{0, 1})); + EXPECT_TRUE(nested[1].empty()); + EXPECT_EQ(nested[2], (std::vector{1})); +} + 
+TEST(SparseBinaryMatrix, ToNestedCsr_FromCsr) { + // 2x3 matrix: row0 cols {0}, row1 cols {0, 2} + index_type num_rows = 2, num_cols = 3; + std::vector row_ptrs = {0, 1, 3}; + std::vector col_indices = {0, 0, 2}; + + auto sp = + sparse_binary_matrix::from_csr(num_rows, num_cols, row_ptrs, col_indices); + auto nested = sp.to_nested_csr(); + + ASSERT_EQ(nested.size(), 2); + EXPECT_EQ(nested[0], (std::vector{0})); + EXPECT_EQ(nested[1], (std::vector{0, 2})); +} + +TEST(SparseBinaryMatrix, ToNestedCsc_RoundTrip) { + std::vector data = {1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1}; + cudaqx::tensor dense({3, 4}); + dense.copy(data.data(), {3, 4}); + + for (auto layout : + {sparse_binary_matrix_layout::csc, sparse_binary_matrix_layout::csr}) { + sparse_binary_matrix sp(dense, layout); + auto nested = sp.to_nested_csc(); + ASSERT_EQ(nested.size(), sp.num_cols()); + index_type nnz = 0; + std::vector col_ptrs(sp.num_cols() + 1); + col_ptrs[0] = 0; + std::vector row_indices; + for (index_type j = 0; j < sp.num_cols(); ++j) { + nnz += static_cast(nested[j].size()); + col_ptrs[j + 1] = col_ptrs[j] + static_cast(nested[j].size()); + row_indices.insert(row_indices.end(), nested[j].begin(), nested[j].end()); + } + EXPECT_EQ(nnz, sp.num_nnz()); + auto sp2 = sparse_binary_matrix::from_csc(sp.num_rows(), sp.num_cols(), + std::move(col_ptrs), + std::move(row_indices)); + EXPECT_TRUE(dense_pcm_equal(dense, sp2.to_dense())); + } +} + +TEST(SparseBinaryMatrix, ToNestedCsr_RoundTrip) { + std::vector data = {1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1}; + cudaqx::tensor dense({3, 4}); + dense.copy(data.data(), {3, 4}); + + for (auto layout : + {sparse_binary_matrix_layout::csc, sparse_binary_matrix_layout::csr}) { + sparse_binary_matrix sp(dense, layout); + auto nested = sp.to_nested_csr(); + ASSERT_EQ(nested.size(), sp.num_rows()); + index_type nnz = 0; + std::vector row_ptrs(sp.num_rows() + 1); + row_ptrs[0] = 0; + std::vector col_indices; + for (index_type i = 0; i < sp.num_rows(); ++i) { + nnz += 
static_cast(nested[i].size()); + row_ptrs[i + 1] = row_ptrs[i] + static_cast(nested[i].size()); + col_indices.insert(col_indices.end(), nested[i].begin(), nested[i].end()); + } + EXPECT_EQ(nnz, sp.num_nnz()); + auto sp2 = sparse_binary_matrix::from_csr(sp.num_rows(), sp.num_cols(), + std::move(row_ptrs), + std::move(col_indices)); + EXPECT_TRUE(dense_pcm_equal(dense, sp2.to_dense())); + } +} + +TEST(SparseBinaryMatrix, FromNestedCsc_MatchesFromCsc) { + index_type num_rows = 2, num_cols = 3; + std::vector> nested = {{0, 1}, {}, {1}}; + auto sp = sparse_binary_matrix::from_nested_csc(num_rows, num_cols, nested); + EXPECT_EQ(sp.num_rows(), num_rows); + EXPECT_EQ(sp.num_cols(), num_cols); + EXPECT_EQ(sp.num_nnz(), 3); + std::vector col_ptrs = {0, 2, 2, 3}; + std::vector row_indices = {0, 1, 1}; + auto sp_ref = + sparse_binary_matrix::from_csc(num_rows, num_cols, col_ptrs, row_indices); + EXPECT_TRUE(dense_pcm_equal(sp.to_dense(), sp_ref.to_dense())); +} + +TEST(SparseBinaryMatrix, FromNestedCsr_MatchesFromCsr) { + index_type num_rows = 2, num_cols = 3; + std::vector> nested = {{0}, {0, 2}}; + auto sp = sparse_binary_matrix::from_nested_csr(num_rows, num_cols, nested); + EXPECT_EQ(sp.num_rows(), num_rows); + EXPECT_EQ(sp.num_cols(), num_cols); + EXPECT_EQ(sp.num_nnz(), 3); + std::vector row_ptrs = {0, 1, 3}; + std::vector col_indices = {0, 0, 2}; + auto sp_ref = + sparse_binary_matrix::from_csr(num_rows, num_cols, row_ptrs, col_indices); + EXPECT_TRUE(dense_pcm_equal(sp.to_dense(), sp_ref.to_dense())); +} + +TEST(SparseBinaryMatrix, FromNestedCsc_RoundTrip) { + std::vector data = {1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1}; + cudaqx::tensor dense({3, 4}); + dense.copy(data.data(), {3, 4}); + sparse_binary_matrix sp(dense, sparse_binary_matrix_layout::csc); + const auto nested = sp.to_nested_csc(); + auto sp2 = sparse_binary_matrix::from_nested_csc(sp.num_rows(), sp.num_cols(), + nested); + EXPECT_TRUE(dense_pcm_equal(dense, sp2.to_dense())); +} + +TEST(SparseBinaryMatrix, 
FromNestedCsr_RoundTrip) { + std::vector data = {1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1}; + cudaqx::tensor dense({3, 4}); + dense.copy(data.data(), {3, 4}); + sparse_binary_matrix sp(dense, sparse_binary_matrix_layout::csr); + const auto nested = sp.to_nested_csr(); + auto sp2 = sparse_binary_matrix::from_nested_csr(sp.num_rows(), sp.num_cols(), + nested); + EXPECT_TRUE(dense_pcm_equal(dense, sp2.to_dense())); +} + +TEST(SparseBinaryMatrix, FromNestedCsc_InvalidSizeThrows) { + std::vector> nested = {{0}, {1}}; + EXPECT_THROW(sparse_binary_matrix::from_nested_csc(2, 3, nested), + std::invalid_argument); +} + +TEST(SparseBinaryMatrix, FromNestedCsr_InvalidSizeThrows) { + std::vector> nested = {{0}, {1}, {0}}; + EXPECT_THROW(sparse_binary_matrix::from_nested_csr(2, 2, nested), + std::invalid_argument); +} + +} // namespace +} // namespace cudaq::qec