diff --git a/Cargo.toml b/Cargo.toml
index 31feb2f5..1e7b6c99 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -31,3 +31,7 @@ crate-type = ["rlib","cdylib"]
 name = "benchmark"
 harness = false
+[[bench]]
+name = "search_benchmark"
+harness = false
+
diff --git a/benches/search_benchmark.rs b/benches/search_benchmark.rs
new file mode 100644
index 00000000..8b4534e2
--- /dev/null
+++ b/benches/search_benchmark.rs
@@ -0,0 +1,70 @@
+use bit_set::BitSet;
+use criterion::{criterion_group, criterion_main, Criterion};
+use std::time::Duration;
+use std::{ffi::OsStr, time::Instant};
+use std::fs;
+use std::path::Path;
+
+use assembly_theory::{
+    assembly::{clique_index_search_bench, Kernel, Bound},
+    loader,
+    molecule::Molecule,
+};
+
+pub fn reference_datasets(c: &mut Criterion) {
+    // Define a new criterion benchmark group of dataset benchmarks.
+    let mut group = c.benchmark_group("reference_datasets");
+
+    // Define datasets, bounds, and labels.
+    let datasets = ["gdb13_1201", "gdb17_200", "coconut_55"];
+    let bounds = [
+        Bound::IntChain,
+    ];
+    let kernel = Kernel::Never;
+
+    // Loop over all datasets of interest.
+    for dataset in datasets.iter() {
+        // Load all molecules from the given dataset.
+        let paths = fs::read_dir(Path::new("data").join(dataset)).unwrap();
+        let mut mol_list: Vec<Molecule> = Vec::new();
+        for path in paths {
+            let name = path.unwrap().path();
+            if name.extension().and_then(OsStr::to_str) != Some("mol") {
+                continue;
+            }
+            mol_list.push(
+                loader::parse_molfile_str(
+                    &fs::read_to_string(name.clone())
+                        .expect(&format!("Could not read file {name:?}")),
+                )
+                .expect(&format!("Failed to parse {name:?}")),
+            );
+        }
+
+        group.bench_function(*dataset, move |b| {
+            b.iter_custom(|iters| {
+                let mut total = Duration::new(0, 0);
+                for _ in 0..iters {
+                    for mol in mol_list.iter() {
+                        let matches: Vec<(BitSet, BitSet)> = mol.matches().collect();
+                        let start = Instant::now();
+                        clique_index_search_bench(mol, matches, &bounds, kernel);
+                        total += start.elapsed();
+                    }
+                }
+
+                total
+            });
+        });
+    }
+
+    group.finish();
+}
+
+criterion_group! {
+    name = benchmark;
+    config = Criterion::default().sample_size(10);
+    targets = reference_datasets
+}
+criterion_main!(benchmark);
diff --git a/src/assembly.rs b/src/assembly.rs
index 5c09ede4..50d19ecb 100644
--- a/src/assembly.rs
+++ b/src/assembly.rs
@@ -16,28 +16,28 @@
 //! # }
 //! ```
 use std::{
-    collections::BTreeSet,
-    sync::{
+    collections::BTreeSet, sync::{
         atomic::{AtomicUsize, Ordering::Relaxed},
         Arc,
-    },
+    }
 };
 
 use bit_set::BitSet;
 use rayon::iter::{IndexedParallelIterator, IntoParallelRefIterator, ParallelIterator};
 
 use crate::{
-    molecule::Bond, molecule::Element, molecule::Molecule, utils::connected_components_under_edges,
+    molecule::Bond, molecule::Element, molecule::Molecule, utils::connected_components_under_edges
 };
 
+static PARALLEL_MATCH_SIZE_THRESHOLD: usize = 100;
+static ADD_CHAIN: &[usize] = &[0, 1, 2, 2, 3, 3, 4, 3, 4, 4];
+
 #[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord)]
 struct EdgeType {
     bond: Bond,
     ends: (Element, Element),
 }
 
-static PARALLEL_MATCH_SIZE_THRESHOLD: usize = 100;
-
 /// Enum to represent the different bounds available during the computation of molecular assembly
 /// indices.
 /// Bounds are used by `index_search()` to speed up assembly index computations.
@@ -54,6 +54,334 @@ pub enum Bound {
     /// 'VecChainSmallFrags' bounds using information on the number of fragments of size 2 in the
     /// molecule
     VecChainSmallFrags,
+    Weight,
+    Color,
+    CoverNoSort,
+    CoverSort,
+    Fragment,
+}
+
+#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord)]
+pub enum Kernel {
+    Never,
+    Once,
+    Depth1,
+    All
+}
+
+#[derive(Debug)]
+struct CompatGraph {
+    graph: Vec<BitSet>,
+    weights: Vec<usize>,
+    matches: Vec<(BitSet, BitSet)>,
+}
+
+impl CompatGraph {
+    pub fn new(mut init_matches: Vec<(BitSet, BitSet)>) -> Self {
+        let size = init_matches.len();
+        init_matches.sort_by(|e1, e2| e2.0.len().cmp(&e1.0.len()));
+
+        // Initialize weights and empty graph
+        let mut init_graph: Vec<BitSet> = Vec::with_capacity(size);
+        let mut init_weights: Vec<usize> = Vec::with_capacity(size);
+        for m in init_matches.iter() {
+            init_graph.push(BitSet::with_capacity(size));
+            init_weights.push(m.0.len() - 1);
+        }
+
+        // Populate graph
+        for (idx1, (h1, h2)) in init_matches.iter().enumerate() {
+            for (idx2, (h1p, h2p)) in init_matches[idx1 + 1..].iter().enumerate() {
+                let idx2 = idx2 + idx1 + 1;
+
+                let forward_compatible = {
+                    h2.is_disjoint(h1p) &&
+                    h2.is_disjoint(h2p) &&
+                    (h1.is_disjoint(h1p) || h1.is_superset(h1p)) &&
+                    (h1.is_disjoint(h2p) || h1.is_superset(h2p))
+                };
+
+                if forward_compatible {
+                    init_graph[idx1].insert(idx2);
+                    init_graph[idx2].insert(idx1);
+                }
+            }
+        }
+
+        Self {
+            graph: init_graph,
+            weights: init_weights,
+            matches: init_matches,
+        }
+    }
+
+    /*pub fn savings_ground_truth(&self, subgraph: &BitSet) -> usize {
+        self.savings_ground_truth_recurse(0, 0, subgraph)
+    }
+
+    fn savings_ground_truth_recurse(&self, ix: usize, mut best: usize, subgraph: &BitSet) -> usize {
+        if subgraph.len() == 0 {
+            return ix;
+        }
+        let mut cx = ix;
+
+        /*if ix + subgraph.iter().count() <= best && ix + self.remaining_weight_bound(&subgraph) <= best {
+            return ix;
+        }*/
+        /*if ix + self.color_bound(&subgraph) <= best {
+            return ix;
+        }*/
+        /*if ix + self.cover_bound(&subgraph, true) <= best {
+            return ix;
+        }*/
+
+        // Search for duplicatable fragment
+        for v in subgraph.iter() {
+            let subgraph_clone = self.forward_neighbors(v, &subgraph);
+
+            cx = cx.max(self.savings_ground_truth_recurse(
+                ix + self.weights[v],
+                best,
+                &subgraph_clone,
+            ));
+            best = best.max(cx);
+        }
+
+        cx
+    }*/
+
+    pub fn len(&self) -> usize {
+        self.matches.len()
+    }
+
+    pub fn degree(&self, v: usize, subgraph: &BitSet) -> usize {
+        self.graph[v].intersection(subgraph).count()
+    }
+
+    pub fn neighbors(&self, v: usize, subgraph: &BitSet) -> BitSet {
+        let mut neighbors = self.graph[v].clone();
+        neighbors.intersect_with(subgraph);
+
+        neighbors
+    }
+
+    pub fn forward_neighbors(&self, v: usize, subgraph: &BitSet) -> BitSet {
+        let mut neighbors = self.graph[v].clone();
+        neighbors.intersect_with(subgraph);
+        let mut to_remove = vec![];
+        for u in neighbors.iter() {
+            if u <= v {
+                to_remove.push(u);
+            }
+            if u > v {
+                break;
+            }
+        }
+        for u in to_remove {
+            neighbors.remove(u);
+        }
+
+        neighbors
+    }
+
+    pub fn are_adjacent(&self, v: usize, u: usize) -> bool {
+        self.graph[v].contains(u)
+    }
+
+    pub fn remaining_weight_bound(&self, subgraph: &BitSet) -> usize {
+        let deg_sum = subgraph.iter().map(|v| self.degree(v, subgraph)).sum::<usize>() as f32;
+        let max_clique = ((1_f32 + (4_f32 * deg_sum + 1_f32).sqrt()) / 2_f32).floor() as usize;
+        let mut sum = 0;
+        let mut iter = subgraph.iter();
+        for _ in 0..max_clique {
+            sum += self.weights[iter.next().unwrap()];
+        }
+
+        sum
+    }
+
+    pub fn get_match(&self, v: usize) -> &(BitSet, BitSet) {
+        &self.matches[v]
+    }
+
+    pub fn color_bound(&self, subgraph: &BitSet) -> usize {
+        // Greedy coloring
+        let mut colors: Vec<i32> = vec![-1; self.len()];
+        let mut num_colors = 0;
+        let mut largest: Vec<usize> = Vec::new();
+
+        for v in (0..self.matches.len()).rev() {
+            if !subgraph.contains(v) {
+                continue;
+            }
+
+            let mut used: Vec<usize> = vec![0; num_colors];
+
+            for u in subgraph.intersection(&self.graph[v]) {
+                if colors[u] != -1 {
+                    used[colors[u] as usize] = 1;
+                }
+            }
+
+            let mut max = 0;
+            let mut max_idx = num_colors;
+            for i in 0..num_colors {
+                if used[i] == 0 && largest[i] > max {
+                    max = largest[i];
+                    max_idx = i;
+                }
+            }
+
+            if max_idx == num_colors {
+                num_colors += 1;
+                largest.push(0);
+            }
+            if self.weights[v] > largest[max_idx] {
+                largest[max_idx] = self.weights[v]
+            }
+
+            colors[v] = max_idx as i32;
+        }
+
+        largest.iter().sum::<usize>()
+    }
+
+    pub fn cover_bound(&self, subgraph: &BitSet, sort: bool) -> usize {
+        // Sort vertices
+        if sort {
+            let mut vertices: Vec<(usize, usize)> = Vec::with_capacity(subgraph.len());
+            for v in subgraph {
+                vertices.push((v, self.degree(v, subgraph)));
+            }
+            vertices.sort_by(|a, b| b.1.cmp(&a.1));
+            self.cover_bound_helper(subgraph, vertices.iter().map(|(v, _)| *v))
+        }
+        else {
+            let vertices = (0..self.matches.len()).rev().filter(|v| subgraph.contains(*v));
+            self.cover_bound_helper(subgraph, vertices)
+        }
+    }
+
+    fn cover_bound_helper(&self, subgraph: &BitSet, iter: impl Iterator<Item = usize>) -> usize {
+        let mut colors: Vec<Option<Vec<usize>>> = vec![None; self.len()];
+        let mut col_weights = vec![];
+        let mut num_col = 0;
+
+        for v in iter {
+            let mut v_col = Vec::new();
+            let mut used = vec![0; num_col];
+
+            // Find colors used in neighborhood of v
+            for u in subgraph.intersection(&self.graph[v]) {
+                let Some(u_col) = &colors[u] else {
+                    continue;
+                };
+
+                for c in u_col {
+                    used[*c] = 1;
+                }
+            }
+
+            let mut total_weight = 0;
+            let v_val = self.weights[v];
+            // Find colors to give to v
+            for c in 0..num_col {
+                if used[c] == 1 {
+                    continue;
+                }
+
+                v_col.push(c);
+                total_weight += col_weights[c];
+
+                if total_weight >= v_val {
+                    break;
+                }
+            }
+
+            if total_weight == 0 {
+                v_col.push(num_col);
+                col_weights.push(v_val);
+                num_col += 1
+            }
+            else if total_weight < v_val {
+                let mut k = num_col - 1;
+                while used[k] == 1 {
+                    k -= 1
+                }
+                col_weights[k] += v_val - total_weight
+            }
+
+            colors[v] = Some(v_col);
+        }
+
+        col_weights.iter().sum()
+    }
+
+    pub fn frag_bound(&self, subgraph: &BitSet, fragments: &[BitSet]) -> usize {
+        let total_bonds = fragments.iter().map(|x| x.len()).sum::<usize>();
+        let mut bound = 0;
+        let sizes = {
+            let mut vec = vec![];
+            let mut prev = 0;
+            for s in subgraph.iter().map(|v| self.weights[v] + 1) {
+                if s != prev {
+                    vec.push(s);
+                    prev = s;
+                }
+            }
+
+            vec
+        };
+
+        for i in sizes {
+            let mut bound_temp = 0;
+            let mut has_bonds = fragments.len();
+            let mut num_bonds: Vec<usize> = fragments.iter().map(|x| x.len()).collect();
+            let mut smallest_remove = i;
+
+            for v in subgraph.iter() {
+                if has_bonds == 0 {
+                    break;
+                }
+                if self.weights[v] + 1 > i {
+                    continue;
+                }
+
+                let dup = &self.matches[v];
+                let bond = dup.1.iter().next().unwrap();
+                let mut j = 0;
+                while !fragments[j].contains(bond) {
+                    j += 1;
+                }
+
+                if num_bonds[j] > 0 {
+                    let remove = std::cmp::min(dup.0.len(), num_bonds[j]);
+                    bound_temp += 1;
+                    num_bonds[j] -= remove;
+                    smallest_remove = std::cmp::min(smallest_remove, remove);
+
+                    if num_bonds[j] == 0 {
+                        has_bonds -= 1;
+                    }
+                }
+            }
+
+            let leftover = num_bonds.iter().sum::<usize>();
+            let log = {
+                if leftover > 0 {
+                    0
+                }
+                else {
+                    (smallest_remove as f32).log2().ceil() as usize
+                }
+            };
+            bound = std::cmp::max(bound, total_bonds - bound_temp - leftover - log);
+        }
+
+        bound
+    }
 }
 
 pub fn naive_assembly_depth(mol: &Molecule) -> u32 {
@@ -154,6 +482,7 @@ fn recurse_index_search(
     mut best: usize,
     bounds: &[Bound],
     states_searched: &mut usize,
+    last_removed: i32
 ) -> usize {
     let mut cx = ix;
 
@@ -168,6 +497,7 @@ fn recurse_index_search(
         Bound::VecChainSmallFrags => {
             ix - vec_bound_small_frags(fragments, largest_remove, mol) >= best
         }
+        _ => false
     };
     if exceeds {
         return ix;
@@ -176,6 +506,8 @@
 
     // Search for duplicatable fragment
     for (i, (h1, h2)) in matches.iter().enumerate() {
+        let i = i as i32;
+        if i <= last_removed { continue; }
         let mut fractures = fragments.to_owned();
         let f1 = fragments.iter().enumerate().find(|(_, c)| h1.is_subset(c));
         let f2 = fragments.iter().enumerate().find(|(_, c)| h2.is_subset(c));
@@ -216,13 +548,14 @@
 
         cx = cx.min(recurse_index_search(
             mol,
-            &matches[i + 1..],
+            &matches,
             &fractures,
             ix - h1.len() + 1,
             largest_remove,
             best,
             bounds,
             states_searched,
+            i
         ));
         best = best.min(cx);
     }
@@ -230,6 +563,272 @@
     cx
 }
 
+
+fn recurse_clique_index_search(mol: &Molecule,
+    fragments: &[BitSet],
+    ix: usize,
+    mut best: usize,
+    bounds: &[Bound],
+    states_searched: &mut usize,
+    subgraph: BitSet,
+    matches_graph: &CompatGraph,
+    depth: usize,
+    must_include: &Vec<usize>,
+    kernel_method: &Kernel,
+) -> usize {
+    if subgraph.len() == 0 {
+        return ix;
+    }
+    let mut cx = ix;
+    let largest_remove = matches_graph.weights[subgraph.iter().next().unwrap()] + 1;
+    *states_searched += 1;
+
+    // Bounds
+    for bound_type in bounds {
+        let exceeds = match bound_type {
+            Bound::Log => ix - log_bound(fragments) >= best,
+            Bound::IntChain => ix - addition_bound(fragments, largest_remove) >= best,
+            Bound::VecChainSimple => ix - vec_bound_simple(fragments, largest_remove, mol) >= best,
+            Bound::VecChainSmallFrags => {
+                ix - vec_bound_small_frags(fragments, largest_remove, mol) >= best
+            },
+            Bound::Weight => {
+                ix >= best + subgraph.iter().count() &&
+                ix >= best + matches_graph.remaining_weight_bound(&subgraph)
+            },
+            Bound::Color => ix >= best + matches_graph.color_bound(&subgraph),
+            Bound::CoverNoSort => ix >= best + matches_graph.cover_bound(&subgraph, false),
+            Bound::CoverSort => ix >= best + matches_graph.cover_bound(&subgraph, true),
+            Bound::Fragment => ix >= best + matches_graph.frag_bound(&subgraph, fragments),
+        };
+        if exceeds {
+            return ix;
+        }
+    }
+
+    // Search for duplicatable fragment
+    for v in subgraph.iter() {
+        let (h1, h2) = matches_graph.get_match(v);
+
+        let mut fractures = fragments.to_owned();
+        let f1 = fragments.iter().enumerate().find(|(_, c)| h1.is_subset(c));
+        let f2 = fragments.iter().enumerate().find(|(_, c)| h2.is_subset(c));
+
+        let (Some((i1, f1)), Some((i2, f2))) = (f1, f2) else {
+            continue;
+        };
+
+        // All of these clones are on bitsets and cheap enough
+        if i1 == i2 {
+            let mut union = h1.clone();
+            union.union_with(h2);
+            let mut difference = f1.clone();
+            difference.difference_with(&union);
+            let c = connected_components_under_edges(mol.graph(), &difference);
+            fractures.extend(c);
+            fractures.swap_remove(i1);
+        } else {
+            let mut f1r = f1.clone();
+            f1r.difference_with(h1);
+            let mut f2r = f2.clone();
+            f2r.difference_with(h2);
+
+            let c1 = connected_components_under_edges(mol.graph(), &f1r);
+            let c2 = connected_components_under_edges(mol.graph(), &f2r);
+
+            fractures.extend(c1);
+            fractures.extend(c2);
+
+            fractures.swap_remove(i1.max(i2));
+            fractures.swap_remove(i1.min(i2));
+        }
+
+        fractures.retain(|i| i.len() > 1);
+        fractures.push(h1.clone());
+
+        let mut subgraph_clone = matches_graph.forward_neighbors(v, &subgraph);
+        let mut must_include_clone = must_include.clone();
+
+        // Kernelize
+        if *kernel_method == Kernel::All || (*kernel_method == Kernel::Depth1 && depth == 1) {
+            subgraph_clone = deletion_kernel(matches_graph, subgraph_clone);
+            must_include_clone.append(&mut inclusion_kernel(matches_graph, &subgraph_clone));
+        }
+
+        cx = cx.min(recurse_clique_index_search(
+            mol,
+            &fractures,
+            ix - matches_graph.weights[v],
+            best,
+            bounds,
+            states_searched,
+            subgraph_clone,
+            matches_graph,
+            depth + 1,
+            &must_include_clone,
+            kernel_method,
+        ));
+        best = best.min(cx);
+
+        if must_include.contains(&v) {
+            return cx;
+        }
+    }
+
+    cx
+}
+
+pub fn clique_index_search(mol: &Molecule, bounds: &[Bound], kernel_method: Kernel) -> (u32, u32, usize) {
+    // Graph Initialization
+    let matches: Vec<(BitSet, BitSet)> = mol.matches().collect();
+    let num_matches = matches.len();
+    let matches_graph = CompatGraph::new(matches);
+
+    let mut subgraph = BitSet::with_capacity(num_matches);
+    for i in 0..num_matches {
+        subgraph.insert(i);
+    }
+
+    // Kernelization
+    if kernel_method != Kernel::Never {
+        subgraph = deletion_kernel(&matches_graph, subgraph);
+    }
+
+    // Search
+    let mut total_search = 0;
+    let mut init = BitSet::new();
+    init.extend(mol.graph().edge_indices().map(|ix| ix.index()));
+    let edge_count = mol.graph().edge_count();
+
+    let index = recurse_clique_index_search(
+        mol,
+        &[init],
+        edge_count - 1,
+        edge_count - 1,
+        bounds,
+        &mut total_search,
+        subgraph.clone(),
+        &matches_graph,
+        1,
+        &vec![],
+        &kernel_method,
+    );
+
+    (index as u32, num_matches as u32, total_search)
+}
+
+pub fn clique_index_search_bench(mol: &Molecule, matches: Vec<(BitSet, BitSet)>, bounds: &[Bound], kernel_method: Kernel) -> (u32, u32, usize) {
+    // Graph Initialization
+    let num_matches = matches.len();
+    let matches_graph = CompatGraph::new(matches);
+
+    let mut subgraph = BitSet::with_capacity(num_matches);
+    for i in 0..num_matches {
+        subgraph.insert(i);
+    }
+
+    // Kernelization
+    if kernel_method != Kernel::Never {
+        subgraph = deletion_kernel(&matches_graph, subgraph);
+    }
+
+    // Search
+    let mut total_search = 0;
+    let mut init = BitSet::new();
+    init.extend(mol.graph().edge_indices().map(|ix| ix.index()));
+    let edge_count = mol.graph().edge_count();
+
+    let index = recurse_clique_index_search(
+        mol,
+        &[init],
+        edge_count - 1,
+        edge_count - 1,
+        bounds,
+        &mut total_search,
+        subgraph.clone(),
+        &matches_graph,
+        1,
+        &vec![],
+        &kernel_method,
+    );
+
+    (index as u32, num_matches as u32, total_search)
+}
+
+fn deletion_kernel(g: &CompatGraph, mut subgraph: BitSet) -> BitSet {
+    let subgraph_copy = subgraph.clone();
+
+    for v in subgraph_copy.iter() {
+        let v_val = g.weights[v];
+        let neighbors_v = g.neighbors(v, &subgraph);
+
+        let Some(w1) = neighbors_v.iter().next() else {
+            continue;
+        };
+        let Some(w2) = neighbors_v.iter().last() else {
+            continue;
+        };
+
+        let mut s = subgraph.clone();
+        s.intersect_with(&g.graph[w1]);
+        for u in s.intersection(&g.graph[w2]) {
+            if g.are_adjacent(v, u) || v == u {
+                continue;
+            }
+
+            let u_val = g.weights[u];
+            if v_val > u_val {
+                continue;
+            }
+
+            let neighbors_u = g.neighbors(u, &subgraph);
+
+            if neighbors_v.is_subset(&neighbors_u) {
+                subgraph.remove(v);
+                break;
+            }
+        }
+    }
+
+    subgraph
+}
+
+fn inclusion_kernel(g: &CompatGraph, subgraph: &BitSet) -> Vec<usize> {
+    let mut kernel = Vec::new();
+    let tot = subgraph.iter().map(|v| g.weights[v]).sum::<usize>();
+
+    'outer: for v in subgraph {
+        let vw = g.weights[v];
+        let nw = g.neighbors(v, subgraph).iter().map(|u| g.weights[u]).sum::<usize>();
+        if vw >= tot - nw - vw {
+            kernel.push(v);
+            continue;
+        }
+
+        let mut neighbors: Vec<usize> = vec![];
+
+        for u in subgraph.difference(&g.graph[v]) {
+            if u == v {
+                continue;
+            }
+            if g.weights[u] > vw {
+                continue 'outer;
+            }
+
+            for w in neighbors.iter() {
+                if g.are_adjacent(u, *w) {
+                    continue 'outer;
+                }
+            }
+
+            neighbors.push(u);
+        }
+
+        kernel.push(v);
+    }
+
+    kernel
+}
+
+
 #[allow(clippy::too_many_arguments)]
 fn parallel_recurse_index_search(
     mol: &Molecule,
@@ -254,7 +853,8 @@
         Bound::VecChainSimple => ix - vec_bound_simple(fragments, largest_remove, mol) >= best,
         Bound::VecChainSmallFrags => {
             ix - vec_bound_small_frags(fragments, largest_remove, mol) >= best
-        }
+        },
+        _ => false
     };
     if exceeds {
         return ix;
@@ -390,6 +990,7 @@ pub fn index_search(mol: &Molecule, bounds: &[Bound]) -> (u32, u32, usize) {
             edge_count - 1,
             bounds,
             &mut total_search,
+            -1
         );
         (index as u32, total_search)
     };
@@ -432,6 +1033,7 @@ pub fn serial_index_search(mol: &Molecule, bounds: &[Bound]) -> (u32, u32, usize
     let edge_count = mol.graph().edge_count();
 
     let mut total_search = 0;
+
     let index = recurse_index_search(
         mol,
         &matches,
@@ -441,7 +1043,9 @@ pub fn serial_index_search(mol: &Molecule, bounds: &[Bound]) -> (u32, u32, usize
         edge_count - 1,
         bounds,
         &mut total_search,
+        -1
     );
+
     (index as u32, matches.len() as u32, total_search)
 }
 
@@ -466,14 +1070,21 @@ fn addition_bound(fragments: &[BitSet], m: usize) -> usize {
 
     // Test for all sizes m of largest removed duplicate
     for max in 2..m + 1 {
-        let log = (max as f32).log2().ceil();
+        let log = {
+            if max <= ADD_CHAIN.len() {
+                ADD_CHAIN[max - 1]
+            }
+            else {
+                (max as f32).log2().ceil() as usize
+            }
+        };
         let mut aux_sum: usize = 0;
         for len in &frag_sizes {
             aux_sum += (len / max) + (len % max != 0) as usize
         }
 
-        max_s = max_s.max(size_sum - log as usize - aux_sum);
+        max_s = max_s.max(size_sum - log - aux_sum);
     }
 
     max_s
diff --git a/src/main.rs b/src/main.rs
index 08daca99..11f05986 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -2,7 +2,7 @@ use std::fs;
 use std::path::PathBuf;
 
 use anyhow::{bail, Context, Result};
-use assembly_theory::assembly::{index_search, serial_index_search, Bound};
+use assembly_theory::assembly::{clique_index_search, index_search, serial_index_search, Bound, Kernel};
 use assembly_theory::{loader, molecule::Molecule};
 use clap::{Args, Parser, ValueEnum};
 
@@ -11,6 +11,18 @@ enum Bounds {
     Log,
     IntChain,
     VecChain,
+    Weight,
+    Color,
+    Cover,
+    Fragment,
+}
+
+#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, ValueEnum, Ord, Debug)]
+pub enum KernelOption {
+    Never,
+    Once,
+    Depth1,
+    All,
 }
 
 #[derive(Parser, Debug)]
@@ -32,6 +44,12 @@ struct Cli {
     #[arg(long)]
     /// Disable all parallelism
     serial: bool,
+
+    #[arg(long)]
+    kernel_method: Option<KernelOption>,
+
+    #[arg(long)]
+    no_clique: bool,
 }
 
 #[derive(Args, Debug)]
@@ -53,17 +71,23 @@ fn make_boundlist(u: &[Bounds]) -> Vec<Bound> {
             Bounds::Log => vec![Bound::Log],
             Bounds::IntChain => vec![Bound::IntChain],
             Bounds::VecChain => vec![Bound::VecChainSimple, Bound::VecChainSmallFrags],
+            Bounds::Weight => vec![Bound::Weight],
+            Bounds::Color => vec![Bound::Color],
+            Bounds::Cover => vec![Bound::CoverNoSort, Bound::CoverSort],
+            Bounds::Fragment => vec![Bound::Fragment],
         })
         .collect::<Vec<_>>();
     boundlist.dedup();
     boundlist
 }
 
-fn index_message(mol: &Molecule, bounds: &[Bound], verbose: bool, serial: bool) -> String {
+fn index_message(mol: &Molecule, bounds: &[Bound], verbose: bool, serial: bool, no_clique: bool, kernel: Kernel) -> String {
     let (index, duplicates, space) = if serial {
         serial_index_search(mol, bounds)
-    } else {
+    } else if no_clique {
         index_search(mol, bounds)
+    } else {
+        clique_index_search(mol, bounds, kernel)
     };
     if verbose {
         let mut message = String::new();
@@ -89,24 +113,37 @@ fn main() -> Result<()> {
         return Ok(());
     }
 
+    let kernel = match cli.kernel_method {
+        Some(KernelOption::Never) => Kernel::Never,
+        Some(KernelOption::Once) => Kernel::Once,
+        Some(KernelOption::Depth1) => Kernel::Depth1,
+        Some(KernelOption::All) => Kernel::All,
+        None => Kernel::Once,
+    };
+
     let output = match cli.boundgroup {
         None => index_message(
             &molecule,
             &[
+                Bound::Fragment,
                 Bound::IntChain,
                 Bound::VecChainSimple,
                 Bound::VecChainSmallFrags,
+                Bound::CoverNoSort,
+                Bound::CoverSort,
             ],
            cli.verbose,
            cli.serial,
+            cli.no_clique,
+            kernel
        ),
        Some(BoundGroup {
            no_bounds: true, ..
-        }) => index_message(&molecule, &[], cli.verbose, cli.serial),
+        }) => index_message(&molecule, &[], cli.verbose, cli.serial, cli.no_clique, kernel),
        Some(BoundGroup {
            no_bounds: false,
            bounds,
-        }) => index_message(&molecule, &make_boundlist(&bounds), cli.verbose, cli.serial),
+        }) => index_message(&molecule, &make_boundlist(&bounds), cli.verbose, cli.serial, cli.no_clique, kernel),
    };
 
    println!("{output}");
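For reviewers, a minimal sketch (not part of the patch) of how the new public entry point could be exercised from library code. The molfile path is an assumed placeholder, and the particular bound/kernel combination is illustrative only; the public items (`clique_index_search`, `Bound`, `Kernel`, `loader::parse_molfile_str`) are those added or used by this diff.

```rust
use assembly_theory::{
    assembly::{clique_index_search, Bound, Kernel},
    loader,
};
use std::fs;

fn main() {
    // Assumed placeholder path; substitute any .mol file from the repository's data/ tree.
    let molfile = fs::read_to_string("data/gdb13_1201/example.mol")
        .expect("Could not read molfile");
    let mol = loader::parse_molfile_str(&molfile).expect("Failed to parse molfile");

    // One bound combination exposed by this patch, with one-shot kernelization.
    let bounds = [Bound::Fragment, Bound::CoverSort];
    let (index, matches, states) = clique_index_search(&mol, &bounds, Kernel::Once);

    println!("index: {index}, matches: {matches}, states searched: {states}");
}
```

On the command line, the clique-based search becomes the default path: `--no-clique` falls back to `index_search`, `--serial` to the serial search, and `--kernel-method` selects one of the `Kernel` variants, defaulting to `Once` when omitted.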