diff --git a/.github/workflows/R.yml b/.github/workflows/R.yml index 7d2ede166..77e599b17 100644 --- a/.github/workflows/R.yml +++ b/.github/workflows/R.yml @@ -46,8 +46,7 @@ jobs: os: [ ubuntu-22.04, windows-latest, macos-15-intel, macos-14 ] r: [ release ] include: - # Use 4.0 to check with rtools40's older compiler - - { os: windows-latest, r: '4.0' } + - { os: windows-latest, r: '4.1' } # Use latest ubuntu to make it easier to install dependencies - { os: ubuntu-24.04, r: 'devel', http-user-agent: 'release' } env: diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 9f2d2bd4d..7e6350ad5 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -7,7 +7,7 @@ ci: repos: - repo: https://github.com/lorenzwalthert/precommit - rev: v0.4.3.9014 + rev: v0.4.3.9017 hooks: - id: parsable-R - id: no-browser-statement @@ -43,19 +43,19 @@ repos: exclude: '(\.Rd|python/doc/source/reference/.*|test-doctest-.*)' - repo: https://github.com/tox-dev/tox-ini-fmt - rev: "1.6.0" + rev: "1.7.0" hooks: - id: tox-ini-fmt - repo: https://github.com/tox-dev/pyproject-fmt - rev: "v2.6.0" + rev: "v2.11.0" hooks: - id: pyproject-fmt additional_dependencies: ["tox>=4.12.1"] - repo: https://github.com/astral-sh/ruff-pre-commit # Ruff version. - rev: v0.13.0 + rev: v0.14.3 hooks: # Run the formatter. - id: ruff-format @@ -67,6 +67,6 @@ repos: require_serial: true - repo: https://github.com/sphinx-contrib/sphinx-lint - rev: v1.0.0 + rev: v1.0.1 hooks: - id: sphinx-lint diff --git a/DEVEL-README.md b/DEVEL-README.md index 4d27893f2..2039211e3 100644 --- a/DEVEL-README.md +++ b/DEVEL-README.md @@ -24,6 +24,32 @@ How to release 1. `git ci -a -m "Start development of v{NEW_VERSION}"` +Building the code +================= + +C code +----- + +See [*Compilation*](https://github.com/multi-objective/moocore/tree/main/c#compilation) to compile the C code under `c/`. 
+ +R and Python packages +--------------------- + +Under `r/` or `python/`, the commands `make build` and `make install` will build and install the R and Python packages. + + +Testing +======== + +The C code and the Python and R packages have their own testsuites. See [C testsuite](https://github.com/multi-objective/moocore/tree/main/c#testsuite) to set up and run the C testsuite. + +The Python and R testsuites are run automatically via Github Actions for every push and pull request. You can run them manually by running `make test` under `r/` or `python/`. + +You can run all testsuites at once by executing at the top-level: + + make test + + How to extend the Python API ============================ diff --git a/Makefile b/Makefile index 89c2effa4..efb16736a 100644 --- a/Makefile +++ b/Makefile @@ -1,4 +1,4 @@ -PACKAGEVERSION=0.1.9 +PACKAGEVERSION=0.2.0 .PHONY: default clean check test pre-commit diff --git a/README.md b/README.md index 0eda9d0be..eb84f8743 100644 --- a/README.md +++ b/README.md @@ -53,6 +53,9 @@ The following projects currently use `moocore`: - [`pymoo`](https://pymoo.org/) - [`Liger`](https://www.ide.uk/project-liger) - [`DESDEO`](https://github.com/industrial-optimization-group/DESDEO/) + - [`IOHInspector`](https://github.com/IOHprofiler/IOHinspector?tab=readme-ov-file) + - [QuantumAnnealingMOO](https://github.com/andrew-d-king/QuantumAnnealingMOO) (D-Wave Quantum Inc) + [c-build-badge]: https://github.com/multi-objective/moocore/actions/workflows/C.yml/badge.svg?event=push @@ -79,5 +82,5 @@ The following projects currently use `moocore`: [r-universe-build-link]: https://github.com/r-universe/multi-objective/actions/workflows/build.yml [r-universe-version]: https://multi-objective.r-universe.dev/moocore [r-universe-version-badge]: https://multi-objective.r-universe.dev/badges/moocore -[testsuite-badge]: https://github.com/multi-objective/testsuite/actions/workflows/moocore.yml/badge.svg?event=push +[testsuite-badge]: 
https://github.com/multi-objective/testsuite/actions/workflows/moocore.yml/badge.svg [testsuite-link]: https://github.com/multi-objective/testsuite/actions/workflows/moocore.yml diff --git a/c/Makefile b/c/Makefile index 68e1c08c5..e72cffba4 100644 --- a/c/Makefile +++ b/c/Makefile @@ -1,7 +1,7 @@ #----------------------------------------------------------------------- # Makefile for moocore #----------------------------------------------------------------------- -VERSION = 0.17.0 +VERSION = 0.17.0$(REVISION) S= ## Quiet / verbose output: @@ -96,6 +96,7 @@ HDRS = avl.h \ hvapprox.h \ hv_priv.h \ hv3d_priv.h \ + hv4d_priv.h \ igd.h \ io.h \ io_priv.h \ @@ -224,6 +225,7 @@ $(CXXSHLIB): $(LIBHV_OBJS) $(CXX) $(CFLAGS) $(CPPFLAGS) $(LDFLAGS) -o $@ $^ $(SHLIB_LDFLAGS) clean: + @-$(RM) *.s *.i @-$(RM) config.status config.log @-$(RM) Hypervolume_MEX.mex @-$(RM) $(SHLIB) $(CXXSHLIB) diff --git a/c/README.md b/c/README.md index 85d49e435..3845bea9f 100644 --- a/c/README.md +++ b/c/README.md @@ -45,9 +45,9 @@ This uses the option `"-march=native"`. If your GCC version does not support `"n make all MARCH=x86-64-v2 -See the [GCC manual](https://gcc.gnu.org/onlinedocs/gcc/Submodel-Options.html** for the names of the architectures supported by your version of GCC. +See the [GCC manual](https://gcc.gnu.org/onlinedocs/gcc/Submodel-Options.html) for the names of the architectures supported by your version of GCC. -You can compile the code with various runtime checks enabled using the option `DEBUG=1`. This will slow down the code significantly. +You can compile the code with various runtime checks enabled using the option `DEBUG=1`. This will slow down the code significantly. If this is too slow for your purposes, you can disable the [sanitizers](https://gcc.gnu.org/onlinedocs/gcc/Instrumentation-Options.html#index-fsanitize_003daddress) using `make all DEBUG=1 SANITIZERS=""`. 
The build system will try to pick good compiler flags for you, but if you need to, you can override them by passing the option `OPT_CFLAGS`. For example, to disable compiler optimization and enabled debug symbols, you could run: @@ -55,7 +55,7 @@ The build system will try to pick good compiler flags for you, but if you need t If you do not want to see the command line of each compiler -invocation, pass `S=1` to `make`. +invocation, you can pass `S=1` to `make`. Embedding (shared library) @@ -314,12 +314,25 @@ The **moocore** executables are validated using a [comprehensive testsuite](http git clone https://github.com/multi-objective/moocore moocore # Download the testsuite git clone https://github.com/multi-objective/testsuite moocore/testsuite +# Install required python packages +python3 -m pip install -r testsuite/requirements.txt # Testing make -C moocore/c/ test # Timing make -C moocore/c/ time ``` +Under `moocore/c/`, `make test` will compile the code with `DEBUG=1 SANITIZERS="" OPT_CFLAGS="-Og -g3"`. This is useful for debugging failures. + +`make time` will compile the code with `DEBUG=0 SANITIZERS="" OPT_CFLAGS="-O3 -flto -march=native"`. This is useful for benchmarking code and making sure compiler optimizations do not break anything. + +Neither `make test` nor `make time` recompile the code that has not been modified. So you can compile the code first with your preferred compiler flags and then run the testsuite. For example, if you compile with `make all DEBUG=1`, the code will be compiled with several [sanitizers](https://gcc.gnu.org/onlinedocs/gcc/Instrumentation-Options.html#index-fsanitize_003daddress) enabled. You can then call `make test` to run the testsuite with the sanitizers enabled. This is much slower, but very helpful to track crashes. + +Under `c/` there are various targets to run only parts of the testsuite, for example: + + make test-hv + make test-nondominated + ... 
@@ -348,5 +361,5 @@ See the [LICENSE](/LICENSE) and [COPYRIGHTS](/r/inst/COPYRIGHTS) files. [r-moocore-cran]: https://cran.r-project.org/package=moocore [r-moocore-github]: https://github.com/multi-objective/moocore/tree/main/r#readme [r-moocore-homepage]: https://multi-objective.github.io/moocore/r/ -[testsuite-badge]: https://github.com/multi-objective/testsuite/actions/workflows/moocore.yml/badge.svg?event=push +[testsuite-badge]: https://github.com/multi-objective/testsuite/actions/workflows/moocore.yml/badge.svg [testsuite-link]: https://github.com/multi-objective/testsuite/actions/workflows/moocore.yml diff --git a/c/eaf.c b/c/eaf.c index 16e492b5d..a924ae4b5 100644 --- a/c/eaf.c +++ b/c/eaf.c @@ -563,7 +563,7 @@ eaf_check_polygons(eaf_polygon_t *p, int nobj) polygon_print(pi, nobj); polygon_print(pj, nobj); #endif - assert(!polygons_intersect(pi, pj, nobj)); + assert(false); } pj = next_polygon(pj, nobj, end); } diff --git a/c/epsilon.c b/c/epsilon.c index eb2b742a6..e0880a2e5 100644 --- a/c/epsilon.c +++ b/c/epsilon.c @@ -143,7 +143,7 @@ do_file (const char *filename, double *reference, size_t reference_size, // time_elapsed = Timer_elapsed_virtual (); fprintf (outfile, indicator_printf_format "\n", epsilon); if ((additive_flag && epsilon < 0) || (!additive_flag && epsilon < 1)) { - errprintf ("%s: some points are not dominated by the reference set", + errprintf ("%s: some points are not dominated by the reference set", filename); exit (EXIT_FAILURE); }/* diff --git a/c/epsilon.h b/c/epsilon.h index 552da5be1..49a716677 100644 --- a/c/epsilon.h +++ b/c/epsilon.h @@ -14,11 +14,12 @@ static inline bool all_positive(const double * restrict points, size_t size, dimension_t dim) { - ASSUME(dim <= 32); - for (size_t a = 0; a < size; a++) - for (dimension_t d = 0; d < dim; d++) - if (points[a * dim + d] <= 0) - return false; + ASSUME(size > 0); + ASSUME(1 <= dim && dim <= 32); + const size_t len = size * dim; + for (size_t a = 0; a < len; a++) + if (points[a] <= 
0) + return false; return true; } @@ -43,19 +44,19 @@ all_positive(const double * restrict points, size_t size, dimension_t dim) for negative values doesn't make sense. */ -static inline double -eps_value_(bool do_ratio, double a, double b) -{ - return do_ratio ? a / b : a - b; -} +_attr_optimize_finite_math static inline double epsilon_helper_(bool do_mult, const enum objs_agree_t agree, const signed char * restrict minmax, dimension_t dim, const double * restrict points_a, size_t size_a, const double * restrict points_b, size_t size_b) { +// Converting this macro to an inline function hinders vectorization. +#define eps_value_(X,Y) (do_mult ? ((X) / (Y)) : ((X) - (Y))) + ASSUME(2 <= dim && dim <= 32); + ASSUME(size_a > 0 && size_b > 0); ASSUME(agree == AGREE_MINIMISE || agree == AGREE_MAXIMISE || agree == AGREE_NONE); ASSUME(minmax == NULL || agree != AGREE_NONE); double epsilon = do_mult ? 0 : -INFINITY; @@ -65,29 +66,25 @@ epsilon_helper_(bool do_mult, const enum objs_agree_t agree, const double * restrict pb = &points_b[b * dim]; for (size_t a = 0; a < size_a; a++) { const double * restrict pa = &points_a[a * dim]; - double epsilon_max; - if (agree == AGREE_NONE) { - epsilon_max = MAX(minmax[0] * eps_value_(do_mult, pb[0], pa[0]), - minmax[1] * eps_value_(do_mult, pb[1], pa[1])); - if (epsilon_max >= epsilon_min) - continue; - for (dimension_t d = 2; d < dim; d++) { - double epsilon_temp = minmax[d] * eps_value_(do_mult, pb[d], pa[d]); - epsilon_max = MAX(epsilon_max, epsilon_temp); - } - } else { - epsilon_max = (agree == AGREE_MINIMISE) - ? MAX(eps_value_(do_mult, pa[0], pb[0]), eps_value_(do_mult, pa[1], pb[1])) - : MAX(eps_value_(do_mult, pb[0], pa[0]), eps_value_(do_mult, pb[1], pa[1])); - if (epsilon_max >= epsilon_min) - continue; - for (dimension_t d = 2; d < dim; d++) { - double epsilon_temp = (agree == AGREE_MINIMISE) - ? 
eps_value_(do_mult, pa[d], pb[d]) - : eps_value_(do_mult, pb[d], pa[d]); - epsilon_max = MAX(epsilon_max, epsilon_temp); - } + double epsilon_max = (agree == AGREE_NONE) + ? MAX(minmax[0] * eps_value_(pb[0], pa[0]), + minmax[1] * eps_value_(pb[1], pa[1])) + : ((agree == AGREE_MINIMISE) + ? MAX(eps_value_(pa[0], pb[0]), eps_value_(pa[1], pb[1])) + : MAX(eps_value_(pb[0], pa[0]), eps_value_(pb[1], pa[1]))); + + if (epsilon_max >= epsilon_min) + continue; + + for (dimension_t d = 2; d < dim; d++) { + double epsilon_temp = (agree == AGREE_NONE) + ? minmax[d] * eps_value_(pb[d], pa[d]) + : ((agree == AGREE_MINIMISE) + ? eps_value_(pa[d], pb[d]) + : eps_value_(pb[d], pa[d])); + epsilon_max = MAX(epsilon_max, epsilon_temp); } + if (epsilon_max <= epsilon) { skip_max = true; break; @@ -100,6 +97,63 @@ epsilon_helper_(bool do_mult, const enum objs_agree_t agree, return epsilon; } +_attr_optimize_finite_math +static inline double +epsilon_mult_agree_none(const signed char * restrict minmax, dimension_t dim, + const double * restrict points_a, size_t size_a, + const double * restrict points_b, size_t size_b) +{ + return epsilon_helper_(/* do_mult=*/true, AGREE_NONE, minmax, dim, points_a, size_a, points_b, size_b); +} + +_attr_optimize_finite_math +static inline double +epsilon_mult_agree_min(dimension_t dim, + const double * restrict points_a, size_t size_a, + const double * restrict points_b, size_t size_b) +{ + return epsilon_helper_(/* do_mult=*/true, AGREE_MINIMISE, /*minmax=*/NULL, dim, points_a, size_a, points_b, size_b); +} + +_attr_optimize_finite_math +static inline double +epsilon_mult_agree_max(dimension_t dim, + const double * restrict points_a, size_t size_a, + const double * restrict points_b, size_t size_b) +{ + return epsilon_helper_(/* do_mult=*/true, AGREE_MAXIMISE, /*minmax=*/NULL, dim, points_a, size_a, points_b, size_b); +} + + +_attr_optimize_finite_math +static inline double +epsilon_addi_agree_none(const signed char * restrict minmax, dimension_t dim, 
+ const double * restrict points_a, size_t size_a, + const double * restrict points_b, size_t size_b) +{ + return epsilon_helper_(/* do_mult=*/false, AGREE_NONE, minmax, dim, points_a, size_a, points_b, size_b); +} + +_attr_optimize_finite_math +static inline double +epsilon_addi_agree_min(dimension_t dim, + const double * restrict points_a, size_t size_a, + const double * restrict points_b, size_t size_b) +{ + return epsilon_helper_(/* do_mult=*/false, AGREE_MINIMISE, /*minmax=*/NULL, dim, points_a, size_a, points_b, size_b); +} + +_attr_optimize_finite_math +static inline double +epsilon_addi_agree_max(dimension_t dim, + const double * restrict points_a, size_t size_a, + const double * restrict points_b, size_t size_b) +{ + return epsilon_helper_(/* do_mult=*/false, AGREE_MAXIMISE, /*minmax=*/NULL, dim, points_a, size_a, points_b, size_b); +} + + +_attr_optimize_finite_math static inline double epsilon_mult_minmax(const signed char * restrict minmax, dimension_t dim, const double * restrict points_a, size_t size_a, @@ -114,14 +168,15 @@ epsilon_mult_minmax(const signed char * restrict minmax, dimension_t dim, // This forces the compiler to generate three specialized versions of the function. 
switch (check_all_minimize_maximize(minmax, dim)) { case AGREE_MINIMISE: - return epsilon_helper_(/* do_mult=*/true, AGREE_MINIMISE, /*minmax=*/NULL, dim, points_a, size_a, points_b, size_b); + return epsilon_mult_agree_min(dim, points_a, size_a, points_b, size_b); case AGREE_MAXIMISE: - return epsilon_helper_(/* do_mult=*/true, AGREE_MAXIMISE, /*minmax=*/NULL, dim, points_a, size_a, points_b, size_b); + return epsilon_mult_agree_max(dim, points_a, size_a, points_b, size_b); default: - return epsilon_helper_(/* do_mult=*/true, AGREE_NONE, minmax, dim, points_a, size_a, points_b, size_b); + return epsilon_mult_agree_none(minmax, dim, points_a, size_a, points_b, size_b); } } +_attr_optimize_finite_math static inline double epsilon_additive_minmax(const signed char * restrict minmax, dimension_t dim, const double * restrict points_a, size_t size_a, @@ -130,11 +185,11 @@ epsilon_additive_minmax(const signed char * restrict minmax, dimension_t dim, // This forces the compiler to generate three specialized versions of the function. 
switch (check_all_minimize_maximize(minmax, dim)) { case AGREE_MINIMISE: - return epsilon_helper_(/* do_mult=*/false, AGREE_MINIMISE, /*minmax=*/NULL, dim, points_a, size_a, points_b, size_b); + return epsilon_addi_agree_min(dim, points_a, size_a, points_b, size_b); case AGREE_MAXIMISE: - return epsilon_helper_(/* do_mult=*/false, AGREE_MAXIMISE, /*minmax=*/NULL, dim, points_a, size_a, points_b, size_b); + return epsilon_addi_agree_max(dim, points_a, size_a, points_b, size_b); default: - return epsilon_helper_(/* do_mult=*/false, AGREE_NONE, minmax, dim, points_a, size_a, points_b, size_b); + return epsilon_addi_agree_none(minmax, dim, points_a, size_a, points_b, size_b); } } diff --git a/c/gcc.mk b/c/gcc.mk index 1f11496f6..e68f3da09 100644 --- a/c/gcc.mk +++ b/c/gcc.mk @@ -7,7 +7,11 @@ WARN_CFLAGS = -pedantic -Wall -Wextra -Wvla -Wconversion -Wno-sign-conversion -W ifeq ($(DEBUG), 0) SANITIZERS ?= OPT_CFLAGS ?= -DNDEBUG -O3 -flto - # -ffinite-math-only -fno-signed-zeros allows further vectorization. 
+ # GCC options that improve performance: + # -ffinite-math-only -fno-signed-zeros + # GCC options that may or may not improve performance: + # -ftree-loop-im -ftree-loop-ivcanon -fivopts -ftree-vectorize -funroll-loops -fipa-pta + # else SANITIZERS ?= -fsanitize=undefined -fsanitize=address -fsanitize=float-cast-overflow -fsanitize=float-divide-by-zero OPT_CFLAGS ?= -g3 -O0 diff --git a/c/gcc_attribs.h b/c/gcc_attribs.h index f69ab12c2..00418aa34 100644 --- a/c/gcc_attribs.h +++ b/c/gcc_attribs.h @@ -227,4 +227,10 @@ #define _attr_optimize_finite_math /* nothing */ #endif +#ifdef __SANITIZE_ADDRESS__ +# include <sanitizer/asan_interface.h> +#else +# define ASAN_POISON_MEMORY_REGION(addr, size) ((void) (addr), (void) (size)) +#endif + #endif /* GCC_ATTRIBUTES */ diff --git a/c/hv.c b/c/hv.c index 10b6f8c7b..815fa780e 100644 --- a/c/hv.c +++ b/c/hv.c @@ -34,47 +34,58 @@ #include #include "common.h" #include "hv.h" -#define HV_DIMENSION 4 -#include "hv_priv.h" +#define HV_RECURSIVE +#include "hv4d_priv.h" #define STOP_DIMENSION 3 // default: stop on dimension 4. -typedef struct fpli_dlnode { - const double * restrict x; // point vector - struct fpli_dlnode ** next; // next-node vector - struct fpli_dlnode ** prev; // previous-node vector - double * restrict area; // partial area - double * restrict vol; // partial volume - dimension_t ignore; // [0, 255] -} fpli_dlnode_t; - - static int compare_node(const void * restrict p1, const void * restrict p2) { - const double * restrict x1 = (*(const fpli_dlnode_t **)p1)->x; - const double * restrict x2 = (*(const fpli_dlnode_t **)p2)->x; + const double * restrict x1 = (*(const dlnode_t **)p1)->x; + const double * restrict x2 = (*(const dlnode_t **)p2)->x; return cmp_double_asc(*x1, *x2); } -/* - * Setup circular double-linked list in each dimension - */ +/** Setup circular double-linked list in each dimension. 
+ + There are in fact two separate lists that are keep in sync: + + - A multi-dimensional list for dimensions 5 and above tracked by ->r_next[] and ->r_prev[], where head is the sentinel with head->x == NULL. -static fpli_dlnode_t * + - A list for dimensions 3 and 4 tracked by ->next[0 or 1] ->prev[0 or 1]. This list has 3 sentinels as required by hv4dplusU(). The first sentinel is saved in head->next[0]. + + */ +static dlnode_t * fpli_setup_cdllist(const double * restrict data, dimension_t d, size_t * restrict size, const double * restrict ref) { - ASSUME(d > STOP_DIMENSION); + ASSUME(d > STOP_DIMENSION + 1); dimension_t d_stop = d - STOP_DIMENSION; size_t n = *size; - fpli_dlnode_t * head = malloc((n+1) * sizeof(*head)); + + dlnode_t * head = malloc((n+1) * sizeof(*head)); // Allocate single blocks of memory as much as possible. - head->next = malloc(2 * d_stop * (n+1) * sizeof(head)); - head->prev = head->next + d_stop * (n+1); + // We need space in r_next and r_prev for dimension 5 and above (d_stop - 1). + head->r_next = malloc(2 * (d_stop - 1) * (n+1) * sizeof(head)); + head->r_prev = head->r_next + (d_stop - 1) * (n+1); + // We only need space in area and vol for dimension 4 and above. head->area = malloc(2 * d_stop * (n+1) * sizeof(*data)); head->vol = head->area + d_stop * (n+1); - head->x = NULL; /* head contains no data */ - head->ignore = 0; /* should never get used */ + head->x = NULL; // head contains no data + head->ignore = 0; // should never get used + + // Reserve space for the sentinels. + dlnode_t * list4d = new_cdllist(0, ref); + // Link head and list4d; head is not used by HV4D, so next[0] and prev[0] + // should remain untouched. + head->next[0] = list4d; + + // Reserve space for auxiliar list and auxiliar x vector used in onec4dplusU. + // This list will store at most n-1 points. 
+ dlnode_t * list_aux = (dlnode_t *) malloc(n * sizeof(*list_aux)); + list_aux->vol = malloc(3 * n * sizeof(double)); + head->prev[0] = list_aux; + list_aux->next[0] = list4d; size_t i = 1; for (size_t j = 0; j < n; j++) { @@ -82,10 +93,10 @@ fpli_setup_cdllist(const double * restrict data, dimension_t d, point. This is needed to assure that the points left are only those that are needed to calculate the hypervolume. */ if (likely(strongly_dominates(data + j * d, ref, d))) { - head[i].x = data + (j+1) * d; /* this will be fixed a few lines below... */ + head[i].x = data + (j+1) * d; // this will be fixed a few lines below... head[i].ignore = 0; - head[i].next = head->next + i * d_stop; - head[i].prev = head->prev + i * d_stop; + head[i].r_next = head->r_next + i * (d_stop - 1); + head[i].r_prev = head->r_prev + i * (d_stop - 1); head[i].area = head->area + i * d_stop; head[i].vol = head->vol + i * d_stop; i++; @@ -95,41 +106,72 @@ fpli_setup_cdllist(const double * restrict data, dimension_t d, if (unlikely(n == 0)) goto finish; - fpli_dlnode_t **scratch = malloc(n * sizeof(*scratch)); + dlnode_t **scratch = malloc(n * sizeof(*scratch)); for (i = 0; i < n; i++) scratch[i] = head + i + 1; - for (int k = d-1; k >= 0; k--) { + for (int j = d_stop - 2; j >= -2; j--) { + /* FIXME: replace qsort() by something better: + https://github.com/numpy/x86-simd-sort + https://github.com/google/highway/tree/52a2d98d07852c5d69284e175666e5f8cc7d8285/hwy/contrib/sort + */ + // We shift x because qsort() cannot take the dimension to sort as an argument. for (i = 0; i < n; i++) scratch[i]->x--; - int j = k - STOP_DIMENSION; - if (j < 0) - continue; + // Sort each dimension independently. 
+ const int j2 = j+2; qsort(scratch, n, sizeof(*scratch), compare_node); - head->next[j] = scratch[0]; - scratch[0]->prev[j] = head; - for (i = 1; i < n; i++) { - scratch[i-1]->next[j] = scratch[i]; - scratch[i]->prev[j] = scratch[i-1]; + if (j <= -1) { +//#define FORCE_BUG +#ifdef FORCE_BUG + if (j2 == 1) { + dlnode_t * tmp = scratch[0]; + scratch[0] = scratch[1]; + scratch[1] = tmp; + } +#endif + (list4d+1)->next[j2] = scratch[0]; + scratch[0]->prev[j2] = list4d+1; + for (i = 1; i < n; i++) { + scratch[i-1]->next[j2] = scratch[i]; + scratch[i]->prev[j2] = scratch[i-1]; + } + scratch[n-1]->next[j2] = list4d+2; + (list4d+2)->prev[j2] = scratch[n-1]; + } else { + head->r_next[j] = scratch[0]; + scratch[0]->r_prev[j] = head; + for (i = 1; i < n; i++) { + scratch[i-1]->r_next[j] = scratch[i]; + scratch[i]->r_prev[j] = scratch[i-1]; + } + scratch[n-1]->r_next[j] = head; + head->r_prev[j] = scratch[n-1]; } - scratch[n-1]->next[j] = head; - head->prev[j] = scratch[n-1]; + } + // Reset x to point to the first objective. + for (i = 0; i < n; i++) { + scratch[i]->x -= STOP_DIMENSION - 1; } free(scratch); - // FIXME: This should not be necessary. - for (i = 0; i < d_stop; i++) - head->area[i] = 0; + // Make sure it is not used. 
+ ASAN_POISON_MEMORY_REGION(head->area, sizeof(*data) * d_stop); + ASAN_POISON_MEMORY_REGION(head->vol, sizeof(*data) * d_stop); finish: *size = n; return head; } -static void fpli_free_cdllist(fpli_dlnode_t * head) +static void fpli_free_cdllist(dlnode_t * head) { - free(head->next); + assert(head->next[0] == head->prev[0]->next[0]); + free_cdllist(head->next[0]); // free 4D sentinels + free(head->prev[0]->vol); // free x_aux (4D basecase) + free(head->prev[0]); // free list_aux (4D basecase) + free(head->r_next); free(head->area); free(head); } @@ -148,80 +190,93 @@ update_bound(double * restrict bound, const double * restrict x, dimension_t dim } static void -delete(fpli_dlnode_t * restrict nodep, dimension_t dim, double * restrict bound) +delete_4d(dlnode_t * restrict nodep) +{ + nodep->prev[1]->next[1] = nodep->next[1]; + nodep->next[1]->prev[1] = nodep->prev[1]; +} + +static void +delete_3d(dlnode_t * restrict nodep) +{ + nodep->prev[0]->next[0] = nodep->next[0]; + nodep->next[0]->prev[0] = nodep->prev[0]; +} + +static void +reinsert_4d(dlnode_t * restrict nodep) +{ + nodep->prev[1]->next[1] = nodep; + nodep->next[1]->prev[1] = nodep; +} + +static void +reinsert_3d(dlnode_t * restrict nodep) +{ + nodep->prev[0]->next[0] = nodep; + nodep->next[0]->prev[0] = nodep; +} + +static void +delete_dom(dlnode_t * restrict nodep, dimension_t dim) { ASSUME(dim > STOP_DIMENSION); - for (dimension_t d = 0; d < dim - STOP_DIMENSION; d++) { - nodep->prev[d]->next[d] = nodep->next[d]; - nodep->next[d]->prev[d] = nodep->prev[d]; + assert(nodep->x); + // d=0 is dimension 5. 
+ for (dimension_t d = 0; d < dim - 1 - STOP_DIMENSION; d++) { + nodep->r_prev[d]->r_next[d] = nodep->r_next[d]; + nodep->r_next[d]->r_prev[d] = nodep->r_prev[d]; } + delete_4d(nodep); + delete_3d(nodep); +} + +static void +delete(dlnode_t * restrict nodep, dimension_t dim, double * restrict bound) +{ + delete_dom(nodep, dim); update_bound(bound, nodep->x, dim); } + static void -reinsert(fpli_dlnode_t * restrict nodep, dimension_t dim, double * restrict bound) +reinsert_nobound(dlnode_t * restrict nodep, dimension_t dim) { ASSUME(dim > STOP_DIMENSION); - for (dimension_t d = 0; d < dim - STOP_DIMENSION; d++) { - nodep->prev[d]->next[d] = nodep; - nodep->next[d]->prev[d] = nodep; + assert(nodep->x); + // d=0 is dimension 5. + for (dimension_t d = 0; d < dim - 1 - STOP_DIMENSION; d++) { + nodep->r_prev[d]->r_next[d] = nodep; + nodep->r_next[d]->r_prev[d] = nodep; } - update_bound(bound, nodep->x, dim); + reinsert_4d(nodep); + reinsert_3d(nodep); } static void -fpli_hv4d_setup_cdllist(const fpli_dlnode_t * restrict pp, - dlnode_t * restrict list, size_t n _attr_maybe_unused) +reinsert(dlnode_t * restrict nodep, dimension_t dim, double * restrict bound) { - ASSUME(n > 1); - reset_sentinels(list); - - const dimension_t d = HV_DIMENSION - 3; // index within the list. - dlnode_t * q = list+1; - dlnode_t * list3 = list+3; - assert(list->next[d] == list + 1); - assert(q->next[d] == list + 2); - for (size_t i = 0; pp->x != NULL; pp = pp->next[0]) { - dlnode_t * p = list3 + i; - p->x = pp->x; - // Initialize it when debugging so it will crash if uninitialized. - DEBUG1( - p->closest[0] = NULL; - p->closest[1] = NULL; - p->cnext[0] = NULL; - p->cnext[1] = NULL;); - // FIXME: Can we use pp->ignore to initialize p->ndomr? - //p->ndomr = 0; - // Link the list in order. 
- q->next[d] = p; - p->prev[d] = q; - q = p; - i++; - } - assert((list3 + n - 1) == q); - assert(list+2 == list->prev[d]); - // q = last point, q->next = s3, s3->prev = last point - q->next[d] = list+2; - (list+2)->prev[d] = q; + reinsert_nobound(nodep, dim); + update_bound(bound, nodep->x, dim); } static double -one_point_hv(const double * restrict x, const double * restrict ref, dimension_t d) +fpli_onec4d(dlnode_t * restrict list, size_t c _attr_maybe_unused, dlnode_t * restrict the_point) { - double hv = ref[0] - x[0]; - for (dimension_t i = 1; i < d; i++) - hv *= (ref[i] - x[i]); - return hv; + ASSUME(c > 1); + assert(list->next[0] == list->prev[0]->next[0]); + dlnode_t * restrict list4d = list->next[0]; + dlnode_t * restrict list_aux = list->prev[0]; + double contrib = onec4dplusU(list4d, list_aux, the_point); + return contrib; } -double hv4dplusU(dlnode_t * list); - static double -fpli_hv4d(fpli_dlnode_t * restrict list, dlnode_t * restrict list4d, size_t c) +one_point_hv(const double * restrict x, const double * restrict ref, dimension_t d) { - ASSUME(c > 1); - fpli_hv4d_setup_cdllist(list->next[0], list4d, c); - double hv = hv4dplusU(list4d); + double hv = ref[0] - x[0]; + for (dimension_t i = 1; i < d; i++) + hv *= (ref[i] - x[i]); return hv; } @@ -240,88 +295,194 @@ update_area(double * restrict area, const double * restrict x, area[d + 1] *= area[d]; } +//#define HV_COUNTERS +_attr_maybe_unused static size_t debug_counter[6] = { 0 }; + static double -hv_recursive(fpli_dlnode_t * restrict list, dlnode_t * restrict list4d, - dimension_t dim, size_t c, - const double * restrict ref, double * restrict bound) +hv_recursive(dlnode_t * restrict list, dimension_t dim, size_t c, + const double * restrict ref, double * restrict bound, dlnode_t * the_point) { - ASSUME(dim > STOP_DIMENSION); ASSUME(c > 1); + ASSUME(dim >= STOP_DIMENSION); + if (dim == STOP_DIMENSION) { + /*--------------------------------------- + base case of dimension 4 + 
--------------------------------------*/ + return fpli_onec4d(list, c, the_point); + } + ASSUME(dim > STOP_DIMENSION); /* ------------------------------------------------------ General case for dimensions higher than 4D ------------------------------------------------------ */ const dimension_t d_stop = dim - STOP_DIMENSION; - fpli_dlnode_t *p1 = list->prev[d_stop]; - for (fpli_dlnode_t *pp = p1; pp->x; pp = pp->prev[d_stop]) { + assert(d_stop > 0); + // d_stop - 1 is dimension 5 in r_prev and r_next. + dlnode_t * p1 = list->r_prev[d_stop - 1]; + for (dlnode_t * pp = p1; pp->x; pp = pp->r_prev[d_stop - 1]) { if (pp->ignore < dim) pp->ignore = 0; } - fpli_dlnode_t *p0 = list; + dlnode_t * p1_prev = p1->r_prev[d_stop - 1]; + dlnode_t * p0 = list; /* Delete all points x[dim] > bound[d_stop]. In case of repeated coordinates, delete also all points x[dim] == bound[d_stop] except one. */ - while (p1->x[dim] > bound[d_stop] || p1->prev[d_stop]->x[dim] >= bound[d_stop]) { + while (p1->x[dim] > bound[d_stop] + || p1_prev->x[dim] >= bound[d_stop]) { // FIXME: Instead of deleting each point, unlink the start and end // nodes after the loop. delete(p1, dim, bound); p0 = p1; - p1 = p1->prev[d_stop]; + p1 = p1->r_prev[d_stop - 1]; + p1_prev = p1->r_prev[d_stop - 1]; c--; if (c == 1) break; } - double hyperv = 0; - if (c > 1) { - hyperv = p1->prev[d_stop]->vol[d_stop] + p1->prev[d_stop]->area[d_stop] - * (p1->x[dim] - p1->prev[d_stop]->x[dim]); - } else { - ASSUME(c == 1); + double hyperv; + if (c == 1) { update_area(p1->area, p1->x, ref, dim); p1->vol[d_stop] = 0; - if (p0->x == NULL) { + assert(p0->x != NULL); + /* if (p0->x == NULL) return p1->area[d_stop] * (ref[dim] - p1->x[dim]); - } + */ hyperv = p1->area[d_stop] * (p0->x[dim] - p1->x[dim]); - bound[d_stop] = p0->x[dim]; + // FIXME: This is never used? 
+ // bound[d_stop] = p0->x[dim]; reinsert(p0, dim, bound); c++; p1 = p0; - p0 = p0->next[d_stop]; + p1_prev = p0->r_prev[d_stop - 1]; + p0 = p0->r_next[d_stop - 1]; + } else { + ASSUME(c > 1); + DEBUG1(debug_counter[0]++); + hyperv = p1_prev->vol[d_stop] + p1_prev->area[d_stop] + * (p1->x[dim] - p1_prev->x[dim]); + assert(p0 != p1_prev); + assert(p0 == p1->r_next[d_stop - 1]); + // p0->x may be NULL here and thus we may return below. } assert(c > 1); while (true) { + // FIXME: This is not true in the first iteration if c > 1 previously. + // assert(p0 == p1->r_prev[d_stop - 1]); + assert(p1_prev == p1->r_prev[d_stop - 1]); p1->vol[d_stop] = hyperv; double hypera; if (p1->ignore >= dim) { - hypera = p1->prev[d_stop]->area[d_stop]; + DEBUG1(debug_counter[1]++); + hypera = p1_prev->area[d_stop]; } else { - if (dim - 1 == STOP_DIMENSION) { - /*--------------------------------------- - base case of dimension 4 - --------------------------------------*/ - hypera = fpli_hv4d(list, list4d, c); - } else { - hypera = hv_recursive(list, list4d, dim - 1, c, ref, bound); + hypera = hv_recursive(list, dim - 1, c, ref, bound, p1); + if(dim - 1 == STOP_DIMENSION){ //hypera only has the contribution of p1 + hypera += p1_prev->area[d_stop]; } - if (hypera <= p1->prev[d_stop]->area[d_stop]) + /* At this point, p1 is the point with the highest value in + dimension dim in the list: If it is dominated in dimension + dim-1, then it is also dominated in dimension dim. */ + if (p1->ignore == dim - 1) { + DEBUG1(debug_counter[2]++); + p1->ignore = dim; + } else if (hypera <= p1_prev->area[d_stop]) { + DEBUG1(debug_counter[3]++); p1->ignore = dim; + } } p1->area[d_stop] = hypera; if (p0->x == NULL) { + bound[d_stop] = p1->x[dim]; hyperv += hypera * (ref[dim] - p1->x[dim]); return hyperv; } hyperv += hypera * (p0->x[dim] - p1->x[dim]); - bound[d_stop] = p0->x[dim]; + // FIXME: This is never used? 
+ // bound[d_stop] = p0->x[dim]; reinsert(p0, dim, bound); c++; p1 = p0; - p0 = p0->next[d_stop]; + p1_prev = p0->r_prev[d_stop - 1]; + p0 = p0->r_next[d_stop - 1]; } } +static double +fpli_hv_ge5d(dlnode_t * restrict list, dimension_t dim, size_t c, + const double * restrict ref) +{ + ASSUME(c > 1); + ASSUME(dim > STOP_DIMENSION); + const dimension_t d_stop = dim - STOP_DIMENSION; + ASSUME(0 < d_stop && d_stop < 255); // Silence -Walloc-size-larger-than= warning + double * bound = malloc(d_stop * sizeof(*bound)); + for (dimension_t i = 0; i < d_stop; i++) + bound[i] = -DBL_MAX; + + /* ------------------------------------------------------ + General case for dimensions higher than 4D + ------------------------------------------------------ */ + dlnode_t * p1 = list->r_prev[d_stop - 1]; + // FIXME: This should be the initial state of the list when building it. + // Delete all points in dimensions < dim. + do { + delete_dom(p1, dim); + p1 = p1->r_prev[d_stop - 1]; + c--; + } while (c > 1); + + update_area(p1->area, p1->x, ref, dim); + p1->vol[d_stop] = 0; + dlnode_t * p0 = p1->r_next[d_stop - 1]; + assert(p0->x != NULL); + double hyperv = p1->area[d_stop] * (p0->x[dim] - p1->x[dim]); + // FIXME: This is never used? + // bound[d_stop] = p0->x[dim]; + reinsert_nobound(p0, dim); + + while (true) { + dlnode_t * p1_prev = p0->r_prev[d_stop - 1]; + p1 = p0; + p0 = p0->r_next[d_stop - 1]; + p1->vol[d_stop] = hyperv; + assert(p1->ignore == 0); + c++; + + double hypera = hv_recursive(list, dim - 1, c, ref, bound, p1); + if (dim - 1 == STOP_DIMENSION) { //hypera only has the contribution of p1 + hypera += p1_prev->area[d_stop]; + } + /* At this point, p1 is the point with the highest value in + dimension dim in the list: If it is dominated in dimension + dim-1, then it is also dominated in dimension dim. 
*/ + if (p1->ignore == dim - 1) { + DEBUG1(debug_counter[4]++); + p1->ignore = dim; + } else if (hypera <= p1_prev->area[d_stop]) { + DEBUG1(debug_counter[5]++); + p1->ignore = dim; + } + p1->area[d_stop] = hypera; + if (p0->x == NULL) { + free(bound); +#if defined(HV_COUNTERS) && DEBUG >= 1 + for (size_t i = 0; i < sizeof(debug_counter)/sizeof(size_t); i++) + fprintf(stderr, "debug_counter[%zu] = %zu\n", i, debug_counter[i]); +#endif + hyperv += hypera * (ref[dim] - p1->x[dim]); + return hyperv; + } + hyperv += hypera * (p0->x[dim] - p1->x[dim]); + // FIXME: This is never used? + // bound[d_stop] = p0->x[dim]; + // FIXME: Does updating the bound here matters? + reinsert(p0, dim, bound); + } +} + + static double hv2d(const double * restrict data, size_t n, const double * restrict ref) { @@ -358,31 +519,22 @@ double fpli_hv(const double * restrict data, int d, int npoints, { size_t n = (size_t) npoints; if (unlikely(n == 0)) return 0.0; - ASSUME(d < 256); - ASSUME(d > 1); + ASSUME(d > 1 && d < 256); if (d == 4) return hv4d(data, n, ref); if (d == 3) return hv3d(data, n, ref); if (d == 2) return hv2d(data, n, ref); dimension_t dim = (dimension_t) d; - fpli_dlnode_t * list = fpli_setup_cdllist(data, dim, &n, ref); + dlnode_t * list = fpli_setup_cdllist(data, dim, &n, ref); double hyperv; - if (unlikely(n == 0)) { - /* Returning here would leak memory. 
*/ - hyperv = 0.0; + if (likely(n > 1)) { + hyperv = fpli_hv_ge5d(list, dim - 1, n, ref); } else if (unlikely(n == 1)) { - hyperv = one_point_hv(list->next[0]->x, ref, dim); + hyperv = one_point_hv(list->r_next[0]->x, ref, dim); } else { - const dimension_t d_stop = dim - STOP_DIMENSION; - ASSUME(d_stop > 1 && d_stop < 255); // Silence -Walloc-size-larger-than= warning - double * bound = malloc(d_stop * sizeof(*bound)); - for (dimension_t i = 0; i < d_stop; i++) - bound[i] = -DBL_MAX; - dlnode_t * list4d = new_cdllist(n, ref); - hyperv = hv_recursive(list, list4d, dim - 1, n, ref, bound); - free_cdllist(list4d); - free(bound); + assert(n == 0); + hyperv = 0.0; // Returning here would leak memory. } - /* Clean up. */ + // Clean up. fpli_free_cdllist(list); return hyperv; } diff --git a/c/hv4d.c b/c/hv4d.c index 9029eec3e..570aa4af4 100644 --- a/c/hv4d.c +++ b/c/hv4d.c @@ -1,246 +1,4 @@ -/****************************************************************************** - HV4D+ algorithm. - ------------------------------------------------------------------------------ - - Copyright (C) 2013, 2016, 2017 - Andreia P. Guerreiro - - This Source Code Form is subject to the terms of the Mozilla Public - License, v. 2.0. If a copy of the MPL was not distributed with this - file, You can obtain one at https://mozilla.org/MPL/2.0/. - - ------------------------------------------------------------------------------ - - Reference: - - [1] Andreia P. Guerreiro and Carlos M. Fonseca. Computing and Updating - Hypervolume Contributions in Up to Four Dimensions. IEEE Transactions on - Evolutionary Computation, 22(3):449–463, June 2018. - - ------------------------------------------------------------------------------ - - Modified by Manuel López-Ibáñez (2025): - - Integration within moocore. - - Correct handling of weakly dominated points and repeated coordinates during - preprocessing(). - - More efficient setup_cdllist() and preprocessing() in terms of time and memory. 
- -******************************************************************************/ - -#include -#include -#include "common.h" -#define HV_DIMENSION 4 -#include "hv_priv.h" - -// ---------- Data Structures Functions --------------------------------------- - -static inline void _attr_maybe_unused -print_point(const char *s, const double * x) -{ - fprintf(stderr, "%s: %g %g %g %g\n", s, x[0], x[1], x[2], x[4]); -} - - - - -// ------------ Update data structure ----------------------------------------- - -static inline void -add_to_z(dlnode_t * new) -{ - new->next[0] = new->prev[0]->next[0]; //in case new->next[0] was removed for being dominated - new->next[0]->prev[0] = new; - new->prev[0]->next[0] = new; -} - -static inline bool -lex_cmp_3d_102(const double * restrict a, const double * restrict b) -{ - return a[1] < b[1] || (a[1] == b[1] && (a[0] < b[0] || (a[0] == b[0] && a[2] < b[2]))); -} - -static inline bool -lex_cmp_3d_012(const double * restrict a, const double * restrict b) -{ - return a[0] < b[0] || (a[0] == b[0] && (a[1] < b[1] || (a[1] == b[1] && a[2] < b[2]))); -} - -/* - Go through the points in the order of z and either remove points that are - dominated by new with respect to x,y,z or update the cx and cy lists by - adding new. 
-*/ -static void -update_links(dlnode_t * restrict list, dlnode_t * restrict new) -{ - assert(list+2 == list->prev[0]); - const double * newx = new->x; - dlnode_t * p = new->next[0]; - const double * px = p->x; - dlnode_t * stop = list+2; - while (p != stop) { - // px dominates newx (but not equal) - if (px[0] <= newx[0] && px[1] <= newx[1] && (px[0] < newx[0] || px[1] < newx[1])) - return; - - if (newx[0] <= px[0]){ - //new <= p - if (newx[1] <= px[1]){ - assert(weakly_dominates(newx, px, 3)); - //p->ndomr++; - remove_from_z(p); - } else if (newx[0] < px[0] && lex_cmp_3d_102(newx, p->closest[1]->x)) { // newx[1] > px[1] - p->closest[1] = new; - } - } else if (newx[1] < px[1] && lex_cmp_3d_012(newx, p->closest[0]->x)) {//newx[0] > px[0] - p->closest[0] = new; - } - p = p->next[0]; - px = p->x; - } -} - - - -// This does what setupZandClosest does while reconstructing L at z = new->x[2]. -__attribute__((hot)) static bool -restart_base_setup_z_and_closest(dlnode_t * restrict list, dlnode_t * restrict new) -{ - const double * restrict newx = new->x; - // FIXME: This is the most expensive function in the HV4D+ algorithm. - assert(list+1 == list->next[0]); - dlnode_t * closest1 = list; - dlnode_t * closest0 = list+1; - const double * closest0x = closest0->x; - const double * closest1x = closest1->x; - dlnode_t * p = list+1; - assert(p->next[0] == list->next[0]->next[0]); - restart_list_y(list); - while (true) { - p = p->next[0]; - const double * restrict px = p->x; - // Help auto-vectorization. 
- bool p_leq_new_0 = px[0] <= newx[0]; - bool p_leq_new_1 = px[1] <= newx[1]; - bool p_leq_new_2 = px[2] <= newx[2]; - - if (p_leq_new_0 & p_leq_new_1 & p_leq_new_2) { - //new->ndomr++; - assert(weakly_dominates(px, newx, 4)); - return false; - } - - if (!lexicographic_less_3d(px, newx)) - break; - - // reconstruct - set_cnext_to_closest(p); - p->cnext[0]->cnext[1] = p; - p->cnext[1]->cnext[0] = p; - - // setup_z_and_closest - assert(px[0] > newx[0] || px[1] > newx[1]); - if (px[1] < newx[1] && (px[0] < closest0x[0] || (px[0] == closest0x[0] && px[1] < closest0x[1]))) { - closest0 = p; - closest0x = px; - } else if (px[0] < newx[0] && (px[1] < closest1x[1] || (px[1] == closest1x[1] && px[0] < closest1x[0]))) { - closest1 = p; - closest1x = px; - } - } - - new->closest[0] = closest0; - new->closest[1] = closest1; - - new->prev[0] = p->prev[0]; - new->next[0] = p; - return true; -} - -// FIXME: This is very similar to the loop in hvc3d_list() but it doesn't use p->last_slice_z -static double -one_contribution_3d(dlnode_t * restrict new) -{ - set_cnext_to_closest(new); - const double * newx = new->x; - // if newx[0] == new->cnext[0]->x[0], the first area is zero - double area = compute_area_no_inners(newx, new->cnext[0], 1); - double volume = 0; - dlnode_t * p = new; - const double * px = p->x; - assert(!weakly_dominates(p->next[0]->x, newx, 4)); - while (true) { - double lastz = px[2]; - p = p->next[0]; - px = p->x; - volume += area * (px[2] - lastz); - - if (px[0] <= newx[0] && px[1] <= newx[1]) - return volume; - - assert(px[0] > newx[0] || px[1] > newx[1]); - assert(!weakly_dominates(px, p->next[0]->x, 4)); - - set_cnext_to_closest(p); - - if (px[0] < newx[0]) { - if (px[1] <= new->cnext[1]->x[1]) { - const double tmpx[] = { newx[0], px[1] }; - // if px[1] == new->cnext[1]->x[1] then area starts at 0. 
- area -= compute_area_no_inners(tmpx, new->cnext[1], 0); - p->cnext[1] = new->cnext[1]; - p->cnext[0]->cnext[1] = p; - new->cnext[1] = p; - } - } else if (px[1] < newx[1]) { - if (px[0] <= new->cnext[0]->x[0]) { - const double tmpx[] = { px[0], newx[1] }; - // if px[0] == new->cnext[0]->x[0] then area starts at 0. - area -= compute_area_no_inners(tmpx, new->cnext[0], 1); - p->cnext[0] = new->cnext[0]; - p->cnext[1]->cnext[0] = p; - new->cnext[0] = p; - } - } else { - assert(px[0] >= newx[0] && px[1] >= newx[1]); - // if px[0] == p->cnext[0]->x[0] then area starts at 0. - area -= compute_area_no_inners(px, p->cnext[0], 1); - p->cnext[1]->cnext[0] = p; - p->cnext[0]->cnext[1] = p; - } - } - return volume; -} - -/* Compute the hypervolume indicator in d=4 by iteratively computing the one - contribution problem in d=3. */ -double -hv4dplusU(dlnode_t * list) -{ - assert(list+2 == list->prev[0]); - assert(list+2 == list->prev[1]); - assert(list+1 == list->next[1]); - - double volume = 0, hv = 0; - dlnode_t * new = (list+1)->next[1]; - dlnode_t * last = list+2; - while (new != last) { - if (restart_base_setup_z_and_closest(list, new)) { - // new was not dominated by something else. - double new_v = one_contribution_3d(new); - assert(new_v > 0); - volume += new_v; - add_to_z(new); - update_links(list, new); - } - double height = new->next[1]->x[3] - new->x[3]; - assert(height >= 0); - hv += volume * height; - new = new->next[1]; - } - return hv; -} +#include "hv4d_priv.h" double hv4d(const double * restrict data, size_t n, const double * restrict ref) diff --git a/c/hv4d_priv.h b/c/hv4d_priv.h new file mode 100644 index 000000000..1952ad1e2 --- /dev/null +++ b/c/hv4d_priv.h @@ -0,0 +1,558 @@ +#ifndef _HV4D_PRIV_H +#define _HV4D_PRIV_H + +/****************************************************************************** + HV4D+ algorithm. + ------------------------------------------------------------------------------ + + Copyright (C) 2013, 2016, 2017, 2025 + Andreia P. 
Guerreiro + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. If a copy of the MPL was not distributed with this + file, You can obtain one at https://mozilla.org/MPL/2.0/. + + ------------------------------------------------------------------------------ + + Reference: + + [1] Andreia P. Guerreiro and Carlos M. Fonseca. Computing and Updating + Hypervolume Contributions in Up to Four Dimensions. IEEE Transactions on + Evolutionary Computation, 22(3):449–463, June 2018. + + ------------------------------------------------------------------------------ + + Modified by Manuel López-Ibáñez (2025): + - Integration within moocore. + - Correct handling of weakly dominated points and repeated coordinates during + preprocessing(). + - More efficient setup_cdllist() and preprocessing() in terms of time and memory. + +******************************************************************************/ + +#include +#include +#include "common.h" +#define HV_DIMENSION 4 +#include "hv_priv.h" + +// ------------ Update data structure ----------------------------------------- + +static inline void +add_to_z(dlnode_t * newp) +{ + newp->next[0] = newp->prev[0]->next[0]; //in case newp->next[0] was removed for being dominated + newp->next[0]->prev[0] = newp; + newp->prev[0]->next[0] = newp; +} + +static inline bool +lex_cmp_3d_102(const double * restrict a, const double * restrict b) +{ + return a[1] < b[1] || (a[1] == b[1] && (a[0] < b[0] || (a[0] == b[0] && a[2] < b[2]))); +} + +static inline bool +lex_cmp_3d_012(const double * restrict a, const double * restrict b) +{ + return a[0] < b[0] || (a[0] == b[0] && (a[1] < b[1] || (a[1] == b[1] && a[2] < b[2]))); +} + +/* + Go through the points in the order of z and either remove points that are + dominated by newp with respect to x,y,z or update the cx and cy lists by + adding newp. 
+*/ +static void +update_links(dlnode_t * restrict list, dlnode_t * restrict newp) +{ + assert(list+2 == list->prev[0]); + const double newx[] = { newp->x[0], newp->x[1], newp->x[2] }; + dlnode_t * p = newp->next[0]; + const dlnode_t * const stop = list+2; + while (p != stop) { + const double * px = p->x; + // px dominates newx (but not equal) + if (px[0] <= newx[0] && px[1] <= newx[1] && (px[0] < newx[0] || px[1] < newx[1])) + return; + + if (newx[0] <= px[0]){ + //new <= p + if (newx[1] <= px[1]){ + assert(weakly_dominates(newx, px, 3)); + //p->ndomr++; + remove_from_z(p); + } else if (newx[0] < px[0] && lex_cmp_3d_102(newx, p->closest[1]->x)) { // newx[1] > px[1] + p->closest[1] = newp; + } + } else if (newx[1] < px[1] && lex_cmp_3d_012(newx, p->closest[0]->x)) { //newx[0] > px[0] + p->closest[0] = newp; + } + p = p->next[0]; + } +} + + + +// This does what setupZandClosest does while reconstructing L at z = newp->x[2]. +_attr_optimize_finite_math +__attribute__((hot)) +static bool +restart_base_setup_z_and_closest(dlnode_t * restrict list, dlnode_t * restrict newp) +{ + // FIXME: This is the most expensive function in the HV4D+ algorithm. +#ifdef HV_RECURSIVE + const double newx[] = { newp->x[0], newp->x[1], newp->x[2] }; +#else + const double newx[] = { newp->x[0], newp->x[1], newp->x[2], newp->x[3] }; +#endif + assert(list+1 == list->next[0]); + dlnode_t * closest0 = list+1; + dlnode_t * closest1 = list; + const double * closest0x = closest0->x; + const double * closest1x = closest1->x; + dlnode_t * p = (list+1)->next[0]; + assert(p == list->next[0]->next[0]); + restart_list_y(list); + while (true) { + const double * restrict px = p->x; + // Help auto-vectorization. 
+ bool p_lt_new_0 = px[0] < newx[0]; + bool p_lt_new_1 = px[1] < newx[1]; + bool p_lt_new_2 = px[2] < newx[2]; + bool p_eq_new_0 = px[0] == newx[0]; + bool p_eq_new_1 = px[1] == newx[1]; + bool p_eq_new_2 = px[2] == newx[2]; + bool p_leq_new_0 = p_lt_new_0 | p_eq_new_0; + bool p_leq_new_1 = p_lt_new_1 | p_eq_new_1; + bool p_leq_new_2 = p_lt_new_2 | p_eq_new_2; + + // if (weakly_dominates(px, newx, 3)) { // Slower + if (p_leq_new_0 & p_leq_new_1 & p_leq_new_2) { + //new->ndomr++; +#ifdef HV_RECURSIVE + // When called by onec4dplusU, px may be just a 3-dimensional vector. + assert(weakly_dominates(px, newx, 3)); +#else + assert(weakly_dominates(px, newx, 4)); +#endif + return false; + } + + //if (!lexicographic_less_3d(px, newx)) { // Slower + if (!(p_lt_new_2 || (p_eq_new_2 && (p_lt_new_1 || (p_eq_new_1 && p_leq_new_0))))) { + assert(!lexicographic_less_3d(px, newx)); + newp->closest[0] = closest0; + newp->closest[1] = closest1; + newp->prev[0] = p->prev[0]; + newp->next[0] = p; + return true; + } + + // FIXME: Can we move reconstruct() after setup_z_and_closest() ? + // reconstruct() + set_cnext_to_closest(p); + p->cnext[0]->cnext[1] = p; + p->cnext[1]->cnext[0] = p; + + // setup_z_and_closest() + ASSUME(!p_leq_new_0 || !p_leq_new_1); + if (p_lt_new_1 && (px[0] < closest0x[0] || (px[0] == closest0x[0] && px[1] < closest0x[1]))) { + closest0 = p; + closest0x = px; + } else if (p_lt_new_0 && (px[1] < closest1x[1] || (px[1] == closest1x[1] && px[0] < closest1x[0]))) { + closest1 = p; + closest1x = px; + } + p = p->next[0]; + } + unreachable(); +} + + +/** + Assumes that the HV3D+ data structure is reconstructed up to z <= newp->x[2]. 
+ Sweeps the points in the data structure in ascending order of y-coordinate, includes newp + in the z list, sets up the "closest" data of newp and, if equalz=true (i.e., all points in + the data structure have x[2] == newp->x[2]), it also updates the "closest" data of the point + that follows newp according to the lexicographic order (z,y,x). +*/ +static inline bool +continue_base_update_z_closest(dlnode_t * restrict list, dlnode_t * restrict newp, bool equalz) +{ + const double newx[] = { newp->x[0], newp->x[1], newp->x[2] }; + assert(list+1 == list->next[0]); + dlnode_t * p = (list+1)->cnext[1]; + + // find newp->closest[0] + while (p->x[1] < newx[1]){ + assert(p->x[2] <= newx[2]); + p = p->cnext[1]; + } + + if (p->x[1] != newx[1] || p->x[0] > newx[0]) + p = p->cnext[0]; + + assert(lexicographic_less_2d(p->x, newx)); + assert(lexicographic_less_2d(newx, p->cnext[1]->x)); + + // Check if newp is dominated. + if (weakly_dominates(p->x, newx, 3)) { + return false; + } + dlnode_t * lex_prev = p; // newp->closest[0] = lex_prev + p = lex_prev->cnext[1]; + + // if all points in the list have z-coordinate equal to px[2] + if (equalz) { + assert(p->cnext[0]->x[1] < newx[1]); + assert(p->x[1] >= newx[1]); + // Check if newp dominates points in the list. + while (p->x[0] >= newx[0]) { + remove_from_z(p); + p = p->cnext[1]; + } + + assert(p->x[0] < newx[0]); + // update the closest of points in the list (max. 
1), if needed + if (p != list) + p->closest[0] = newp; + + //setup z list + assert(p == list || lex_prev->next[0] == p); + newp->prev[0] = lex_prev; + newp->next[0] = lex_prev->next[0]; + + newp->closest[1] = list; + + } else { + //setup z list + newp->prev[0] = list->prev[0]->prev[0]; + newp->next[0] = list->prev[0]; + + // find newp->closest[1] + while (p->x[0] >= newx[0]){ + assert(p->x[2] <= newx[2]); + assert(!weakly_dominates(newx, p->x, 3)); + p = p->cnext[1]; + } + newp->closest[1] = p; + } + + newp->closest[0] = lex_prev; + // update cnext + lex_prev->cnext[1] = newp; + newp->cnext[0] = lex_prev; + newp->cnext[1] = p; + p->cnext[0] = newp; + + //update z list + newp->next[0]->prev[0] = newp; + newp->prev[0]->next[0] = newp; + + return true; +} + + +// FIXME: This is very similar to the loop in hvc3d_list() but it doesn't use p->last_slice_z +static double +one_contribution_3d(dlnode_t * restrict newp) +{ + set_cnext_to_closest(newp); + const double * newx = newp->x; + // if newx[0] == newp->cnext[0]->x[0], the first area is zero + double area = compute_area_no_inners(newx, newp->cnext[0], 1); + double volume = 0; + double lastz = newx[2]; + dlnode_t * p = newp->next[0]; + assert(!weakly_dominates(p->x, newx, 4)); + while (true) { + const double * px = p->x; + volume += area * (px[2] - lastz); + + if (px[0] <= newx[0] && px[1] <= newx[1]) + return volume; + + ASSUME(px[0] > newx[0] || px[1] > newx[1]); + assert(!weakly_dominates(px, p->next[0]->x, 4)); + + set_cnext_to_closest(p); + + if (px[0] < newx[0]) { + if (px[1] <= newp->cnext[1]->x[1]) { + const double tmpx[] = { newx[0], px[1] }; + // if px[1] == newp->cnext[1]->x[1] then area starts at 0. 
+ area -= compute_area_no_inners(tmpx, newp->cnext[1], 0); + p->cnext[1] = newp->cnext[1]; + p->cnext[0]->cnext[1] = p; + newp->cnext[1] = p; + } + } else if (px[1] < newx[1]) { + if (px[0] <= newp->cnext[0]->x[0]) { + const double tmpx[] = { px[0], newx[1] }; + // if px[0] == newp->cnext[0]->x[0] then area starts at 0. + area -= compute_area_no_inners(tmpx, newp->cnext[0], 1); + p->cnext[0] = newp->cnext[0]; + p->cnext[1]->cnext[0] = p; + newp->cnext[0] = p; + } + } else { + assert(px[0] >= newx[0] && px[1] >= newx[1]); + // if px[0] == p->cnext[0]->x[0] then area starts at 0. + area -= compute_area_no_inners(px, p->cnext[0], 1); + p->cnext[1]->cnext[0] = p; + p->cnext[0]->cnext[1] = p; + } + lastz = px[2]; + p = p->next[0]; + } + unreachable(); +} + +/* Compute the hypervolume indicator in d=4 by iteratively computing the one + contribution problem in d=3. */ +static inline double +hv4dplusU(dlnode_t * list) +{ + assert(list+2 == list->prev[0]); + assert(list+2 == list->prev[1]); + assert(list+1 == list->next[1]); + + double volume = 0, hv = 0; + dlnode_t * newp = (list+1)->next[1]; + const dlnode_t * const last = list+2; + while (newp != last) { + if (restart_base_setup_z_and_closest(list, newp)) { + // newp was not dominated by something else. + double new_v = one_contribution_3d(newp); + assert(new_v > 0); + volume += new_v; + add_to_z(newp); + update_links(list, newp); + } + // FIXME: It newp was dominated, can we accumulate the height and update + // hv later? + double height = newp->next[1]->x[3] - newp->x[3]; + assert(height >= 0); + hv += volume * height; + newp = newp->next[1]; + } + return hv; +} + +static inline int compare_lex_2d(const void * pa, const void * pb) +{ + const double ax = **(const double **)pa; + const double bx = **(const double **)pb; + const double ay = *(*(const double **)pa + 1); + const double by = *(*(const double **)pb + 1); + int cmpx = cmp_double_asc(ax, bx); + int cmpy = cmp_double_asc(ay, by); + return cmpy ? 
cmpy : cmpx; +} + +static inline void lex_sort_equal_z_and_setup_nodes(dlnode_t * newp_aux, double * x_aux, size_t n) { + const double **scratch = malloc(n * sizeof(*scratch)); + double * x = x_aux; + size_t i; + for (i = 0; i < n; i++){ + scratch[i] = x; + x += 3; + } + + qsort(scratch, n, sizeof(*scratch), compare_lex_2d); + + for (i = 0; i < n; i++) { + newp_aux->x = scratch[i]; + assert(i == 0 || lexicographic_less_2d(scratch[i-1], scratch[i])); + newp_aux++; + } + + free(scratch); +} + +static inline void +update_bound_3d(double * restrict dest, const double * restrict a, const double * restrict b) +{ + for (int i = 0; i < 3; i++) + dest[i] = MAX(a[i], b[i]); +} + +// FIXME: Move this function to hv.c +#ifdef HV_RECURSIVE +/** + Compute the hv contribution of "the_point" in d=4 by iteratively computing + the one contribution problem in d=3. */ +static inline double +onec4dplusU(dlnode_t * restrict list, dlnode_t * restrict list_aux, + dlnode_t * restrict the_point) +{ + // FIXME: This is never triggered. + assert(the_point->ignore < 3); + if (the_point->ignore >= 3) { + return 0; + } + + assert(list+2 == list->prev[0]); + assert(list+2 == list->prev[1]); + assert(list+1 == list->next[1]); + + dlnode_t * newp = (list+1)->next[0]; + const dlnode_t * const last = list+2; + dlnode_t * const z_first = newp; + dlnode_t * const z_last = last->prev[0]; + + double * x_aux = list_aux->vol; + dlnode_t * newp_aux = list_aux+1; // list_aux is a sentinel + dlnode_t * prevp_aux; + int c; + + reset_sentinels_3d(list); + restart_list_y(list); + + const double * the_point_x = the_point->x; + // Setup the 3D base only if there are any points leq than the_point_x[3]) + if ((list+1)->next[1] != the_point || the_point->next[1]->x[3] == the_point_x[3]) { + bool done_once = false; + // Set the_point->ignore=3 so the loop will skip it, but restore its + // value after the loop. 
+ dimension_t the_point_ignore = the_point->ignore; + the_point->ignore = 3; + assert(newp != last); + + // PART 1: Setup 2D base of the 3D base + while (newp->x[2] <= the_point_x[2]) { + const double * newpx = newp->x; + if (newpx[3] <= the_point_x[3] && newp->ignore < 3) { + if (weakly_dominates(newpx, the_point_x, 3)) { + assert(the_point->ignore == 3); + // Restore modified links (z list). + (list+1)->next[0] = z_first; + (list+2)->prev[0] = z_last; + return 0; + } + + // FIXME: Is it possible that x_aux matches newpx? YES! So just assign and avoid the comparison. + // x_aux is the coordinate-wise maximum between newpx and the_point_x. + update_bound_3d(x_aux, newpx, the_point_x); + newp_aux->x = x_aux; + x_aux += 3; + + if (continue_base_update_z_closest(list, newp_aux, done_once)) { + newp_aux++; + done_once = true; + } + } + newp = newp->next[0]; + } + the_point->ignore = the_point_ignore; + + // PART 2: Setup the remainder of the 3D base + c = 0; + while (newp != last) { + const double * newpx = newp->x; + if (newpx[3] <= the_point_x[3] && newp->ignore < 3) { + + // FIXME: Is it possible that x_aux matches newpx? YES! So just assign and avoid the comparison. + // x_aux is the coordinate-wise maximum between newpx and the_point_x. 
+ update_bound_3d(x_aux, newpx, the_point_x); + x_aux += 3; + c++; + } + + if(c > 0 && newp->next[0]->x[2] > newpx[2]){ + if (c == 1) { + newp_aux->x = x_aux-3; + continue_base_update_z_closest(list, newp_aux, false); + newp_aux++; + } else { + // all points with equal z-coordinate will be added to the data structure in lex order + lex_sort_equal_z_and_setup_nodes(newp_aux, x_aux-3*c, c); + continue_base_update_z_closest(list, newp_aux, false); + prevp_aux = newp_aux; + newp_aux++; + + for (int i = 1; i < c; i++) { + assert(newpx[2] == newp_aux->x[2]); // all c points have equal z + assert(prevp_aux->x[1] <= newp_aux->x[1]); // due to lexsort + if (newp_aux->x[0] < prevp_aux->x[0]){ + // if newp_aux is not dominated by prevp + continue_base_update_z_closest(list, newp_aux, false); + } + prevp_aux = newp_aux; + newp_aux++; + } + } + c = 0; + } + newp = newp->next[0]; + } + } + + newp = the_point->next[1]; + while (newp->x[3] <= the_point_x[3]) + newp = newp->next[1]; + + dlnode_t * tp_prev_z = the_point->prev[0]; + dlnode_t * tp_next_z = the_point->next[0]; + // FIXME: Does this call always return true? +#if DEBUG >= 1 + assert(restart_base_setup_z_and_closest(list, the_point)); +#else + restart_base_setup_z_and_closest(list, the_point); +#endif + double volume = one_contribution_3d(the_point); + the_point->prev[0] = tp_prev_z; + the_point->next[0] = tp_next_z; + + assert(volume > 0); + double height = newp->x[3] - the_point_x[3]; + // It cannot be zero because we exited the loop above. + assert(height > 0); + double hv = volume * height; + + // PART 3: Update the 3D contribution. + while (newp != last) { + const double * newpx = newp->x; + if (weakly_dominates(newpx, the_point_x, 3)) + break; + if (newp->ignore < 3) { + // FIXME: Is it possible that x_aux matches newpx? YES! So just assign and avoid the comparison. + // x_aux is the coordinate-wise maximum between newpx and the_point_x. 
+ update_bound_3d(x_aux, newpx, the_point_x); + newp_aux->x = x_aux; + x_aux += 3; + if (restart_base_setup_z_and_closest(list, newp_aux)) { + // newp was not dominated by something else. + double newp_v = one_contribution_3d(newp_aux); + assert(newp_v > 0); + volume -= newp_v; + + add_to_z(newp_aux); + update_links(list, newp_aux); + newp_aux++; + } + + if (weakly_dominates(the_point_x, newpx, 3)){ + newp->ignore = 3; + } + } + + // FIXME: If newp was dominated, can we accumulate the height and update + // hv later? AG: Yes + height = newp->next[1]->x[3] - newpx[3]; + assert(height >= 0); + hv += volume * height; + + newp = newp->next[1]; + } + + // Restore z list + (list+1)->next[0] = z_first; + (list+2)->prev[0] = z_last; + + return hv; +} +#endif // HV_RECURSIVE + +#endif // _HV4D_PRIV_H diff --git a/c/hv_priv.h b/c/hv_priv.h index 6dadac436..320cf870a 100644 --- a/c/hv_priv.h +++ b/c/hv_priv.h @@ -12,15 +12,25 @@ // ----------------------- Data Structure ------------------------------------- /* - With HV_DIMENSION==3, we have 'struct dlnode * next[1]' instead of 'struct dlnode * next'. - The compiler should be able to remove the extra indirection. */ typedef struct dlnode { - const double * x; // point coordinates (objective vector). + const double * restrict x; // point coordinates (objective vector). +#ifdef HV_RECURSIVE + // In the recursive algorithm, the number of dimensions is not known in + // advance, so next and prev cannot be fixed-size arrays. + struct dlnode ** r_next; // next-node vector for dimensions 5 and above. + struct dlnode ** r_prev; // previous-node vector for dimensions 5 and above. + double * restrict area; // partial area for dimensions 4 and above. + double * restrict vol; // partial volume for dimensions 4 and above. +#endif + /* With HV_DIMENSION==3, we have 'struct dlnode * next[1]' instead of + 'struct dlnode * next'. GCC -O3 is able to remove the extra + indirection, so it is not worth having a special case. 
*/ struct dlnode * next[HV_DIMENSION - 2]; /* keeps the points sorted according to coordinates 2,3 and 4 - (in the case of 2 and 3, only the points swept by 4 are kept) */ + (in the case of 2 and 3, only the points swept by 4 are kept) */ struct dlnode * prev[HV_DIMENSION - 2]; //keeps the points sorted according to coordinates 2 and 3 (except the sentinel 3) + struct dlnode * cnext[2]; //current next #if HV_DIMENSION == 4 || defined(HVC_ONLY) struct dlnode * closest[2]; // closest[0] == cx, closest[1] == cy @@ -31,8 +41,9 @@ typedef struct dlnode { double area, volume; double last_slice_z; // FIXME: Is this really needed? struct dlnode * head[2]; // lowest (x, y) - bool ignore; // hvc should be zero (duplicated or dominated) #endif + // In hvc this is used as a boolean (duplicated or dominated) + dimension_t ignore; // [0, 255] } dlnode_t; // ------------ Update data structure ----------------------------------------- @@ -80,8 +91,9 @@ print_x(const dlnode_t * p) #endif // HV_DIMENSION == 3 // ------------------------ Circular double-linked list ---------------------- + static inline void -reset_sentinels(dlnode_t * restrict list) +reset_sentinels_3d(dlnode_t * restrict list) { dlnode_t * restrict s1 = list; dlnode_t * restrict s2 = list + 1; @@ -89,33 +101,41 @@ reset_sentinels(dlnode_t * restrict list) s1->next[0] = s2; s1->prev[0] = s3; -#if HV_DIMENSION == 4 || defined(HVC_ONLY) - s1->closest[0] = s2; - s1->closest[1] = s1; -#endif -#if HV_DIMENSION == 4 - s1->next[1] = s2; - s1->prev[1] = s3; -#endif s2->next[0] = s3; s2->prev[0] = s1; -#if HV_DIMENSION == 4 || defined(HVC_ONLY) - s2->closest[0] = s2; - s2->closest[1] = s1; -#endif -#if HV_DIMENSION == 4 - s2->next[1] = s3; - s2->prev[1] = s1; -#endif s3->next[0] = s1; s3->prev[0] = s2; + #if HV_DIMENSION == 4 || defined(HVC_ONLY) + s1->closest[0] = s2; + s1->closest[1] = s1; + + s2->closest[0] = s2; + s2->closest[1] = s1; + s3->closest[0] = s2; s3->closest[1] = s1; #endif +} + +static inline void 
+reset_sentinels(dlnode_t * restrict list) +{ + reset_sentinels_3d(list); + #if HV_DIMENSION == 4 + dlnode_t * restrict s1 = list; + dlnode_t * restrict s2 = list + 1; + dlnode_t * restrict s3 = list + 2; + + s1->next[1] = s2; + s1->prev[1] = s3; + + s2->next[1] = s3; + s2->prev[1] = s1; + s3->next[1] = s1; s3->prev[1] = s2; #endif @@ -145,11 +165,13 @@ init_sentinels(dlnode_t * restrict list, const double * restrict ref) #if HV_DIMENSION == 3 -DBL_MAX, ref[1], -DBL_MAX, // Sentinel 1 ref[0], -DBL_MAX, -DBL_MAX, // Sentinel 2 - -DBL_MAX, -DBL_MAX, ref[2] // Sentinel 2 -#else + -DBL_MAX, -DBL_MAX, ref[2] // Sentinel 3 +#elif HV_DIMENSION == 4 -DBL_MAX, ref[1], -DBL_MAX, -DBL_MAX, // Sentinel 1 ref[0], -DBL_MAX, -DBL_MAX, -DBL_MAX, // Sentinel 2 - -DBL_MAX, -DBL_MAX, ref[2], ref[3] // Sentinel 2 + -DBL_MAX, -DBL_MAX, ref[2], ref[3] // Sentinel 3 +#else +# error "Unknown HV_DIMENSION" #endif }; diff --git a/c/hvapprox.c b/c/hvapprox.c index 446aa687c..b69cd1013 100644 --- a/c/hvapprox.c +++ b/c/hvapprox.c @@ -29,7 +29,7 @@ transform_and_filter(const double * restrict data, size_t * restrict npoints_p, const bool * restrict maximise) { size_t npoints = *npoints_p; - double * points = malloc(dim * npoints * sizeof(double)); + double * points = malloc(dim * npoints * sizeof(*points)); size_t i, j; // Transform points (ref - points) for (i = 0, j = 0; i < npoints; i++) { @@ -54,14 +54,14 @@ transform_and_filter(const double * restrict data, size_t * restrict npoints_p, } _attr_optimize_finite_math // Required so that GCC will vectorize the inner loop. -static inline double +static double get_expected_value(const double * restrict points, size_t npoints, dimension_t dim, const double * restrict w) { ASSUME(1 <= dim && dim <= 32); ASSUME(npoints > 0); // points >= 0 && w >=0 so max_s_w cannot be < 0. 
- double max_s_w = 0; + double max_s_w = -INFINITY; for (size_t i = 0; i < npoints; i++) { const double * restrict p = points + i * dim; double min_ratio = p[0] * w[0]; @@ -71,6 +71,7 @@ get_expected_value(const double * restrict points, size_t npoints, } max_s_w = MAX(max_s_w, min_ratio); } + ASSUME(max_s_w >= 0); return pow_uint(max_s_w, dim); } @@ -160,7 +161,7 @@ static const long double sphere_area_div_2_pow_d_times_d[] = { _attr_pure_func static double euclidean_norm(const double * restrict w, dimension_t dim) { - ASSUME(dim >= 2 && dim <= 32); + ASSUME(2 <= dim && dim <= 32); double norm = (w[0] * w[0]) + (w[1] * w[1]); for (dimension_t k = 2; k < dim; k++) norm += w[k] * w[k]; @@ -530,8 +531,6 @@ solve_inverse_int_of_power_sin(long double theta, dimension_t dim) static long double * compute_int_all(dimension_t dm1) { - ASSUME(dm1 >= 1); - ASSUME(dm1 < 32); long double * int_all = malloc(dm1 * sizeof(long double)); dimension_t i; DEBUG2_PRINT("int_all[%u] =", (unsigned int)dm1); @@ -560,8 +559,7 @@ static void compute_theta(long double * restrict theta, dimension_t dim, const long double * restrict int_all) { - ASSUME(dim >= 2); - ASSUME(dim <= 32); + ASSUME(2 <= dim && dim <= 32); for (dimension_t j = 0; j < dim - 1; j++) { // We multiply here because we computed 1 / int_all[j] before. 
theta[j] = solve_inverse_int_of_power_sin(theta[j] * int_all[(dim - 2) - j], @@ -573,8 +571,7 @@ static void compute_hua_wang_direction(double * restrict direction, dimension_t dim, const long double * restrict theta) { - ASSUME(dim >= 2); - ASSUME(dim <= 32); + ASSUME(2 <= dim && dim <= 32); dimension_t k, j; direction[0] = STATIC_CAST(double, sinl(theta[0])); for (k = 1; k < dim - 1; k++) diff --git a/c/libhv.mk b/c/libhv.mk index fbb0cde17..faa368ab9 100644 --- a/c/libhv.mk +++ b/c/libhv.mk @@ -1,6 +1,6 @@ # -*- Makefile-gmake -*- LIBHV_SRCS = hv.c hv3dplus.c hv4d.c hvc3d.c hv_contrib.c -LIBHV_HDRS = hv.h hv_priv.h hv3d_priv.h libmoocore-config.h +LIBHV_HDRS = hv.h hv_priv.h hv3d_priv.h hv4d_priv.h libmoocore-config.h LIBHV_OBJS = $(LIBHV_SRCS:.c=.o) HV_LIB = fpli_hv.a diff --git a/c/libmoocore-config.h b/c/libmoocore-config.h index 22a9a495c..c64b3c86c 100644 --- a/c/libmoocore-config.h +++ b/c/libmoocore-config.h @@ -2,7 +2,7 @@ # define LIBMOOCORE_CONFIG_H_ #ifndef R_PACKAGE -# if defined(_WIN32) && !(defined(__MINGW__) || defined(__MINGW32__) || defined(__MINGW64__)) +# if (defined(_WIN32) || defined(__CYGWIN__)) && !(defined(__MINGW__) || defined(__MINGW32__) || defined(__MINGW64__)) # ifndef MOOCORE_STATIC_LIB # ifdef MOOCORE_SHARED_LIB # define MOOCORE_API extern __declspec(dllexport) diff --git a/c/mt19937/mt19937.c b/c/mt19937/mt19937.c index 0ca524ffc..8da3db733 100644 --- a/c/mt19937/mt19937.c +++ b/c/mt19937/mt19937.c @@ -15,104 +15,6 @@ void mt19937_seed(mt19937_state *state, uint32_t seed) { state->pos = RK_STATE_LEN; } -/* initializes mt[RK_STATE_LEN] with a seed */ -static void init_genrand(mt19937_state *state, uint32_t s) { - int mti; - uint32_t *mt = state->key; - - mt[0] = s & 0xffffffffUL; - for (mti = 1; mti < RK_STATE_LEN; mti++) { - /* - * See Knuth TAOCP Vol2. 3rd Ed. P.106 for multiplier. - * In the previous versions, MSBs of the seed affect - * only MSBs of the array mt[]. 
- * 2002/01/09 modified by Makoto Matsumoto - */ - /* This code triggers conversions between uint32_t and unsigned long, - which various compilers warn about. */ -# pragma GCC diagnostic push -# pragma GCC diagnostic ignored "-Wconversion" -#if defined(__clang__) -# pragma clang diagnostic push -# pragma clang diagnostic ignored "-Wimplicit-int-conversion" -#endif - mt[mti] = (1812433253UL * (mt[mti - 1] ^ (mt[mti - 1] >> 30)) + mti); -# pragma GCC diagnostic pop -#if defined(__clang__) -# pragma clang diagnostic pop -#endif - /* for > 32 bit machines */ - mt[mti] &= 0xffffffffUL; - } - state->pos = mti; - return; -} - -/* - * initialize by an array with array-length - * init_key is the array for initializing keys - * key_length is its length - */ -void mt19937_init_by_array(mt19937_state *state, uint32_t *init_key, - int key_length) { - /* was signed in the original code. RDH 12/16/2002 */ - int i = 1; - int j = 0; - uint32_t *mt = state->key; - int k; - - init_genrand(state, 19650218UL); - k = (RK_STATE_LEN > key_length ? RK_STATE_LEN : key_length); - for (; k; k--) { -# pragma GCC diagnostic push -# pragma GCC diagnostic ignored "-Wconversion" -#if defined(__clang__) -# pragma clang diagnostic push -# pragma clang diagnostic ignored "-Wimplicit-int-conversion" -#endif - /* non linear */ - mt[i] = (mt[i] ^ ((mt[i - 1] ^ (mt[i - 1] >> 30)) * 1664525UL)) + init_key[j] + j; -# pragma GCC diagnostic pop -#if defined(__clang__) -# pragma clang diagnostic pop -#endif - /* for > 32 bit machines */ - mt[i] &= 0xffffffffUL; - i++; - j++; - if (i >= RK_STATE_LEN) { - mt[0] = mt[RK_STATE_LEN - 1]; - i = 1; - } - if (j >= key_length) { - j = 0; - } - } - for (k = RK_STATE_LEN - 1; k; k--) { - /* This code triggers conversions between uint32_t and unsigned long, - which various compilers warn about. 
*/ -# pragma GCC diagnostic push -# pragma GCC diagnostic ignored "-Wconversion" -#if defined(__clang__) -# pragma clang diagnostic push -# pragma clang diagnostic ignored "-Wimplicit-int-conversion" -#endif - mt[i] = (mt[i] ^ ((mt[i - 1] ^ (mt[i - 1] >> 30)) * 1566083941UL)) - i; /* non linear */ -# pragma GCC diagnostic pop -#if defined(__clang__) -# pragma clang diagnostic pop -#endif - mt[i] &= 0xffffffffUL; /* for WORDSIZE > 32 machines */ - i++; - if (i >= RK_STATE_LEN) { - mt[0] = mt[RK_STATE_LEN - 1]; - i = 1; - } - } - - mt[0] = 0x80000000UL; /* MSB is 1; assuring non-zero initial array */ -} - void mt19937_gen(mt19937_state *state) { uint32_t y; int i; diff --git a/c/mt19937/mt19937.h b/c/mt19937/mt19937.h index 368fd799a..16c09c230 100644 --- a/c/mt19937/mt19937.h +++ b/c/mt19937/mt19937.h @@ -43,9 +43,6 @@ static inline uint32_t mt19937_next(mt19937_state *state) { return y; } -extern void mt19937_init_by_array(mt19937_state *state, uint32_t *init_key, - int key_length); - static inline uint64_t mt19937_next64(mt19937_state *state) { return (uint64_t)mt19937_next(state) << 32 | mt19937_next(state); } diff --git a/c/sort.h b/c/sort.h index 08488b8f9..f88846988 100644 --- a/c/sort.h +++ b/c/sort.h @@ -44,6 +44,12 @@ lexicographic_less_3d(const double * restrict a, const double * restrict b) return a[2] < b[2] || (a[2] == b[2] && (a[1] < b[1] || (a[1] == b[1] && a[0] <= b[0]))); } +static inline bool +lexicographic_less_2d(const double * restrict a, const double * restrict b) +{ + return (a[1] < b[1] || (a[1] == b[1] && a[0] <= b[0])); +} + static inline bool all_equal_double(const double * restrict a, const double * restrict b, dimension_t dim) { diff --git a/c/whv.c b/c/whv.c index 029d9a0c7..7859ed3f6 100644 --- a/c/whv.c +++ b/c/whv.c @@ -75,12 +75,12 @@ double rect_weighted_hv2d(double *data, int n, double * rectangles, int rectangles_nrow, const double * reference) { -#define print_point(k, p, r, rect) \ - DEBUG2_PRINT("%d: p[%lu] = (%16.15g, 
%16.15g)" \ +#define debug_print_point(k, p, r, rect) \ + DEBUG2_PRINT("%d: p[%lu] = (%16.15g, %16.15g)" \ "\trectangle[%lu] = (%16.15g, %16.15g, %16.15g, %16.15g)\n", \ __LINE__, (unsigned long) k, p[0], p[1], (unsigned long) r, rect[0], rect[1], rect[2], rect[3]) -#define print_rect(r, rect) \ +#define debug_print_rect(r, rect) \ DEBUG2_PRINT("%d: rectangle[%lu] = (%16.15g, %16.15g, %16.15g, %16.15g, %16.15g)\n", \ __LINE__, (unsigned long) r, rect[0], rect[1], rect[2], rect[3], rect[4]) @@ -90,10 +90,10 @@ rect_weighted_hv2d(double *data, int n, double * rectangles, rect = rectangles + (ROW) * (nobj * 2 + 1); \ lower0= rect[0]; lower1= rect[1]; upper0= rect[2]; upper1= rect[3]; \ color = rect[4]; \ - print_rect(ROW, rect); \ - assert(lower0 < upper0); \ - assert(lower1 < upper1); \ - assert(color >= 0); \ + debug_print_rect(ROW, rect); \ + assert(lower0 < upper0); \ + assert(lower1 < upper1); \ + assert(color >= 0); \ } while(0) #define next_point() do { \ @@ -102,8 +102,9 @@ rect_weighted_hv2d(double *data, int n, double * rectangles, if (pk >= n || top == last_top || p[0] >= last_right) \ goto return_whv; \ p += nobj; \ - print_point(pk, p, r, rect); \ + debug_print_point(pk, p, r, rect); \ } while(0) + // We cannot use %zu for size_t because of MingW compiler. 
DEBUG2_PRINT("n = %lu\trectangles = %lu\n", (unsigned long)n, (unsigned long)rectangles_nrow); if (rectangles_nrow <= 0 || n <= 0) return 0; @@ -128,7 +129,7 @@ rect_weighted_hv2d(double *data, int n, double * rectangles, const double *p = data; int pk = 0; - print_point(pk, p, r, rect); + debug_print_point(pk, p, r, rect); double top = upper1; // lowest_upper1; const double last_top = rectangles[rectangles_nrow * (nobj * 2 + 1) - 2]; diff --git a/python/doc/source/whatsnew/index.rst b/python/doc/source/whatsnew/index.rst index 293ea7de0..9dd8b7b32 100644 --- a/python/doc/source/whatsnew/index.rst +++ b/python/doc/source/whatsnew/index.rst @@ -21,7 +21,7 @@ Version 0.1.9 (31/10/2025) - :func:`~moocore.is_nondominated` and :func:`~moocore.filter_dominated` are faster for dimensions larger than 3. - ``moocore`` wheels are now built for ``aarch64`` (ARM64) in Linux and Windows. See the `installation instructions `_. -- :func:`~moocore.is_nondominated` and :func:`moocore.filter_dominated` are now stable in 2D and 3D with ``!keep_weakly``, that is, only the first of duplicated points is marked as nondominated. +- :func:`~moocore.is_nondominated` and :func:`~moocore.filter_dominated` are now stable in 2D and 3D with ``!keep_weakly``, that is, only the first of duplicated points is marked as nondominated. 
Version 0.1.8 (15/07/2025) diff --git a/python/pyproject.toml b/python/pyproject.toml index 6997bb6b1..da1d4b9f0 100644 --- a/python/pyproject.toml +++ b/python/pyproject.toml @@ -8,7 +8,7 @@ requires = [ [project] name = "moocore" -version = "0.1.9" +version = "0.2.0.dev0" description = "Core Algorithms for Multi-Objective Optimization" readme = "README.md" keywords = [ @@ -33,6 +33,7 @@ classifiers = [ "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", + "Programming Language :: Python :: 3.14", ] dependencies = [ "cffi>=1.17.1", diff --git a/python/src/moocore/_moocore.py b/python/src/moocore/_moocore.py index 578be70bc..b5119adb9 100644 --- a/python/src/moocore/_moocore.py +++ b/python/src/moocore/_moocore.py @@ -23,6 +23,7 @@ np1d_to_int_array, atleast_1d_of_length_n, is_integer_value, + _get_seed_for_c, ) ## The CFFI library is used to create C bindings. @@ -431,11 +432,12 @@ def hypervolume( for speed. For 5D or higher, it uses a recursive algorithm - :footcite:p:`FonPaqLop06:hypervolume` with HV4D\ :sup:`+` as the base case, - resulting in a :math:`O(n^{d-2})` time complexity and :math:`O(n)` space - complexity in the worst-case, where :math:`d` is the dimension of the - points. Experimental results show that the pruning techniques used may - reduce the time complexity even further. The original proposal + :footcite:p:`FonPaqLop06:hypervolume` that computes 4D contributions + :footcite:p:`GueFon2017hv4d` as the base case, resulting in a + :math:`O(n^{d-2})` time complexity and :math:`O(n)` space complexity in the + worst-case, where :math:`d` is the dimension of the points. Experimental + results show that the pruning techniques used may reduce the time + complexity even further. The original proposal :footcite:p:`FonPaqLop06:hypervolume` had the HV3D algorithm as the base case, giving a time complexity of :math:`O(n^{d-2} \log n)`. Andreia P. 
Guerreiro enhanced the numerical stability of the recursive algorithm by @@ -938,11 +940,7 @@ def hv_approx( raise ValueError(f"nsamples must be an integer value: {nsamples}") nsamples = ffi.cast("uint_fast32_t", nsamples) if method == "DZ2019-MC": - if not is_integer_value(seed): - seed = np.random.default_rng(seed).integers( - 2**32 - 2, dtype=np.uint32 - ) - seed = ffi.cast("uint32_t", seed) + seed = _get_seed_for_c(seed) hv = lib.hv_approx_normal( data_p, nobj, npoints, ref, maximise, nsamples, seed ) @@ -2545,14 +2543,11 @@ def whv_hype( ref[maximise] = -ref[maximise] ideal[maximise] = -ideal[maximise] - if not is_integer_value(seed): - seed = np.random.default_rng(seed).integers(2**32 - 2, dtype=np.uint32) - data_p, npoints, nobj = np2d_to_double_array(data) ref = ffi.from_buffer("double []", ref) ideal = ffi.from_buffer("double []", ideal) # FIXME: Check ranges. - seed = ffi.cast("uint32_t", seed) + seed = _get_seed_for_c(seed) nsamples = ffi.cast("int", nsamples) if dist == "uniform": diff --git a/python/src/moocore/_utils.py b/python/src/moocore/_utils.py index 26268e744..f7f403984 100644 --- a/python/src/moocore/_utils.py +++ b/python/src/moocore/_utils.py @@ -71,3 +71,9 @@ def is_integer_value(n): return False except TypeError: return False + + +def _get_seed_for_c(seed): + if not is_integer_value(seed): + seed = np.random.default_rng(seed).integers(2**32 - 2, dtype=np.uint32) + return ffi.cast("uint32_t", seed) diff --git a/python/tests/test_moocore.py b/python/tests/test_moocore.py index 6e8463536..92d5f634f 100644 --- a/python/tests/test_moocore.py +++ b/python/tests/test_moocore.py @@ -372,19 +372,33 @@ def test_hv_approx(dim): np.testing.assert_approx_equal(true_hv, appr_hv, significant=signif) +def test_hv_approx_default_seed(): + x = np.full((1, 5), 0.5) + ref = np.full(5, 0.0) + true_hv = moocore.hypervolume(x, ref=ref, maximise=True) + appr_hv = moocore.hv_approx( + x, ref=ref, method="DZ2019-MC", seed=None, maximise=True + ) + ## Precision goes 
down significantly with higher dimensions. + signif = 2 + np.testing.assert_approx_equal(true_hv, appr_hv, significant=signif) + + def check_hvc(points, ref, err_msg): hvc = moocore.hv_contributions(points, ref=ref) - nondom = moocore.is_nondominated(points, keep_weakly=True) - points = points[nondom, :] - hv_total = moocore.hypervolume(points, ref=ref) - true_hvc = np.zeros(len(nondom)) - true_hvc[nondom] = hv_total - np.array( + is_nondom = moocore.is_nondominated(points, keep_weakly=True) + nondom = points[is_nondom, :] + hv_total = moocore.hypervolume(nondom, ref=ref) + true_hvc = np.zeros(len(is_nondom)) + true_hvc[is_nondom] = hv_total - np.array( [ - moocore.hypervolume(np.delete(points, i, axis=0), ref=ref) - for i in range(len(points)) + moocore.hypervolume(np.delete(nondom, i, axis=0), ref=ref) + for i in range(len(nondom)) ] ) assert_allclose(true_hvc, hvc, err_msg=err_msg) + hvc = moocore.hv_contributions(-points, ref=-ref, maximise=True) + assert_allclose(true_hvc, hvc, err_msg=err_msg) @pytest.mark.parametrize("dim", range(2, 5)) diff --git a/r/DESCRIPTION b/r/DESCRIPTION index 42d68c92c..31bb24d66 100644 --- a/r/DESCRIPTION +++ b/r/DESCRIPTION @@ -1,7 +1,7 @@ Package: moocore Type: Package Title: Core Mathematical Functions for Multi-Objective Optimization -Version: 0.1.9 +Version: 0.1.9.900 Authors@R: c(person("Manuel", "López-Ibáñez", role = c("aut", "cre"), email = "manuel.lopez-ibanez@manchester.ac.uk", comment = c(ORCID = "0000-0001-9974-1295")), @@ -15,8 +15,8 @@ Authors@R: c(person("Manuel", "López-Ibáñez", role = c("aut", "cre"), person("Jean-Sebastien", "Roy", role = "cph", comment = "mt19937 library"), person("Makoto", "Matsumoto", role = "cph", comment = "mt19937 library"), person("Takuji", "Nishimura", role = "cph", comment = "mt19937 library")) -Description: Fast implementation of mathematical operations and performance metrics for multi-objective optimization, including filtering and ranking of dominated vectors according to Pareto 
optimality, computation of the empirical attainment function, V.G. da Fonseca, C.M. Fonseca, A.O. Hall (2001) , hypervolume metric, C.M. Fonseca, L. Paquete, M. López-Ibáñez (2006) , epsilon indicator, inverted generational distance, and Vorob'ev threshold, expectation and deviation, M. Binois, D. Ginsbourger, O. Roustant (2015) , among others. -Depends: R (>= 4.0) +Description: Fast implementations of mathematical operations and performance metrics for multi-objective optimization, including filtering and ranking of dominated vectors according to Pareto optimality, hypervolume metric, C.M. Fonseca, L. Paquete, M. López-Ibáñez (2006) , epsilon indicator, inverted generational distance, computation of the empirical attainment function, V.G. da Fonseca, C.M. Fonseca, A.O. Hall (2001) , and Vorob'ev threshold, expectation and deviation, M. Binois, D. Ginsbourger, O. Roustant (2015) , among others. +Depends: R (>= 4.1) Imports: matrixStats, Rdpack diff --git a/r/R/hv.R b/r/R/hv.R index ae293d35e..bb3f22eb3 100644 --- a/r/R/hv.R +++ b/r/R/hv.R @@ -54,15 +54,15 @@ #' speed. #' #' For 5D or higher, it uses a recursive algorithm -#' \citep{FonPaqLop06:hypervolume} with \eqn{\text{HV4D}^{+}} as the base case, -#' resulting in a \eqn{O(n^{d-2})} time complexity and \eqn{O(n)} space -#' complexity in the worst-case, where \eqn{d} is the dimension of the points. -#' Experimental results show that the pruning techniques used may reduce the -#' time complexity even further. The original proposal -#' \citep{FonPaqLop06:hypervolume} had the HV3D algorithm as the base case, -#' giving a time complexity of \eqn{O(n^{d-2} \log n)}. Andreia P. Guerreiro -#' enhanced the numerical stability of the algorithm by avoiding floating-point -#' comparisons of partial hypervolumes. 
+#' \citep{FonPaqLop06:hypervolume} that computes 4D contributions +#' \citep{GueFon2017hv4d} as the base case, resulting in a \eqn{O(n^{d-2})} +#' time complexity and \eqn{O(n)} space complexity in the worst-case, where +#' \eqn{d} is the dimension of the points. Experimental results show that the +#' pruning techniques used may reduce the time complexity even further. The +#' original proposal \citep{FonPaqLop06:hypervolume} had the HV3D algorithm as +#' the base case, giving a time complexity of \eqn{O(n^{d-2} \log n)}. Andreia +#' P. Guerreiro enhanced the numerical stability of the algorithm by avoiding +#' floating-point comparisons of partial hypervolumes. #' #' #' @references diff --git a/r/man/hypervolume.Rd b/r/man/hypervolume.Rd index 870eb6cba..5bffa9fbe 100644 --- a/r/man/hypervolume.Rd +++ b/r/man/hypervolume.Rd @@ -65,15 +65,15 @@ correctly handles weakly dominated points and has been further optimized for speed. For 5D or higher, it uses a recursive algorithm -\citep{FonPaqLop06:hypervolume} with \eqn{\text{HV4D}^{+}} as the base case, -resulting in a \eqn{O(n^{d-2})} time complexity and \eqn{O(n)} space -complexity in the worst-case, where \eqn{d} is the dimension of the points. -Experimental results show that the pruning techniques used may reduce the -time complexity even further. The original proposal -\citep{FonPaqLop06:hypervolume} had the HV3D algorithm as the base case, -giving a time complexity of \eqn{O(n^{d-2} \log n)}. Andreia P. Guerreiro -enhanced the numerical stability of the algorithm by avoiding floating-point -comparisons of partial hypervolumes. +\citep{FonPaqLop06:hypervolume} that computes 4D contributions +\citep{GueFon2017hv4d} as the base case, resulting in a \eqn{O(n^{d-2})} +time complexity and \eqn{O(n)} space complexity in the worst-case, where +\eqn{d} is the dimension of the points. Experimental results show that the +pruning techniques used may reduce the time complexity even further. 
The +original proposal \citep{FonPaqLop06:hypervolume} had the HV3D algorithm as +the base case, giving a time complexity of \eqn{O(n^{d-2} \log n)}. Andreia +P. Guerreiro enhanced the numerical stability of the algorithm by avoiding +floating-point comparisons of partial hypervolumes. } \examples{ diff --git a/r/man/moocore-package.Rd b/r/man/moocore-package.Rd index fa915ec5c..417dca2c0 100644 --- a/r/man/moocore-package.Rd +++ b/r/man/moocore-package.Rd @@ -6,7 +6,7 @@ \alias{moocore-package} \title{moocore: Core Mathematical Functions for Multi-Objective Optimization} \description{ -Fast implementation of mathematical operations and performance metrics for multi-objective optimization, including filtering and ranking of dominated vectors according to Pareto optimality, computation of the empirical attainment function, V.G. da Fonseca, C.M. Fonseca, A.O. Hall (2001) \doi{10.1007/3-540-44719-9_15}, hypervolume metric, C.M. Fonseca, L. Paquete, M. López-Ibáñez (2006) \doi{10.1109/CEC.2006.1688440}, epsilon indicator, inverted generational distance, and Vorob'ev threshold, expectation and deviation, M. Binois, D. Ginsbourger, O. Roustant (2015) \doi{10.1016/j.ejor.2014.07.032}, among others. +Fast implementations of mathematical operations and performance metrics for multi-objective optimization, including filtering and ranking of dominated vectors according to Pareto optimality, hypervolume metric, C.M. Fonseca, L. Paquete, M. López-Ibáñez (2006) \doi{10.1109/CEC.2006.1688440}, epsilon indicator, inverted generational distance, computation of the empirical attainment function, V.G. da Fonseca, C.M. Fonseca, A.O. Hall (2001) \doi{10.1007/3-540-44719-9_15}, and Vorob'ev threshold, expectation and deviation, M. Binois, D. Ginsbourger, O. Roustant (2015) \doi{10.1016/j.ejor.2014.07.032}, among others. 
} \seealso{ Useful links: diff --git a/release.py b/release.py index a2ea0cc24..3850724d0 100755 --- a/release.py +++ b/release.py @@ -111,6 +111,8 @@ def main(): update_package_version( "python/pyproject.toml", "version", replace=args.dev + ".dev0" ) + print("*** If happy with the changes, then do:") + print(f"git ci -a -m 'Start development of v{version}'") else: print(f"Current version is: {version}") print("Preparing for release")