From 3639bee06b1804d811e4d29ab31b2307404d9293 Mon Sep 17 00:00:00 2001
From: Daniel Oliver
Date: Wed, 13 Aug 2025 13:17:00 +0100
Subject: [PATCH 01/95] Use RetryReader when reading from Azure

---
 pbm/storage/azure/azure.go | 9 ++++++++-
 1 file changed, 8 insertions(+), 1 deletion(-)

diff --git a/pbm/storage/azure/azure.go b/pbm/storage/azure/azure.go
index e4e3743e0..762d1388a 100644
--- a/pbm/storage/azure/azure.go
+++ b/pbm/storage/azure/azure.go
@@ -302,7 +302,14 @@ func (b *Blob) SourceReader(name string) (io.ReadCloser, error) {
 		return nil, errors.Wrap(err, "download object")
 	}
 
-	return o.Body, nil
+	rr := o.NewRetryReader(context.TODO(), &azblob.RetryReaderOptions{
+		EarlyCloseAsError: true,
+		OnFailedRead: func(failureCount int32, lastError error, rnge azblob.HTTPRange, willRetry bool) {
+			// failureCount is reset on each call to Read(), so repeats of "attempt 1" are expected
+			b.log.Debug("Read from Azure failed (attempt %d): %v, retrying: %v\n", failureCount, lastError, willRetry)
+		},
+	})
+	return rr, nil
 }
 
 func (b *Blob) Delete(name string) error {

From d2a5ed6c2db77410c68efb811747dbf6b18fa0d7 Mon Sep 17 00:00:00 2001
From: Daniel Oliver <57630918+DanielOliverRJ@users.noreply.github.com>
Date: Fri, 22 Aug 2025 15:12:29 +0100
Subject: [PATCH 02/95] Accept suggestion from Copilot

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
---
 pbm/storage/azure/azure.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pbm/storage/azure/azure.go b/pbm/storage/azure/azure.go
index 762d1388a..211a3f9db 100644
--- a/pbm/storage/azure/azure.go
+++ b/pbm/storage/azure/azure.go
@@ -306,7 +306,7 @@ func (b *Blob) SourceReader(name string) (io.ReadCloser, error) {
 		EarlyCloseAsError: true,
 		OnFailedRead: func(failureCount int32, lastError error, rnge azblob.HTTPRange, willRetry bool) {
 			// failureCount is reset on each call to Read(), so repeats of "attempt 1" are expected
-			b.log.Debug("Read from Azure failed (attempt %d): %v, retrying: %v\n", failureCount, lastError, willRetry)
+			b.log.Debug("Read from Azure failed (attempt %d): %v, retrying: %v", failureCount, lastError, willRetry)
 		},
 	})
 	return rr, nil

From 32249f97d3cb6946b95f9a06f834bd2e8adc17b7 Mon Sep 17 00:00:00 2001
From: Imre Nagi
Date: Sun, 3 Aug 2025 06:19:59 +0700
Subject: [PATCH 03/95] feat: initialize oss client

Signed-off-by: Imre Nagi
---
 go.mod                                             |    3 +
 go.sum                                             |   29 +
 pbm/config/config.go                               |    2 +
 pbm/storage/oss/client.go                          |  157 +
 pbm/storage/oss/oss.go                             |   68 +
 pbm/storage/storage.go                             |    1 +
 pbm/util/storage.go                                |    3 +
 .../github.com/alibabacloud-go/debug/LICENSE       |  201 ++
 .../alibabacloud-go/debug/debug/debug.go           |   58 +
 .../aliyun/alibabacloud-oss-go-sdk-v2/LICENSE      |  201 ++
 .../oss/api_op_accesspoint.go                      |  468 +++
 .../api_op_accesspoint_publicaccessblock.go        |  163 ++
 .../oss/api_op_bucket.go                           | 1310 +++++++++
 .../oss/api_op_bucket_accessmonitor.go             |  106 +
 .../oss/api_op_bucket_archivedirectread.go         |  104 +
 .../oss/api_op_bucket_cname.go                     |  351 +++
 .../oss/api_op_bucket_cors.go                      |  250 ++
 .../oss/api_op_bucket_encryption.go                |  170 ++
 .../oss/api_op_bucket_httpsconfig.go               |  114 +
 .../oss/api_op_bucket_inventory.go                 |  318 ++
 .../oss/api_op_bucket_lifecycle.go                 |  280 ++
 .../oss/api_op_bucket_logging.go                   |  320 ++
 .../oss/api_op_bucket_metaquery.go                 |  534 ++++
 .../oss/api_op_bucket_objectfcaccesspoint.go       |  710 +++++
 .../oss/api_op_bucket_policy.go                    |  209 ++
 .../oss/api_op_bucket_publicaccessblock.go         |  154 +
 .../oss/api_op_bucket_redundancytransition.go      |  310 ++
 .../oss/api_op_bucket_referer.go                   |  133 +
.../oss/api_op_bucket_replication.go | 469 +++ .../oss/api_op_bucket_resourcegroup.go | 104 + .../oss/api_op_bucket_style.go | 244 ++ .../oss/api_op_bucket_tags.go | 146 + .../oss/api_op_bucket_transferacceleration.go | 113 + .../oss/api_op_bucket_website.go | 280 ++ .../oss/api_op_bucket_worm.go | 273 ++ .../oss/api_op_cloud_box.go | 89 + .../oss/api_op_common.go | 12 + .../oss/api_op_object.go | 2572 +++++++++++++++++ .../oss/api_op_publicaccessblock.go | 147 + .../oss/api_op_region.go | 72 + .../oss/api_op_select_object.go | 740 +++++ .../oss/api_op_service.go | 106 + .../oss/checkpoint.go | 369 +++ .../alibabacloud-oss-go-sdk-v2/oss/client.go | 1499 ++++++++++ .../oss/client_extension.go | 164 ++ .../oss/client_paginators.go | 407 +++ .../oss/client_presign.go | 164 ++ .../alibabacloud-oss-go-sdk-v2/oss/config.go | 286 ++ .../alibabacloud-oss-go-sdk-v2/oss/copier.go | 587 ++++ .../oss/credentials/credentials.go | 47 + .../ecs_role_credentials_provider.go | 168 ++ .../environment_credentials_provider.go | 27 + .../fetcher_credentials_provider.go | 183 ++ .../process_credentials_provider.go | 168 ++ .../static_credentials_provider.go | 26 + .../oss/crypto/aes_ctr.go | 65 + .../oss/crypto/aes_ctr_cipher.go | 208 ++ .../oss/crypto/cipher.go | 69 + .../oss/crypto/crypto_const.go | 8 + .../oss/crypto/crypto_type.go | 125 + .../oss/crypto/master_rsa_cipher.go | 102 + .../oss/defaults.go | 79 + .../oss/downloader.go | 598 ++++ .../oss/encryption_client.go | 503 ++++ .../oss/endpoints.go | 62 + .../alibabacloud-oss-go-sdk-v2/oss/enums.go | 344 +++ .../alibabacloud-oss-go-sdk-v2/oss/errors.go | 170 ++ .../oss/filelike.go | 795 +++++ .../oss/from_ptr.go | 63 + .../oss/io_utils.go | 869 ++++++ .../alibabacloud-oss-go-sdk-v2/oss/limiter.go | 44 + .../alibabacloud-oss-go-sdk-v2/oss/logger.go | 130 + .../oss/progress.go | 41 + .../oss/retry/backoff.go | 79 + .../oss/retry/retryable_error.go | 103 + .../oss/retry/retryer.go | 22 + .../oss/retry/standard.go | 71 + .../oss/retry/types.go | 19 + .../oss/signer/signer.go | 51 + .../oss/signer/v1.go | 264 ++ .../oss/signer/v4.go | 390 +++ .../alibabacloud-oss-go-sdk-v2/oss/to_ptr.go | 15 + .../oss/transport/dialer.go | 88 + .../oss/transport/http.go | 177 ++ .../alibabacloud-oss-go-sdk-v2/oss/types.go | 162 ++ .../oss/uploader.go | 768 +++++ .../alibabacloud-oss-go-sdk-v2/oss/utils.go | 405 +++ .../oss/utils_copy.go | 95 + .../oss/utils_crc.go | 140 + .../oss/utils_mime.go | 595 ++++ .../oss/utils_pool.go | 248 ++ .../oss/validation.go | 84 + .../alibabacloud-oss-go-sdk-v2/oss/version.go | 34 + .../oss/xml_utils.go | 246 ++ .../github.com/aliyun/credentials-go/LICENSE | 201 ++ .../credentials/internal/http/http.go | 145 + .../credentials/internal/utils/path.go | 18 + .../credentials/internal/utils/runtime.go | 36 + .../credentials/internal/utils/utils.go | 204 ++ .../credentials/providers/cli_profile.go | 266 ++ .../credentials/providers/cloud_sso.go | 216 ++ .../credentials/providers/credentials.go | 22 + .../credentials/providers/default.go | 113 + .../credentials/providers/ecs_ram_role.go | 283 ++ .../credentials/providers/env.go | 55 + .../credentials/providers/hook.go | 7 + .../credentials/providers/oidc.go | 278 ++ .../credentials/providers/profile.go | 169 ++ .../credentials/providers/ram_role_arn.go | 375 +++ .../credentials/providers/static_ak.go | 67 + .../credentials/providers/static_sts.go | 83 + .../credentials/providers/uri.go | 152 + vendor/modules.txt | 16 + 113 files changed, 27226 insertions(+) create mode 100644 pbm/storage/oss/client.go 
create mode 100644 pbm/storage/oss/oss.go create mode 100644 vendor/github.com/alibabacloud-go/debug/LICENSE create mode 100644 vendor/github.com/alibabacloud-go/debug/debug/debug.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/LICENSE create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_accesspoint.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_accesspoint_publicaccessblock.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_accessmonitor.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_archivedirectread.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_cname.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_cors.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_encryption.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_httpsconfig.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_inventory.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_lifecycle.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_logging.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_metaquery.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_objectfcaccesspoint.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_policy.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_publicaccessblock.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_redundancytransition.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_referer.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_replication.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_resourcegroup.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_style.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_tags.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_transferacceleration.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_website.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_worm.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_cloud_box.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_common.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_object.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_publicaccessblock.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_region.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_select_object.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_service.go create mode 100644 
vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/checkpoint.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/client.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/client_extension.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/client_paginators.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/client_presign.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/config.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/copier.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/credentials/credentials.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/credentials/ecs_role_credentials_provider.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/credentials/environment_credentials_provider.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/credentials/fetcher_credentials_provider.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/credentials/process_credentials_provider.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/credentials/static_credentials_provider.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/crypto/aes_ctr.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/crypto/aes_ctr_cipher.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/crypto/cipher.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/crypto/crypto_const.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/crypto/crypto_type.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/crypto/master_rsa_cipher.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/defaults.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/downloader.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/encryption_client.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/endpoints.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/enums.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/errors.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/filelike.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/from_ptr.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/io_utils.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/limiter.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/logger.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/progress.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/retry/backoff.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/retry/retryable_error.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/retry/retryer.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/retry/standard.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/retry/types.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/signer/signer.go create mode 100644 
vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/signer/v1.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/signer/v4.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/to_ptr.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/transport/dialer.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/transport/http.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/types.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/uploader.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/utils.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/utils_copy.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/utils_crc.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/utils_mime.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/utils_pool.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/validation.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/version.go create mode 100644 vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/xml_utils.go create mode 100644 vendor/github.com/aliyun/credentials-go/LICENSE create mode 100644 vendor/github.com/aliyun/credentials-go/credentials/internal/http/http.go create mode 100644 vendor/github.com/aliyun/credentials-go/credentials/internal/utils/path.go create mode 100644 vendor/github.com/aliyun/credentials-go/credentials/internal/utils/runtime.go create mode 100644 vendor/github.com/aliyun/credentials-go/credentials/internal/utils/utils.go create mode 100644 vendor/github.com/aliyun/credentials-go/credentials/providers/cli_profile.go create mode 100644 vendor/github.com/aliyun/credentials-go/credentials/providers/cloud_sso.go create mode 100644 vendor/github.com/aliyun/credentials-go/credentials/providers/credentials.go create mode 100644 vendor/github.com/aliyun/credentials-go/credentials/providers/default.go create mode 100644 vendor/github.com/aliyun/credentials-go/credentials/providers/ecs_ram_role.go create mode 100644 vendor/github.com/aliyun/credentials-go/credentials/providers/env.go create mode 100644 vendor/github.com/aliyun/credentials-go/credentials/providers/hook.go create mode 100644 vendor/github.com/aliyun/credentials-go/credentials/providers/oidc.go create mode 100644 vendor/github.com/aliyun/credentials-go/credentials/providers/profile.go create mode 100644 vendor/github.com/aliyun/credentials-go/credentials/providers/ram_role_arn.go create mode 100644 vendor/github.com/aliyun/credentials-go/credentials/providers/static_ak.go create mode 100644 vendor/github.com/aliyun/credentials-go/credentials/providers/static_sts.go create mode 100644 vendor/github.com/aliyun/credentials-go/credentials/providers/uri.go diff --git a/go.mod b/go.mod index 6dfc9fef7..0fb6cc1fb 100644 --- a/go.mod +++ b/go.mod @@ -54,6 +54,9 @@ require ( github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.50.0 // indirect github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.50.0 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect + github.com/alibabacloud-go/debug v1.0.1 // indirect + github.com/aliyun/alibabacloud-oss-go-sdk-v2 v1.2.3 // indirect + github.com/aliyun/credentials-go v1.4.7 // indirect github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream 
v1.6.7 // indirect github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.24 // indirect github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.28 // indirect diff --git a/go.sum b/go.sum index c6603f569..79a1e9361 100644 --- a/go.sum +++ b/go.sum @@ -48,6 +48,14 @@ github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapp github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.50.0/go.mod h1:otE2jQekW/PqXk1Awf5lmfokJx4uwuqcj1ab5SpGeW0= github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= +github.com/alibabacloud-go/debug v1.0.0/go.mod h1:8gfgZCCAC3+SCzjWtY053FrOcd4/qlH6IHTI4QyICOc= +github.com/alibabacloud-go/debug v1.0.1 h1:MsW9SmUtbb1Fnt3ieC6NNZi6aEwrXfDksD4QA6GSbPg= +github.com/alibabacloud-go/debug v1.0.1/go.mod h1:8gfgZCCAC3+SCzjWtY053FrOcd4/qlH6IHTI4QyICOc= +github.com/alibabacloud-go/tea v1.2.2/go.mod h1:CF3vOzEMAG+bR4WOql8gc2G9H3EkH3ZLAQdpmpXMgwk= +github.com/aliyun/alibabacloud-oss-go-sdk-v2 v1.2.3 h1:LyeTJauAchnWdre3sAyterGrzaAtZ4dSNoIvDvaWfo4= +github.com/aliyun/alibabacloud-oss-go-sdk-v2 v1.2.3/go.mod h1:FTzydeQVmR24FI0D6XWUOMKckjXehM/jgMn1xC+DA9M= +github.com/aliyun/credentials-go v1.4.7 h1:T17dLqEtPUFvjDRRb5giVvLh6dFT8IcNFJJb7MeyCxw= +github.com/aliyun/credentials-go v1.4.7/go.mod h1:Jm6d+xIgwJVLVWT561vy67ZRP4lPTQxMbEYRuT2Ti1U= github.com/aws/aws-sdk-go-v2 v1.33.0 h1:Evgm4DI9imD81V0WwD+TN4DCwjUMdc94TrduMLbgZJs= github.com/aws/aws-sdk-go-v2 v1.33.0/go.mod h1:P5WJBrYqqbWVaOxgH0X/FYYD47/nooaPOZPlQdmiN2U= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.7 h1:lL7IfaFzngfx0ZwUGOZdsFFnQ5uLvR0hWqqhyE7Q9M8= @@ -191,6 +199,8 @@ github.com/klauspost/pgzip v1.2.6 h1:8RXeL5crjEUFnR2/Sn6GJNWtSQ3Dk8pq4CL3jvdDyjU github.com/klauspost/pgzip v1.2.6/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= @@ -228,6 +238,7 @@ github.com/montanaflynn/stats v0.7.1 h1:etflOAAHORrCC44V+aR6Ftzort912ZU+YLiSTuV8 github.com/montanaflynn/stats v0.7.1/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow= github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug= @@ -288,6 +299,7 @@ github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpE github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.3.0/go.mod 
h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= @@ -358,6 +370,7 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= golang.org/x/crypto v0.39.0 h1:SHs+kF4LP+f+p14esP5jAoDpHU8Gu/v9lFRK6IT5imM= golang.org/x/crypto v0.39.0/go.mod h1:L+Xg3Wf6HoL4Bn4238Z6ft6KfEpN0tJGo53AAPC632U= golang.org/x/exp v0.0.0-20240529005216-23cca8864a10 h1:vpzMC/iZhYFAjJzHU0Cfuq+w1vLLsF2vLkDrPjzKYck= @@ -365,6 +378,7 @@ golang.org/x/exp v0.0.0-20240529005216-23cca8864a10/go.mod h1:XtvwrStGgqGPLc4cjQ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.25.0 h1:n7a+ZbQKQA/Ysbyb0/6IbB1H/X41mKgbhfv7AfG/44w= golang.org/x/mod v0.25.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -373,6 +387,9 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= golang.org/x/net v0.41.0 h1:vBTly1HeNPEn3wtREYfy4GZ/NECgw2Cnl+nK6Nz3uvw= golang.org/x/net v0.41.0/go.mod h1:B/K4NNqkfmg07DQYrbwvSluqCJOOXwUjeb/5lOisjbA= golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI= @@ -381,6 +398,7 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.15.0 h1:KWH3jNZsfyT6xfAfKiz6MRNmd46ByHDYaZ7KSkCtdW8= golang.org/x/sync v0.15.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -399,16 +417,23 @@ golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw= golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= golang.org/x/term v0.32.0 h1:DR4lr0TjUs3epypdhTOkMmuF5CDFJ/8pOnbzMZPQ7bg= golang.org/x/term v0.32.0/go.mod h1:uZG1FhGx848Sqfsq4/DlJr3xGGsYMu/L5GW4abiaEPQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.26.0 h1:P42AVeLghgTYr4+xUnTRKDMqpar+PtX7KWuNQL21L8M= golang.org/x/text v0.26.0/go.mod h1:QK15LZJUUQVJxhz7wXgxSy/CJaTFjd0G+YLonydOVQA= golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE= @@ -418,6 +443,7 @@ golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -435,10 +461,13 @@ google.golang.org/grpc v1.73.0/go.mod h1:50sbHOUqWoCQGI8V2HQLJM0B+LMlIUjNSZmow7E google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= 
gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
 gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
 gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
 gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
diff --git a/pbm/config/config.go b/pbm/config/config.go
index f03f2213a..b4027d72a 100644
--- a/pbm/config/config.go
+++ b/pbm/config/config.go
@@ -26,6 +26,7 @@ import (
 	"github.com/percona/percona-backup-mongodb/pbm/storage/azure"
 	"github.com/percona/percona-backup-mongodb/pbm/storage/fs"
 	"github.com/percona/percona-backup-mongodb/pbm/storage/gcs"
+	"github.com/percona/percona-backup-mongodb/pbm/storage/oss"
 	"github.com/percona/percona-backup-mongodb/pbm/storage/s3"
 	"github.com/percona/percona-backup-mongodb/pbm/topo"
 )
@@ -227,6 +228,7 @@ type StorageConf struct {
 	GCS        *gcs.Config   `bson:"gcs,omitempty" json:"gcs,omitempty" yaml:"gcs,omitempty"`
 	Azure      *azure.Config `bson:"azure,omitempty" json:"azure,omitempty" yaml:"azure,omitempty"`
 	Filesystem *fs.Config    `bson:"filesystem,omitempty" json:"filesystem,omitempty" yaml:"filesystem,omitempty"`
+	OSS        *oss.Config   `bson:"oss,omitempty" json:"oss,omitempty" yaml:"oss,omitempty"`
 }
 
 func (s *StorageConf) Clone() *StorageConf {
diff --git a/pbm/storage/oss/client.go b/pbm/storage/oss/client.go
new file mode 100644
index 000000000..2da86d864
--- /dev/null
+++ b/pbm/storage/oss/client.go
@@ -0,0 +1,157 @@
+package oss
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	"github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss"
+	osscred "github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/credentials"
+	"github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/retry"
+	"github.com/aliyun/credentials-go/credentials/providers"
+)
+
+const (
+	defaultPartSize int64 = 10 * 1024 * 1024 // 10 MiB
+	defaultS3Region       = "ap-southeast-5"
+
+	defaultRetryBaseDelay    = 30 * time.Millisecond
+	defaultRetryerMaxBackoff = 300 * time.Second
+)
+
+//nolint:lll
+type Config struct {
+	Region      string `bson:"region" json:"region" yaml:"region"`
+	EndpointURL string `bson:"endpointUrl,omitempty" json:"endpointUrl" yaml:"endpointUrl,omitempty"`
+
+	Bucket      string      `bson:"bucket" json:"bucket" yaml:"bucket"`
+	Prefix      string      `bson:"prefix,omitempty" json:"prefix,omitempty" yaml:"prefix,omitempty"`
+	Credentials Credentials `bson:"credentials" json:"-" yaml:"credentials"`
+
+	Retryer *Retryer `bson:"retryer,omitempty" json:"retryer,omitempty" yaml:"retryer,omitempty"`
+
+	ConnectTimeout int   `bson:"connectTimeout" json:"connectTimeout" yaml:"connectTimeout"`
+	UploadPartSize int   `bson:"uploadPartSize,omitempty" json:"uploadPartSize,omitempty" yaml:"uploadPartSize,omitempty"`
+	MaxUploadParts int32 `bson:"maxUploadParts,omitempty" json:"maxUploadParts,omitempty" yaml:"maxUploadParts,omitempty"`
+}
+
+type Retryer struct {
+	MaxAttempts int           `bson:"maxAttempts" json:"maxAttempts" yaml:"maxAttempts"`
+	MaxBackoff  time.Duration `bson:"maxBackoff" json:"maxBackoff" yaml:"maxBackoff"`
+	BaseDelay   time.Duration `bson:"baseDelay" json:"baseDelay" yaml:"baseDelay"`
+}
+
+type Credentials struct {
+	AccessKeyID     string `bson:"accessKeyId" json:"accessKeyId,omitempty" yaml:"accessKeyId,omitempty"`
+	AccessKeySecret string `bson:"accessKeySecret" json:"accessKeySecret,omitempty" yaml:"accessKeySecret,omitempty"`
+	SecurityToken   string `bson:"securityToken" json:"securityToken,omitempty" yaml:"securityToken,omitempty"`
+	RoleARN         string `bson:"roleArn,omitempty" json:"roleArn,omitempty" yaml:"roleArn,omitempty"`
+	SessionName     string `bson:"sessionName,omitempty" json:"sessionName,omitempty" yaml:"sessionName,omitempty"`
+}
+
+func (cfg *Config) Cast() error {
+	if cfg.Region == "" {
+		cfg.Region = defaultS3Region
+	}
+	// configureClient dereferences Retryer unconditionally, so default it here
+	// instead of leaving it nil when the retryer section is omitted.
+	if cfg.Retryer == nil {
+		cfg.Retryer = &Retryer{}
+	}
+	if cfg.Retryer.BaseDelay == 0 {
+		cfg.Retryer.BaseDelay = defaultRetryBaseDelay
+	}
+	if cfg.Retryer.MaxBackoff == 0 {
+		cfg.Retryer.MaxBackoff = defaultRetryerMaxBackoff
+	}
+	return nil
+}
+
+const (
+	defaultSessionExpiration = 3600
+)
+
+func newCred(config *Config) (*cred, error) {
+	var credentialsProvider providers.CredentialsProvider
+	var err error
+
+	if config.Credentials.AccessKeyID == "" || config.Credentials.AccessKeySecret == "" {
+		return nil, fmt.Errorf("access key ID and secret are required")
+	}
+
+	if config.Credentials.SecurityToken != "" {
+		credentialsProvider, err = providers.NewStaticSTSCredentialsProviderBuilder().
+			WithAccessKeyId(config.Credentials.AccessKeyID).
+			WithAccessKeySecret(config.Credentials.AccessKeySecret).
+			WithSecurityToken(config.Credentials.SecurityToken).
+			Build()
+	} else {
+		credentialsProvider, err = providers.NewStaticAKCredentialsProviderBuilder().
+			WithAccessKeyId(config.Credentials.AccessKeyID).
+			WithAccessKeySecret(config.Credentials.AccessKeySecret).
+			Build()
+	}
+	if err != nil {
+		return nil, fmt.Errorf("credentials provider: %w", err)
+	}
+
+	if config.Credentials.RoleARN != "" {
+		internalProvider := credentialsProvider
+		credentialsProvider, err = providers.NewRAMRoleARNCredentialsProviderBuilder().
+			WithCredentialsProvider(internalProvider).
+			WithRoleArn(config.Credentials.RoleARN).
+			WithRoleSessionName(config.Credentials.SessionName).
+			WithDurationSeconds(defaultSessionExpiration).
+			Build()
+		if err != nil {
+			return nil, fmt.Errorf("ram role credential provider: %w", err)
+		}
+	}
+
+	return &cred{
+		provider: credentialsProvider,
+	}, nil
+}
+
+type cred struct {
+	provider providers.CredentialsProvider
+}
+
+func (c *cred) GetCredentials(ctx context.Context) (osscred.Credentials, error) {
+	cc, err := c.provider.GetCredentials()
+	if err != nil {
+		return osscred.Credentials{}, err
+	}
+
+	return osscred.Credentials{
+		AccessKeyID:     cc.AccessKeyId,
+		AccessKeySecret: cc.AccessKeySecret,
+		SecurityToken:   cc.SecurityToken,
+	}, nil
+}
+
+func configureClient(config *Config) (*oss.Client, error) {
+	if config.Region == "" {
+		return nil, fmt.Errorf("oss region is required")
+	}
+
+	cred, err := newCred(config)
+	if err != nil {
+		return nil, fmt.Errorf("create credentials: %w", err)
+	}
+
+	ossConfig := oss.LoadDefaultConfig().
+		WithRegion(config.Region).
+		WithCredentialsProvider(cred).
+		WithSignatureVersion(oss.SignatureVersionV4).
+		WithRetryMaxAttempts(config.Retryer.MaxAttempts).
+		WithRetryer(retry.NewStandard(func(ro *retry.RetryOptions) {
+			ro.MaxAttempts = config.Retryer.MaxAttempts
+			ro.MaxBackoff = config.Retryer.MaxBackoff
+			ro.BaseDelay = config.Retryer.BaseDelay
+		})).
+		WithConnectTimeout(time.Duration(config.ConnectTimeout) * time.Second)
+
+	if config.EndpointURL != "" {
+		ossConfig = ossConfig.WithEndpoint(config.EndpointURL)
+	}
+
+	return oss.NewClient(ossConfig), nil
+}
diff --git a/pbm/storage/oss/oss.go b/pbm/storage/oss/oss.go
new file mode 100644
index 000000000..71a99f4a8
--- /dev/null
+++ b/pbm/storage/oss/oss.go
@@ -0,0 +1,68 @@
+package oss
+
+import (
+	"fmt"
+	"io"
+
+	"github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss"
+
+	"github.com/percona/percona-backup-mongodb/pbm/log"
+	"github.com/percona/percona-backup-mongodb/pbm/storage"
+)
+
+var _ storage.Storage = &OSS{}
+
+func New(cfg *Config, node string, l log.LogEvent) (*OSS, error) {
+	if err := cfg.Cast(); err != nil {
+		return nil, fmt.Errorf("cast config: %w", err)
+	}
+
+	client, err := configureClient(cfg)
+	if err != nil {
+		return nil, fmt.Errorf("configure client: %w", err)
+	}
+
+	o := &OSS{
+		cfg:    cfg,
+		node:   node,
+		log:    l,
+		ossCli: client,
+	}
+
+	return o, nil
+}
+
+type OSS struct {
+	cfg    *Config
+	node   string
+	log    log.LogEvent
+	ossCli *oss.Client
+}
+
+func (o *OSS) Type() storage.Type {
+	return storage.OSS
+}
+
+// The storage.Storage data-path methods below are no-op stubs in this
+// initial patch; only client construction is wired up.
+
+func (o *OSS) Save(name string, data io.Reader, options ...storage.Option) error {
+	return nil
+}
+
+func (o *OSS) SourceReader(name string) (io.ReadCloser, error) {
+	return nil, nil
+}
+
+func (o *OSS) FileStat(name string) (storage.FileInfo, error) {
+	return storage.FileInfo{}, nil
+}
+
+func (o *OSS) List(prefix, suffix string) ([]storage.FileInfo, error) {
+	return nil, nil
+}
+
+func (o *OSS) Delete(name string) error {
+	return nil
+}
+
+func (o *OSS) Copy(src, dst string) error {
+	return nil
+}
diff --git a/pbm/storage/storage.go b/pbm/storage/storage.go
index 843772495..117f0c51f 100644
--- a/pbm/storage/storage.go
+++ b/pbm/storage/storage.go
@@ -28,6 +28,7 @@ const (
 	Filesystem Type = "filesystem"
 	Blackhole  Type = "blackhole"
 	GCS        Type = "gcs"
+	OSS        Type = "oss"
 )
 
 type FileInfo struct {
diff --git a/pbm/util/storage.go b/pbm/util/storage.go
index fd22dc8b1..987db71f3 100644
--- a/pbm/util/storage.go
+++ b/pbm/util/storage.go
@@ -14,6 +14,7 @@ import (
 	"github.com/percona/percona-backup-mongodb/pbm/storage/blackhole"
 	"github.com/percona/percona-backup-mongodb/pbm/storage/fs"
 	"github.com/percona/percona-backup-mongodb/pbm/storage/gcs"
+	"github.com/percona/percona-backup-mongodb/pbm/storage/oss"
 	"github.com/percona/percona-backup-mongodb/pbm/storage/s3"
 	"github.com/percona/percona-backup-mongodb/pbm/version"
 )
@@ -35,6 +36,8 @@ func StorageFromConfig(cfg *config.StorageConf, node string, l log.LogEvent) (st
 		return blackhole.New(), nil
 	case storage.GCS:
 		return gcs.New(cfg.GCS, node, l)
+	case storage.OSS:
+		return oss.New(cfg.OSS, node, l)
 	case storage.Undefined:
 		return nil, ErrStorageUndefined
 	default:
diff --git a/vendor/github.com/alibabacloud-go/debug/LICENSE b/vendor/github.com/alibabacloud-go/debug/LICENSE
new file mode 100644
index 000000000..261eeb9e9
--- /dev/null
+++ b/vendor/github.com/alibabacloud-go/debug/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/alibabacloud-go/debug/debug/debug.go b/vendor/github.com/alibabacloud-go/debug/debug/debug.go new file mode 100644 index 000000000..c9c8b5422 --- /dev/null +++ b/vendor/github.com/alibabacloud-go/debug/debug/debug.go @@ -0,0 +1,58 @@ +// Package debug is a library to display debug info that control by enviroment variable DEBUG +// +// # Example +// +// package main +// // import the package +// import "github.com/alibabacloud-go/debug/debug" +// +// // init a debug method +// var d = debug.Init("sdk") +// +// func main() { +// // try `go run demo.go` +// // and `DEBUG=sdk go run demo.go` +// d("this debug information just print when DEBUG environment variable was set") +// } +// +// When you run application with `DEBUG=sdk go run main.go`, it will display logs. Otherwise +// it do nothing +package debug + +import ( + "fmt" + "os" + "strings" +) + +// Debug is a method that display logs, it is useful for developer to trace program running +// details when troubleshooting +type Debug func(format string, v ...interface{}) + +var hookGetEnv = func() string { + return os.Getenv("DEBUG") +} + +var hookPrint = func(input string) { + fmt.Println(input) +} + +// Init returns a debug method that based the enviroment variable DEBUG value +func Init(flag string) Debug { + enable := false + + env := hookGetEnv() + parts := strings.Split(env, ",") + for _, part := range parts { + if part == flag { + enable = true + break + } + } + + return func(format string, v ...interface{}) { + if enable { + hookPrint(fmt.Sprintf(format, v...)) + } + } +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/LICENSE b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_accesspoint.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_accesspoint.go new file mode 100644 index 000000000..f18b17b3c --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_accesspoint.go @@ -0,0 +1,468 @@ +package oss + +import ( + "context" + "io" + + "github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/signer" +) + +type AccessPointVpcConfiguration struct { + // The ID of the VPC that is required only when the NetworkOrigin parameter is set to vpc. + VpcId *string `xml:"VpcId"` +} + +type CreateAccessPointConfiguration struct { + // The name of the access point. The name of the access point must meet the following naming rules:* The name must be unique in a region of your Alibaba Cloud account.* The name cannot end with -ossalias.* The name can contain only lowercase letters, digits, and hyphens (-). 
It cannot start or end with a hyphen (-).* The name must be 3 to 19 characters in length.
+	AccessPointName *string `xml:"AccessPointName"`
+
+	// The network origin of the access point.
+	NetworkOrigin *string `xml:"NetworkOrigin"`
+
+	// The container that stores the information about the VPC.
+	VpcConfiguration *AccessPointVpcConfiguration `xml:"VpcConfiguration"`
+}
+
+type ListAccessPointsRequest struct {
+	// The maximum number of access points that can be returned. Valid values:* For user-level access points: (0,1000].* For bucket-level access points: (0,100].
+	MaxKeys int64 `input:"query,max-keys"`
+
+	// The token from which the listing operation starts. You must specify the value of NextContinuationToken that is obtained from the previous query as the value of continuation-token.
+	ContinuationToken *string `input:"query,continuation-token"`
+
+	// The name of the bucket.
+	Bucket *string `input:"host,bucket"`
+
+	RequestCommon
+}
+
+type AccessPoint struct {
+	// The network origin of the access point.
+	NetworkOrigin *string `xml:"NetworkOrigin"`
+
+	// The container that stores the information about the VPC.
+	VpcConfiguration *AccessPointVpcConfiguration `xml:"VpcConfiguration"`
+
+	// The status of the access point.
+	Status *string `xml:"Status"`
+
+	// The name of the bucket for which the access point is configured.
+	Bucket *string `xml:"Bucket"`
+
+	// The name of the access point.
+	AccessPointName *string `xml:"AccessPointName"`
+
+	// The alias of the access point.
+	Alias *string `xml:"Alias"`
+}
+
+type ListAccessPointsResult struct {
+	// The maximum number of results set for this enumeration operation.
+	MaxKeys *int32 `xml:"MaxKeys"`
+
+	// Indicates whether the returned list is truncated. Valid values: * true: indicates that not all results are returned. * false: indicates that all results are returned.
+	IsTruncated *bool `xml:"IsTruncated"`
+
+	// Indicates that this ListAccessPoints request does not return all results that can be listed. You can use NextContinuationToken to continue obtaining list results.
+	NextContinuationToken *string `xml:"NextContinuationToken"`
+
+	// The ID of the Alibaba Cloud account to which the access point belongs.
+	AccountId *string `xml:"AccountId"`
+
+	// The container that stores the information about all access points.
+	AccessPoints []AccessPoint `xml:"AccessPoints>AccessPoint"`
+
+	ResultCommon
+}
+
+// ListAccessPoints Queries the information about user-level or bucket-level access points.
+func (c *Client) ListAccessPoints(ctx context.Context, request *ListAccessPointsRequest, optFns ...func(*Options)) (*ListAccessPointsResult, error) {
+	var err error
+	if request == nil {
+		request = &ListAccessPointsRequest{}
+	}
+	input := &OperationInput{
+		OpName: "ListAccessPoints",
+		Method: "GET",
+		Headers: map[string]string{
+			HTTPHeaderContentType: contentTypeXML,
+		},
+		Parameters: map[string]string{
+			"accessPoint": "",
+		},
+		Bucket: request.Bucket,
+	}
+
+	input.OpMetadata.Set(signer.SubResource, []string{"accessPoint"})
+
+	if err = c.marshalInput(request, input, updateContentMd5); err != nil {
+		return nil, err
+	}
+	output, err := c.invokeOperation(ctx, input, optFns)
+	if err != nil {
+		return nil, err
+	}
+
+	result := &ListAccessPointsResult{}
+
+	if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil {
+		return nil, c.toClientError(err, "UnmarshalOutputFail", output)
+	}
+
+	return result, err
+}
+
+type GetAccessPointRequest struct {
+	// The name of the bucket.
+ Bucket *string `input:"host,bucket,required"` + + // The name of the access point. + AccessPointName *string `input:"header,x-oss-access-point-name,required"` + + RequestCommon +} + +type GetAccessPointResult struct { + // The ARN of the access point. + AccessPointArn *string `xml:"AccessPointArn"` + + // The alias of the access point. + Alias *string `xml:"Alias"` + + // The public endpoint of the access point. + PublicEndpoint *string `xml:"Endpoints>PublicEndpoint"` + + // The internal endpoint of the access point. + InternalEndpoint *string `xml:"Endpoints>InternalEndpoint"` + + // The time when the access point was created. + CreationDate *string `xml:"CreationDate"` + + // The name of the access point. + AccessPointName *string `xml:"AccessPointName"` + + // The name of the bucket for which the access point is configured. + Bucket *string `xml:"Bucket"` + + // The ID of the Alibaba Cloud account for which the access point is configured. + AccountId *string `xml:"AccountId"` + + // The network origin of the access point. Valid values: vpc and internet. vpc: You can only use the specified VPC ID to access the access point. internet: You can use public endpoints and internal endpoints to access the access point. + NetworkOrigin *string `xml:"NetworkOrigin"` + + // The container that stores the information about the VPC. + VpcConfiguration *AccessPointVpcConfiguration `xml:"VpcConfiguration"` + + // The status of the access point. + AccessPointStatus *string `xml:"Status"` + + // The container that stores the Block Public Access configurations. + PublicAccessBlockConfiguration *PublicAccessBlockConfiguration `xml:"PublicAccessBlockConfiguration"` + + ResultCommon +} + +// GetAccessPoint Queries the information about an access point. +func (c *Client) GetAccessPoint(ctx context.Context, request *GetAccessPointRequest, optFns ...func(*Options)) (*GetAccessPointResult, error) { + var err error + if request == nil { + request = &GetAccessPointRequest{} + } + input := &OperationInput{ + OpName: "GetAccessPoint", + Method: "GET", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "accessPoint": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"accessPoint"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &GetAccessPointResult{} + + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type GetAccessPointPolicyRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + // The name of the access point. + AccessPointName *string `input:"header,x-oss-access-point-name,required"` + + RequestCommon +} + +type GetAccessPointPolicyResult struct { + // The configurations of the access point policy. + Body string + + ResultCommon +} + +// GetAccessPointPolicy Queries the configurations of an access point policy. 
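+//
+// Example (illustrative sketch, not upstream documentation): assumes an initialized
+// *Client named client; the bucket and access point names are placeholders.
+//
+//	result, err := client.GetAccessPointPolicy(context.TODO(), &oss.GetAccessPointPolicyRequest{
+//		Bucket:          oss.Ptr("examplebucket"),
+//		AccessPointName: oss.Ptr("ap-01"),
+//	})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println(result.Body) // the policy document as a JSON string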
+func (c *Client) GetAccessPointPolicy(ctx context.Context, request *GetAccessPointPolicyRequest, optFns ...func(*Options)) (*GetAccessPointPolicyResult, error) { + var err error + if request == nil { + request = &GetAccessPointPolicyRequest{} + } + input := &OperationInput{ + OpName: "GetAccessPointPolicy", + Method: "GET", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "accessPointPolicy": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"accessPointPolicy"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + body, err := io.ReadAll(output.Body) + defer output.Body.Close() + if err != nil { + return nil, err + } + result := &GetAccessPointPolicyResult{ + Body: string(body), + } + + if err = c.unmarshalOutput(result, output); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type DeleteAccessPointPolicyRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + // The name of the access point. + AccessPointName *string `input:"header,x-oss-access-point-name,required"` + + RequestCommon +} + +type DeleteAccessPointPolicyResult struct { + ResultCommon +} + +// DeleteAccessPointPolicy Deletes an access point policy. +func (c *Client) DeleteAccessPointPolicy(ctx context.Context, request *DeleteAccessPointPolicyRequest, optFns ...func(*Options)) (*DeleteAccessPointPolicyResult, error) { + var err error + if request == nil { + request = &DeleteAccessPointPolicyRequest{} + } + input := &OperationInput{ + OpName: "DeleteAccessPointPolicy", + Method: "DELETE", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "accessPointPolicy": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"accessPointPolicy"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &DeleteAccessPointPolicyResult{} + + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type PutAccessPointPolicyRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + // The name of the access point. + AccessPointName *string `input:"header,x-oss-access-point-name,required"` + + // The configurations of the access point policy. + Body io.Reader `input:"body,nop,required"` + + RequestCommon +} + +type PutAccessPointPolicyResult struct { + ResultCommon +} + +// PutAccessPointPolicy Configures an access point policy. 
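+//
+// Example (illustrative sketch; the policy document below is a placeholder, and the
+// client, bucket, and access point names are assumptions):
+//
+//	body := strings.NewReader(`{"Version":"1","Statement":[]}`)
+//	_, err := client.PutAccessPointPolicy(context.TODO(), &oss.PutAccessPointPolicyRequest{
+//		Bucket:          oss.Ptr("examplebucket"),
+//		AccessPointName: oss.Ptr("ap-01"),
+//		Body:            body,
+//	})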
+func (c *Client) PutAccessPointPolicy(ctx context.Context, request *PutAccessPointPolicyRequest, optFns ...func(*Options)) (*PutAccessPointPolicyResult, error) { + var err error + if request == nil { + request = &PutAccessPointPolicyRequest{} + } + input := &OperationInput{ + OpName: "PutAccessPointPolicy", + Method: "PUT", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "accessPointPolicy": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"accessPointPolicy"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &PutAccessPointPolicyResult{} + + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type DeleteAccessPointRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + // The name of the access point. + AccessPointName *string `input:"header,x-oss-access-point-name,required"` + + RequestCommon +} + +type DeleteAccessPointResult struct { + ResultCommon +} + +// DeleteAccessPoint Deletes an access point. +func (c *Client) DeleteAccessPoint(ctx context.Context, request *DeleteAccessPointRequest, optFns ...func(*Options)) (*DeleteAccessPointResult, error) { + var err error + if request == nil { + request = &DeleteAccessPointRequest{} + } + input := &OperationInput{ + OpName: "DeleteAccessPoint", + Method: "DELETE", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "accessPoint": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"accessPoint"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &DeleteAccessPointResult{} + + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type CreateAccessPointRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + // The container of the request body. + CreateAccessPointConfiguration *CreateAccessPointConfiguration `input:"body,CreateAccessPointConfiguration,xml,required"` + + RequestCommon +} + +type CreateAccessPointResult struct { + // The Alibaba Cloud Resource Name (ARN) of the access point. + AccessPointArn *string `xml:"AccessPointArn"` + + // The alias of the access point. + Alias *string `xml:"Alias"` + + ResultCommon +} + +// CreateAccessPoint Creates an access point. 
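+//
+// Example (illustrative sketch; names are placeholders, and NetworkOrigin "internet"
+// is one of the two documented values, the other being "vpc"):
+//
+//	result, err := client.CreateAccessPoint(context.TODO(), &oss.CreateAccessPointRequest{
+//		Bucket: oss.Ptr("examplebucket"),
+//		CreateAccessPointConfiguration: &oss.CreateAccessPointConfiguration{
+//			AccessPointName: oss.Ptr("ap-01"),
+//			NetworkOrigin:   oss.Ptr("internet"),
+//		},
+//	})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println(*result.AccessPointArn, *result.Alias)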
+func (c *Client) CreateAccessPoint(ctx context.Context, request *CreateAccessPointRequest, optFns ...func(*Options)) (*CreateAccessPointResult, error) { + var err error + if request == nil { + request = &CreateAccessPointRequest{} + } + input := &OperationInput{ + OpName: "CreateAccessPoint", + Method: "PUT", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "accessPoint": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"accessPoint"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &CreateAccessPointResult{} + + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_accesspoint_publicaccessblock.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_accesspoint_publicaccessblock.go new file mode 100644 index 000000000..4fa22593a --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_accesspoint_publicaccessblock.go @@ -0,0 +1,163 @@ +package oss + +import ( + "context" + + "github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/signer" +) + +type GetAccessPointPublicAccessBlockRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + // The name of the access point. + AccessPointName *string `input:"query,x-oss-access-point-name,required"` + + RequestCommon +} + +type GetAccessPointPublicAccessBlockResult struct { + // The container in which the Block Public Access configurations are stored. + PublicAccessBlockConfiguration *PublicAccessBlockConfiguration `output:"body,PublicAccessBlockConfiguration,xml"` + + ResultCommon +} + +// GetAccessPointPublicAccessBlock Queries the Block Public Access configurations of an access point. +func (c *Client) GetAccessPointPublicAccessBlock(ctx context.Context, request *GetAccessPointPublicAccessBlockRequest, optFns ...func(*Options)) (*GetAccessPointPublicAccessBlockResult, error) { + var err error + if request == nil { + request = &GetAccessPointPublicAccessBlockRequest{} + } + input := &OperationInput{ + OpName: "GetAccessPointPublicAccessBlock", + Method: "GET", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "publicAccessBlock": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"publicAccessBlock"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &GetAccessPointPublicAccessBlockResult{} + + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type PutAccessPointPublicAccessBlockRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + // The name of the access point. + AccessPointName *string `input:"query,x-oss-access-point-name,required"` + + // The request body. 
+ PublicAccessBlockConfiguration *PublicAccessBlockConfiguration `input:"body,PublicAccessBlockConfiguration,xml,required"` + + RequestCommon +} + +type PutAccessPointPublicAccessBlockResult struct { + ResultCommon +} + +// PutAccessPointPublicAccessBlock Enables or disables Block Public Access for an access point. +func (c *Client) PutAccessPointPublicAccessBlock(ctx context.Context, request *PutAccessPointPublicAccessBlockRequest, optFns ...func(*Options)) (*PutAccessPointPublicAccessBlockResult, error) { + var err error + if request == nil { + request = &PutAccessPointPublicAccessBlockRequest{} + } + input := &OperationInput{ + OpName: "PutAccessPointPublicAccessBlock", + Method: "PUT", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "publicAccessBlock": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"publicAccessBlock"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &PutAccessPointPublicAccessBlockResult{} + + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type DeleteAccessPointPublicAccessBlockRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + // The name of the access point. + AccessPointName *string `input:"query,x-oss-access-point-name,required"` + + RequestCommon +} + +type DeleteAccessPointPublicAccessBlockResult struct { + ResultCommon +} + +// DeleteAccessPointPublicAccessBlock Deletes the Block Public Access configurations of an access point. +func (c *Client) DeleteAccessPointPublicAccessBlock(ctx context.Context, request *DeleteAccessPointPublicAccessBlockRequest, optFns ...func(*Options)) (*DeleteAccessPointPublicAccessBlockResult, error) { + var err error + if request == nil { + request = &DeleteAccessPointPublicAccessBlockRequest{} + } + input := &OperationInput{ + OpName: "DeleteAccessPointPublicAccessBlock", + Method: "DELETE", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "publicAccessBlock": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"publicAccessBlock"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &DeleteAccessPointPublicAccessBlockResult{} + + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket.go new file mode 100644 index 000000000..9c73d7ef5 --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket.go @@ -0,0 +1,1310 @@ +package oss + +import ( + "context" + "encoding/xml" + "net/url" + "strings" + "time" + + "github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/signer" +) + +type PutBucketRequest struct { + // The name of the bucket to create. + Bucket *string `input:"host,bucket,required"` + + // The access control list (ACL) of the bucket. 
+ Acl BucketACLType `input:"header,x-oss-acl"` + + // The ID of the resource group. + ResourceGroupId *string `input:"header,x-oss-resource-group-id"` + + // The configuration information for the bucket. + CreateBucketConfiguration *CreateBucketConfiguration `input:"body,CreateBucketConfiguration,xml"` + + RequestCommon +} + +type CreateBucketConfiguration struct { + XMLName xml.Name `xml:"CreateBucketConfiguration"` + + // The storage class of the bucket. + StorageClass StorageClassType `xml:"StorageClass,omitempty"` + + // The redundancy type of the bucket. + DataRedundancyType DataRedundancyType `xml:"DataRedundancyType,omitempty"` +} + +type PutBucketResult struct { + ResultCommon +} + +// PutBucket Creates a bucket. +func (c *Client) PutBucket(ctx context.Context, request *PutBucketRequest, optFns ...func(*Options)) (*PutBucketResult, error) { + var err error + if request == nil { + request = &PutBucketRequest{} + } + input := &OperationInput{ + OpName: "PutBucket", + Method: "PUT", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Bucket: request.Bucket, + } + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &PutBucketResult{} + + if err = c.unmarshalOutput(result, output, discardBody); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type DeleteBucketRequest struct { + // The name of the bucket to delete. + Bucket *string `input:"host,bucket,required"` + + RequestCommon +} + +type DeleteBucketResult struct { + ResultCommon +} + +// DeleteBucket Deletes a bucket. +func (c *Client) DeleteBucket(ctx context.Context, request *DeleteBucketRequest, optFns ...func(*Options)) (*DeleteBucketResult, error) { + var err error + if request == nil { + request = &DeleteBucketRequest{} + } + input := &OperationInput{ + OpName: "DeleteBucket", + Method: "DELETE", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeDefault, + }, + Bucket: request.Bucket, + } + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &DeleteBucketResult{} + if err = c.unmarshalOutput(result, output, discardBody); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type ListObjectsRequest struct { + // The name of the bucket containing the objects + Bucket *string `input:"host,bucket,required"` + + // The character that is used to group objects by name. If you specify the delimiter parameter in the request, + // the response contains the CommonPrefixes parameter. The objects whose names contain the same string from + // the prefix to the next occurrence of the delimiter are grouped as a single result element in CommonPrefixes. + Delimiter *string `input:"query,delimiter"` + + // The encoding type of the content in the response. Valid value: url + EncodingType *string `input:"query,encoding-type"` + + // The name of the object after which the ListObjects (GetBucket) operation starts. + // If this parameter is specified, objects whose names are alphabetically greater than the marker value are returned. + Marker *string `input:"query,marker"` + + // The maximum number of objects that you want to return. 
If the list operation cannot be completed in one response
+	// because of the max-keys limit, the NextMarker element is included in the response as the marker
+	// for the next list operation.
+	MaxKeys int32 `input:"query,max-keys"`
+
+	// The prefix that the names of the returned objects must contain.
+	Prefix *string `input:"query,prefix"`
+
+	// Indicates that the requester is aware that the request and data download will incur costs.
+	RequestPayer *string `input:"header,x-oss-request-payer"`
+
+	RequestCommon
+}
+
+type ListObjectsResult struct {
+	// The name of the bucket.
+	Name *string `xml:"Name"`
+
+	// The prefix contained in the returned object names.
+	Prefix *string `xml:"Prefix"`
+
+	// The name of the object after which the list operation begins.
+	Marker *string `xml:"Marker"`
+
+	// The maximum number of returned objects in the response.
+	MaxKeys int32 `xml:"MaxKeys"`
+
+	// The character that is used to group objects by name.
+	Delimiter *string `xml:"Delimiter"`
+
+	// Indicates whether the returned results are truncated.
+	// true indicates that not all results are returned this time.
+	// false indicates that all results are returned this time.
+	IsTruncated bool `xml:"IsTruncated"`
+
+	// The position from which the next list operation starts.
+	NextMarker *string `xml:"NextMarker"`
+
+	// The encoding type of the content in the response.
+	EncodingType *string `xml:"EncodingType"`
+
+	// The container that stores the metadata of the returned objects.
+	Contents []ObjectProperties `xml:"Contents"`
+
+	// If the Delimiter parameter is specified in the request, the response contains the CommonPrefixes element.
+	CommonPrefixes []CommonPrefix `xml:"CommonPrefixes"`
+
+	ResultCommon
+}
+
+type ObjectProperties struct {
+	// The name of the object.
+	Key *string `xml:"Key"`
+
+	// The type of the object. Valid values: Normal, Multipart, and Appendable.
+	Type *string `xml:"Type"`
+
+	// The size of the returned object. Unit: bytes.
+	Size int64 `xml:"Size"`
+
+	// The entity tag (ETag). An ETag is created when an object is created to identify the content of the object.
+	ETag *string `xml:"ETag"`
+
+	// The time when the returned objects were last modified.
+	LastModified *time.Time `xml:"LastModified"`
+
+	// The storage class of the object.
+	StorageClass *string `xml:"StorageClass"`
+
+	// The container that stores information about the bucket owner.
+	Owner *Owner `xml:"Owner"`
+
+	// The restoration status of the object.
+	RestoreInfo *string `xml:"RestoreInfo"`
+
+	// The time when the storage class of the object is converted to Cold Archive or Deep Cold Archive based on lifecycle rules.
+	TransitionTime *time.Time `xml:"TransitionTime"`
+}
+
+type Owner struct {
+	// The ID of the bucket owner.
+	ID *string `xml:"ID"`
+
+	// The name of the object owner.
+	DisplayName *string `xml:"DisplayName"`
+}
+
+type CommonPrefix struct {
+	// The prefix contained in the returned object names.
+	Prefix *string `xml:"Prefix"`
+}
+
+// ListObjects Lists the information about all objects in a bucket.
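+//
+// Example (illustrative sketch; client and bucket name are placeholders): paging
+// through a bucket with the Marker/NextMarker cursor until IsTruncated is false.
+//
+//	var marker *string
+//	for {
+//		page, err := client.ListObjects(context.TODO(), &oss.ListObjectsRequest{
+//			Bucket: oss.Ptr("examplebucket"),
+//			Marker: marker,
+//		})
+//		if err != nil {
+//			log.Fatal(err)
+//		}
+//		for _, obj := range page.Contents {
+//			fmt.Println(*obj.Key, obj.Size)
+//		}
+//		if !page.IsTruncated {
+//			break
+//		}
+//		marker = page.NextMarker
+//	}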
+func (c *Client) ListObjects(ctx context.Context, request *ListObjectsRequest, optFns ...func(*Options)) (*ListObjectsResult, error) { + var err error + if request == nil { + request = &ListObjectsRequest{} + } + input := &OperationInput{ + OpName: "ListObjects", + Method: "GET", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeDefault, + }, + Parameters: map[string]string{ + "encoding-type": "url", + }, + Bucket: request.Bucket, + } + if err = c.marshalInput(request, input, updateContentMd5, enableNonStream); err != nil { + return nil, err + } + + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &ListObjectsResult{} + if err = c.unmarshalOutput(result, output, unmarshalBodyXml, unmarshalEncodeType); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +func unmarshalEncodeType(result any, output *OperationOutput) error { + switch r := result.(type) { + case *ListObjectsResult: + if r.EncodingType != nil && strings.EqualFold(*r.EncodingType, "url") { + fields := []**string{&r.Prefix, &r.Marker, &r.Delimiter, &r.NextMarker} + var s string + var err error + for _, pp := range fields { + if pp != nil && *pp != nil { + if s, err = url.QueryUnescape(**pp); err != nil { + return err + } + *pp = Ptr(s) + } + } + for i := 0; i < len(r.Contents); i++ { + if r.Contents[i].Key != nil { + if *r.Contents[i].Key, err = url.QueryUnescape(*r.Contents[i].Key); err != nil { + return err + } + } + + } + for i := 0; i < len(r.CommonPrefixes); i++ { + if r.CommonPrefixes[i].Prefix != nil { + if *r.CommonPrefixes[i].Prefix, err = url.QueryUnescape(*r.CommonPrefixes[i].Prefix); err != nil { + return err + } + } + } + } + case *ListObjectsV2Result: + if r.EncodingType != nil && strings.EqualFold(*r.EncodingType, "url") { + fields := []**string{&r.Prefix, &r.StartAfter, &r.Delimiter, &r.ContinuationToken, &r.NextContinuationToken} + var s string + var err error + for _, pp := range fields { + if pp != nil && *pp != nil { + if s, err = url.QueryUnescape(**pp); err != nil { + return err + } + *pp = Ptr(s) + } + } + for i := 0; i < len(r.Contents); i++ { + if r.Contents[i].Key != nil { + if *r.Contents[i].Key, err = url.QueryUnescape(*r.Contents[i].Key); err != nil { + return err + } + } + } + for i := 0; i < len(r.CommonPrefixes); i++ { + if r.CommonPrefixes[i].Prefix != nil { + if *r.CommonPrefixes[i].Prefix, err = url.QueryUnescape(*r.CommonPrefixes[i].Prefix); err != nil { + return err + } + } + } + } + case *DeleteMultipleObjectsResult: + if r.EncodingType != nil && strings.EqualFold(*r.EncodingType, "url") { + var err error + for i := 0; i < len(r.DeletedObjects); i++ { + if r.DeletedObjects[i].Key != nil { + if *r.DeletedObjects[i].Key, err = url.QueryUnescape(*r.DeletedObjects[i].Key); err != nil { + return err + } + } + } + } + case *InitiateMultipartUploadResult: + if r.EncodingType != nil && strings.EqualFold(*r.EncodingType, "url") { + var err error + if r.Key != nil { + if *r.Key, err = url.QueryUnescape(*r.Key); err != nil { + return err + } + } + } + case *CompleteMultipartUploadResult: + if r.EncodingType != nil && strings.EqualFold(*r.EncodingType, "url") { + var err error + if r.Key != nil { + if *r.Key, err = url.QueryUnescape(*r.Key); err != nil { + return err + } + } + } + case *ListMultipartUploadsResult: + if r.EncodingType != nil && strings.EqualFold(*r.EncodingType, "url") { + fields := []**string{&r.KeyMarker, &r.NextKeyMarker, &r.Prefix, &r.Delimiter} + var s 
string + var err error + for _, pp := range fields { + if pp != nil && *pp != nil { + if s, err = url.QueryUnescape(**pp); err != nil { + return err + } + *pp = Ptr(s) + } + } + for i := 0; i < len(r.Uploads); i++ { + if r.Uploads[i].Key != nil { + if *r.Uploads[i].Key, err = url.QueryUnescape(*r.Uploads[i].Key); err != nil { + return err + } + } + } + } + case *ListPartsResult: + if r.EncodingType != nil && strings.EqualFold(*r.EncodingType, "url") { + fields := []**string{&r.Key} + var s string + var err error + for _, pp := range fields { + if pp != nil && *pp != nil { + if s, err = url.QueryUnescape(**pp); err != nil { + return err + } + *pp = Ptr(s) + } + } + } + case *ListObjectVersionsResult: + if r.EncodingType != nil && strings.EqualFold(*r.EncodingType, "url") { + fields := []**string{&r.Prefix, &r.KeyMarker, &r.Delimiter, &r.NextKeyMarker} + var s string + var err error + for _, pp := range fields { + if pp != nil && *pp != nil { + if s, err = url.QueryUnescape(**pp); err != nil { + return err + } + *pp = Ptr(s) + } + } + for i := 0; i < len(r.ObjectVersions); i++ { + if r.ObjectVersions[i].Key != nil { + if *r.ObjectVersions[i].Key, err = url.QueryUnescape(*r.ObjectVersions[i].Key); err != nil { + return err + } + } + + } + for i := 0; i < len(r.ObjectDeleteMarkers); i++ { + if r.ObjectDeleteMarkers[i].Key != nil { + if *r.ObjectDeleteMarkers[i].Key, err = url.QueryUnescape(*r.ObjectDeleteMarkers[i].Key); err != nil { + return err + } + } + } + for i := 0; i < len(r.ObjectVersionsDeleteMarkers); i++ { + if r.ObjectVersionsDeleteMarkers[i].Key != nil { + if *r.ObjectVersionsDeleteMarkers[i].Key, err = url.QueryUnescape(*r.ObjectVersionsDeleteMarkers[i].Key); err != nil { + return err + } + } + } + for i := 0; i < len(r.CommonPrefixes); i++ { + if r.CommonPrefixes[i].Prefix != nil { + if *r.CommonPrefixes[i].Prefix, err = url.QueryUnescape(*r.CommonPrefixes[i].Prefix); err != nil { + return err + } + } + } + } + } + return nil +} + +type ListObjectsV2Request struct { + // The name of the bucket containing the objects + Bucket *string `input:"host,bucket,required"` + + // The character that is used to group objects by name. If you specify the delimiter parameter in the request, + // the response contains the CommonPrefixes parameter. The objects whose names contain the same string from + // the prefix to the next occurrence of the delimiter are grouped as a single result element in CommonPrefixes. + Delimiter *string `input:"query,delimiter"` + + // The name of the object after which the ListObjectsV2 (GetBucketV2) operation starts. + // The objects are returned in alphabetical order of their names. The start-after parameter + // is used to list the returned objects by page. + // The value of the parameter must be less than 1,024 bytes in length. + // Even if the specified start-after value does not exist during a conditional query, + // the ListObjectsV2 (GetBucketV2) operation starts from the object whose name is alphabetically greater than the start-after value. + // By default, this parameter is left empty. + StartAfter *string `input:"query,start-after"` + + // The token from which the ListObjectsV2 (GetBucketV2) operation must start. + // You can obtain the token from the NextContinuationToken parameter in the ListObjectsV2 (GetBucketV2) response. + ContinuationToken *string `input:"query,continuation-token"` + + // The maximum number of objects that you want to return. 
If the list operation cannot be completed in one response
+	// because of the max-keys limit, the NextMarker element is included in the response as the marker
+	// for the next list operation.
+	MaxKeys int32 `input:"query,max-keys"`
+
+	// The prefix that the names of the returned objects must contain.
+	Prefix *string `input:"query,prefix"`
+
+	// The encoding type of the content in the response. Valid value: url
+	EncodingType *string `input:"query,encoding-type"`
+
+	// Specifies whether to include information about the object owner in the response.
+	FetchOwner bool `input:"query,fetch-owner"`
+
+	// Indicates that the requester is aware that the request and data download will incur costs.
+	RequestPayer *string `input:"header,x-oss-request-payer"`
+
+	RequestCommon
+}
+
+type ListObjectsV2Result struct {
+	// The name of the bucket.
+	Name *string `xml:"Name"`
+
+	// The prefix contained in the returned object names.
+	Prefix *string `xml:"Prefix"`
+
+	// If the StartAfter parameter is specified in the request, the response contains the StartAfter parameter.
+	StartAfter *string `xml:"StartAfter"`
+
+	// The maximum number of returned objects in the response.
+	MaxKeys int32 `xml:"MaxKeys"`
+
+	// The character that is used to group objects by name.
+	Delimiter *string `xml:"Delimiter"`
+
+	// Indicates whether the returned results are truncated.
+	// true indicates that not all results are returned this time.
+	// false indicates that all results are returned this time.
+	IsTruncated bool `xml:"IsTruncated"`
+
+	// If the ContinuationToken parameter is specified in the request, the response contains the ContinuationToken parameter.
+	ContinuationToken *string `xml:"ContinuationToken"`
+
+	// The name of the object from which the next ListObjectsV2 (GetBucketV2) operation starts.
+	// The NextContinuationToken value is used as the ContinuationToken value to query subsequent results.
+	NextContinuationToken *string `xml:"NextContinuationToken"`
+
+	// The encoding type of the content in the response.
+	EncodingType *string `xml:"EncodingType"`
+
+	// The container that stores the metadata of the returned objects.
+	Contents []ObjectProperties `xml:"Contents"`
+
+	// If the Delimiter parameter is specified in the request, the response contains the CommonPrefixes element.
+	CommonPrefixes []CommonPrefix `xml:"CommonPrefixes"`
+
+	// The number of objects returned for this request. If Delimiter is specified, KeyCount is the sum of the values of Key and CommonPrefixes.
+	KeyCount int `xml:"KeyCount"`
+
+	// The time when the storage class of the object is converted to Cold Archive or Deep Cold Archive based on lifecycle rules.
+	TransitionTime *time.Time `xml:"TransitionTime"`
+
+	ResultCommon
+}
+
+// ListObjectsV2 Queries information about all objects in a bucket.
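+//
+// Example (illustrative sketch; client and bucket name are placeholders): paging
+// with the ContinuationToken/NextContinuationToken cursor until IsTruncated is false.
+//
+//	var token *string
+//	for {
+//		page, err := client.ListObjectsV2(context.TODO(), &oss.ListObjectsV2Request{
+//			Bucket:            oss.Ptr("examplebucket"),
+//			ContinuationToken: token,
+//		})
+//		if err != nil {
+//			log.Fatal(err)
+//		}
+//		for _, obj := range page.Contents {
+//			fmt.Println(*obj.Key, obj.Size)
+//		}
+//		if !page.IsTruncated {
+//			break
+//		}
+//		token = page.NextContinuationToken
+//	}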
+func (c *Client) ListObjectsV2(ctx context.Context, request *ListObjectsV2Request, optFns ...func(*Options)) (*ListObjectsV2Result, error) { + var err error + if request == nil { + request = &ListObjectsV2Request{} + } + input := &OperationInput{ + OpName: "ListObjectsV2", + Method: "GET", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeDefault, + }, + Parameters: map[string]string{ + "list-type": "2", + "encoding-type": "url", + }, + Bucket: request.Bucket, + } + if err = c.marshalInput(request, input, updateContentMd5, enableNonStream); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &ListObjectsV2Result{} + if err = c.unmarshalOutput(result, output, unmarshalBodyXml, unmarshalEncodeType); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type GetBucketInfoRequest struct { + // The name of the bucket containing the objects + Bucket *string `input:"host,bucket,required"` + RequestCommon +} + +type GetBucketInfoResult struct { + // The container that stores the bucket information. + BucketInfo BucketInfo `xml:"Bucket"` + ResultCommon +} + +// BucketInfo defines Bucket information +type BucketInfo struct { + // The name of the bucket. + Name *string `xml:"Name"` + + // Indicates whether access tracking is enabled for the bucket. + AccessMonitor *string `xml:"AccessMonitor"` + + // The region in which the bucket is located. + Location *string `xml:"Location"` + + // The time when the bucket is created. The time is in UTC. + CreationDate *time.Time `xml:"CreationDate"` + + // The public endpoint that is used to access the bucket over the Internet. + ExtranetEndpoint *string `xml:"ExtranetEndpoint"` + + // The internal endpoint that is used to access the bucket from Elastic + IntranetEndpoint *string `xml:"IntranetEndpoint"` + + // The container that stores the access control list (ACL) information about the bucket. + ACL *string `xml:"AccessControlList>Grant"` + + // The disaster recovery type of the bucket. + DataRedundancyType *string `xml:"DataRedundancyType"` + + // The container that stores the information about the bucket owner. + Owner *Owner `xml:"Owner"` + + // The storage class of the bucket. + StorageClass *string `xml:"StorageClass"` + + // The ID of the resource group to which the bucket belongs. + ResourceGroupId *string `xml:"ResourceGroupId"` + + // The container that stores the server-side encryption method. + SseRule SSERule `xml:"ServerSideEncryptionRule"` + + // Indicates whether versioning is enabled for the bucket. + Versioning *string `xml:"Versioning"` + + // Indicates whether transfer acceleration is enabled for the bucket. + TransferAcceleration *string `xml:"TransferAcceleration"` + + // Indicates whether cross-region replication (CRR) is enabled for the bucket. + CrossRegionReplication *string `xml:"CrossRegionReplication"` + + // The container that stores the logs. + BucketPolicy BucketPolicy `xml:"BucketPolicy"` + + // The description of the bucket. + Comment *string `xml:"Comment"` + + // Indicates whether Block Public Access is enabled for the bucket. + // true: Block Public Access is enabled. false: Block Public Access is disabled. + BlockPublicAccess *bool `xml:"BlockPublicAccess"` +} + +type SSERule struct { + // The customer master key (CMK) ID in use. A valid value is returned only if you set SSEAlgorithm to KMS + // and specify the CMK ID. 
In other cases, an empty value is returned. + KMSMasterKeyID *string `xml:"KMSMasterKeyID"` + + // The server-side encryption method that is used by default. + SSEAlgorithm *string `xml:"SSEAlgorithm"` + + // Object's encryption algorithm. If this element is not included in the response, + // it indicates that the object is using the AES256 encryption algorithm. + // This option is only valid if the SSEAlgorithm value is KMS. + KMSDataEncryption *string `xml:"KMSDataEncryption"` +} + +type BucketPolicy struct { + // The name of the bucket that stores the logs. + LogBucket *string `xml:"LogBucket"` + + // The directory in which logs are stored. + LogPrefix *string `xml:"LogPrefix"` +} + +// GetBucketInfo Queries information about a bucket. +func (c *Client) GetBucketInfo(ctx context.Context, request *GetBucketInfoRequest, optFns ...func(*Options)) (*GetBucketInfoResult, error) { + var err error + if request == nil { + request = &GetBucketInfoRequest{} + } + input := &OperationInput{ + OpName: "GetBucketInfo", + Method: "GET", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeDefault, + }, + Parameters: map[string]string{ + "bucketInfo": "", + }, + Bucket: request.Bucket, + } + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &GetBucketInfoResult{} + if err = c.unmarshalOutput(result, output, unmarshalBodyXml, unmarshalSseRule); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + return result, err +} + +func unmarshalSseRule(result any, output *OperationOutput) error { + switch r := result.(type) { + case *GetBucketInfoResult: + fields := []*string{r.BucketInfo.SseRule.KMSMasterKeyID, r.BucketInfo.SseRule.SSEAlgorithm, r.BucketInfo.SseRule.KMSDataEncryption} + for _, pp := range fields { + if pp != nil && *pp == "None" { + *pp = "" + } + } + } + return nil +} + +type GetBucketLocationRequest struct { + // The name of the bucket containing the objects + Bucket *string `input:"host,bucket,required"` + RequestCommon +} + +type GetBucketLocationResult struct { + // The region in which the bucket is located. + LocationConstraint *string `xml:",chardata"` + ResultCommon +} + +// GetBucketLocation Queries the region of an Object Storage Service (OSS) bucket. +func (c *Client) GetBucketLocation(ctx context.Context, request *GetBucketLocationRequest, optFns ...func(*Options)) (*GetBucketLocationResult, error) { + var err error + if request == nil { + request = &GetBucketLocationRequest{} + } + input := &OperationInput{ + OpName: "GetBucketLocation", + Method: "GET", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeDefault, + }, + Parameters: map[string]string{ + "location": "", + }, + Bucket: request.Bucket, + } + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + result := &GetBucketLocationResult{} + if err = c.unmarshalOutput(result, output, unmarshalBodyXml); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + return result, err +} + +type GetBucketStatRequest struct { + // The name of the bucket containing the objects + Bucket *string `input:"host,bucket,required"` + RequestCommon +} + +type GetBucketStatResult struct { + // The storage capacity of the bucket. Unit: bytes. 
+	Storage int64 `xml:"Storage"`
+
+	// The total number of objects that are stored in the bucket.
+	ObjectCount int64 `xml:"ObjectCount"`
+
+	// The number of multipart upload tasks that have been initiated but are not completed or canceled.
+	MultipartUploadCount int64 `xml:"MultipartUploadCount"`
+
+	// The number of LiveChannels in the bucket.
+	LiveChannelCount int64 `xml:"LiveChannelCount"`
+
+	// The time when the obtained information was last modified. The value of this element is a UNIX timestamp. Unit: seconds.
+	LastModifiedTime int64 `xml:"LastModifiedTime"`
+
+	// The storage usage of Standard objects in the bucket. Unit: bytes.
+	StandardStorage int64 `xml:"StandardStorage"`
+
+	// The number of Standard objects in the bucket.
+	StandardObjectCount int64 `xml:"StandardObjectCount"`
+
+	// The billed storage usage of Infrequent Access (IA) objects in the bucket. Unit: bytes.
+	InfrequentAccessStorage int64 `xml:"InfrequentAccessStorage"`
+
+	// The actual storage usage of IA objects in the bucket. Unit: bytes.
+	InfrequentAccessRealStorage int64 `xml:"InfrequentAccessRealStorage"`
+
+	// The number of IA objects in the bucket.
+	InfrequentAccessObjectCount int64 `xml:"InfrequentAccessObjectCount"`
+
+	// The billed storage usage of Archive objects in the bucket. Unit: bytes.
+	ArchiveStorage int64 `xml:"ArchiveStorage"`
+
+	// The actual storage usage of Archive objects in the bucket. Unit: bytes.
+	ArchiveRealStorage int64 `xml:"ArchiveRealStorage"`
+
+	// The number of Archive objects in the bucket.
+	ArchiveObjectCount int64 `xml:"ArchiveObjectCount"`
+
+	// The billed storage usage of Cold Archive objects in the bucket. Unit: bytes.
+	ColdArchiveStorage int64 `xml:"ColdArchiveStorage"`
+
+	// The actual storage usage of Cold Archive objects in the bucket. Unit: bytes.
+	ColdArchiveRealStorage int64 `xml:"ColdArchiveRealStorage"`
+
+	// The number of Cold Archive objects in the bucket.
+	ColdArchiveObjectCount int64 `xml:"ColdArchiveObjectCount"`
+
+	// The number of Deep Cold Archive objects in the bucket.
+	DeepColdArchiveObjectCount int64 `xml:"DeepColdArchiveObjectCount"`
+
+	// The billed storage usage of Deep Cold Archive objects in the bucket. Unit: bytes.
+	DeepColdArchiveStorage int64 `xml:"DeepColdArchiveStorage"`
+
+	// The actual storage usage of Deep Cold Archive objects in the bucket. Unit: bytes.
+	DeepColdArchiveRealStorage int64 `xml:"DeepColdArchiveRealStorage"`
+
+	// The number of multipart parts in the bucket.
+	MultipartPartCount int64 `xml:"MultipartPartCount"`
+
+	// The number of delete markers in the bucket.
+	DeleteMarkerCount int64 `xml:"DeleteMarkerCount"`
+
+	ResultCommon
+}
+
+// GetBucketStat Queries the storage capacity of a specified bucket and the number of objects that are stored in the bucket.
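+//
+// Example (illustrative sketch; client and bucket name are placeholders):
+//
+//	stat, err := client.GetBucketStat(context.TODO(), &oss.GetBucketStatRequest{
+//		Bucket: oss.Ptr("examplebucket"),
+//	})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println(stat.Storage, stat.ObjectCount) // bytes stored, number of objects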
+func (c *Client) GetBucketStat(ctx context.Context, request *GetBucketStatRequest, optFns ...func(*Options)) (*GetBucketStatResult, error) { + var err error + if request == nil { + request = &GetBucketStatRequest{} + } + input := &OperationInput{ + OpName: "GetBucketStat", + Method: "GET", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeDefault, + }, + Parameters: map[string]string{ + "stat": "", + }, + Bucket: request.Bucket, + } + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + result := &GetBucketStatResult{} + if err = c.unmarshalOutput(result, output, unmarshalBodyXml); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + return result, err +} + +type PutBucketAclRequest struct { + // The name of the bucket containing the objects + Bucket *string `input:"host,bucket,required"` + + // The access control list (ACL) of the object. + Acl BucketACLType `input:"header,x-oss-acl,required"` + + RequestCommon +} + +type PutBucketAclResult struct { + ResultCommon +} + +// PutBucketAcl You can call this operation to configure or modify the ACL of a bucket. +func (c *Client) PutBucketAcl(ctx context.Context, request *PutBucketAclRequest, optFns ...func(*Options)) (*PutBucketAclResult, error) { + var err error + if request == nil { + request = &PutBucketAclRequest{} + } + input := &OperationInput{ + OpName: "PutBucketAcl", + Method: "PUT", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeDefault, + }, + Parameters: map[string]string{ + "acl": "", + }, + Bucket: request.Bucket, + } + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + result := &PutBucketAclResult{} + if err = c.unmarshalOutput(result, output, unmarshalBodyXml); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + return result, err +} + +type GetBucketAclRequest struct { + // The name of the bucket containing the objects + Bucket *string `input:"host,bucket,required"` + + RequestCommon +} + +type GetBucketAclResult struct { + // The container that stores the access control list (ACL) information about the bucket. + ACL *string `xml:"AccessControlList>Grant"` + + // The container that stores information about the bucket owner. + Owner *Owner `xml:"Owner"` + + ResultCommon +} + +// GetBucketAcl You can call this operation to query the ACL of a bucket. 
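+//
+// Example (illustrative sketch; client and bucket name are placeholders):
+//
+//	acl, err := client.GetBucketAcl(context.TODO(), &oss.GetBucketAclRequest{
+//		Bucket: oss.Ptr("examplebucket"),
+//	})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	if acl.ACL != nil {
+//		fmt.Println(*acl.ACL) // e.g. "private"
+//	}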
+func (c *Client) GetBucketAcl(ctx context.Context, request *GetBucketAclRequest, optFns ...func(*Options)) (*GetBucketAclResult, error) { + var err error + if request == nil { + request = &GetBucketAclRequest{} + } + input := &OperationInput{ + OpName: "GetBucketAcl", + Method: "GET", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeDefault, + }, + Parameters: map[string]string{ + "acl": "", + }, + Bucket: request.Bucket, + } + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + result := &GetBucketAclResult{} + if err = c.unmarshalOutput(result, output, unmarshalBodyXml); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + return result, err +} + +type PutBucketVersioningRequest struct { + // The name of the bucket containing the objects + Bucket *string `input:"host,bucket,required"` + + VersioningConfiguration *VersioningConfiguration `input:"body,VersioningConfiguration,xml,required"` + + RequestCommon +} + +type VersioningConfiguration struct { + // The versioning state of the bucket. Valid values: Enabled,Suspended + Status VersioningStatusType `xml:"Status"` +} + +type PutBucketVersioningResult struct { + ResultCommon +} + +// PutBucketVersioning Configures the versioning state for a bucket. +func (c *Client) PutBucketVersioning(ctx context.Context, request *PutBucketVersioningRequest, optFns ...func(*Options)) (*PutBucketVersioningResult, error) { + var err error + if request == nil { + request = &PutBucketVersioningRequest{} + } + input := &OperationInput{ + OpName: "PutBucketVersioning", + Method: "PUT", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "versioning": "", + }, + Bucket: request.Bucket, + } + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + result := &PutBucketVersioningResult{} + if err = c.unmarshalOutput(result, output, unmarshalBodyXml); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + return result, err +} + +type GetBucketVersioningRequest struct { + // The name of the bucket containing the objects + Bucket *string `input:"host,bucket,required"` + + RequestCommon +} + +type GetBucketVersioningResult struct { + // The versioning state of the bucket. Valid values: Enabled,Suspended + VersionStatus *string `xml:"Status"` + + ResultCommon +} + +// GetBucketVersioning You can call this operation to query the versioning state of a bucket. 
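+//
+// Example (illustrative sketch; client and bucket name are placeholders):
+//
+//	v, err := client.GetBucketVersioning(context.TODO(), &oss.GetBucketVersioningRequest{
+//		Bucket: oss.Ptr("examplebucket"),
+//	})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	if v.VersionStatus != nil {
+//		fmt.Println(*v.VersionStatus) // "Enabled" or "Suspended"
+//	}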
+func (c *Client) GetBucketVersioning(ctx context.Context, request *GetBucketVersioningRequest, optFns ...func(*Options)) (*GetBucketVersioningResult, error) {
+	var err error
+	if request == nil {
+		request = &GetBucketVersioningRequest{}
+	}
+	input := &OperationInput{
+		OpName: "GetBucketVersioning",
+		Method: "GET",
+		Parameters: map[string]string{
+			"versioning": "",
+		},
+		Bucket: request.Bucket,
+	}
+	if err = c.marshalInput(request, input, updateContentMd5); err != nil {
+		return nil, err
+	}
+	output, err := c.invokeOperation(ctx, input, optFns)
+	if err != nil {
+		return nil, err
+	}
+	result := &GetBucketVersioningResult{}
+	if err = c.unmarshalOutput(result, output, unmarshalBodyXml); err != nil {
+		return nil, c.toClientError(err, "UnmarshalOutputFail", output)
+	}
+	return result, err
+}
+
+type ListObjectVersionsRequest struct {
+	// The name of the bucket containing the objects
+	Bucket *string `input:"host,bucket,required"`
+
+	// The character that is used to group objects by name. If you specify the delimiter parameter in the request,
+	// the response contains the CommonPrefixes parameter. The objects whose names contain the same string from
+	// the prefix to the next occurrence of the delimiter are grouped as a single result element in CommonPrefixes.
+	Delimiter *string `input:"query,delimiter"`
+
+	// Specifies that objects whose names are alphabetically after the value of the key-marker parameter are returned.
+	// This parameter can be specified together with version-id-marker.
+	// By default, this parameter is left empty.
+	KeyMarker *string `input:"query,key-marker"`
+
+	// Specifies that the versions created before the version specified by version-id-marker for the object
+	// whose name is specified by key-marker are returned by creation time in descending order.
+	// By default, if this parameter is not specified, the results are returned from the latest
+	// version of the object whose name is alphabetically after the value of key-marker.
+	VersionIdMarker *string `input:"query,version-id-marker"`
+
+	// The maximum number of objects that you want to return. If the list operation cannot be completed in one response
+	// because of the max-keys limit, the NextMarker element is included in the response as the marker
+	// for the next list operation.
+	MaxKeys int32 `input:"query,max-keys"`
+
+	// The prefix that the names of the returned objects must contain.
+	Prefix *string `input:"query,prefix"`
+
+	// The encoding type of the content in the response. Valid value: url
+	EncodingType *string `input:"query,encoding-type"`
+
+	// Indicates that the requester is aware that the request and data download will incur costs.
+	RequestPayer *string `input:"header,x-oss-request-payer"`
+
+	// Specifies whether to store object versions and delete markers together in one container.
+	// When false (default), object versions are stored in ListObjectVersionsResult.ObjectVersions
+	// and delete markers are stored in ListObjectVersionsResult.ObjectDeleteMarkers.
+	// When true, object versions and delete markers are stored together, in the order returned,
+	// in ListObjectVersionsResult.ObjectVersionsDeleteMarkers.
+	IsMix bool
+
+	RequestCommon
+}
+
+type ListObjectVersionsResult struct {
+	// The name of the bucket.
+	Name *string `xml:"Name"`
+
+	// Indicates the object from which the ListObjectVersions (GetBucketVersions) operation starts.
+ KeyMarker *string `xml:"KeyMarker"` + + // The version from which the ListObjectVersions (GetBucketVersions) operation starts. + // This parameter is used together with KeyMarker. + VersionIdMarker *string `xml:"VersionIdMarker"` + + // If not all results are returned for the request, the NextKeyMarker parameter is included + // in the response to indicate the key-marker value of the next ListObjectVersions (GetBucketVersions) request. + NextKeyMarker *string `xml:"NextKeyMarker"` + + // If not all results are returned for the request, the NextVersionIdMarker parameter is included in + // the response to indicate the version-id-marker value of the next ListObjectVersions (GetBucketVersions) request. + NextVersionIdMarker *string `xml:"NextVersionIdMarker"` + + // The container that stores delete markers. + ObjectDeleteMarkers []ObjectDeleteMarkerProperties `xml:"DeleteMarker"` + + // The container that stores the versions of objects, excluding delete markers. + ObjectVersions []ObjectVersionProperties `xml:"Version"` + + // The container that stores the versions of objects and delete markers together in the order they are returned. + // Only valid when ListObjectVersionsRequest.IsMix is set to true + ObjectVersionsDeleteMarkers []ObjectMixProperties `xml:"ObjectMix"` + + // The prefix contained in the returned object names. + Prefix *string `xml:"Prefix"` + + // The maximum number of returned objects in the response. + MaxKeys int32 `xml:"MaxKeys"` + + // The character that is used to group objects by name. + Delimiter *string `xml:"Delimiter"` + + // Indicates whether the returned results are truncated. + // true indicates that not all results are returned this time. + // false indicates that all results are returned this time. + IsTruncated bool `xml:"IsTruncated"` + + // The encoding type of the content in the response. + EncodingType *string `xml:"EncodingType"` + + // If the Delimiter parameter is specified in the request, the response contains the CommonPrefixes element. + CommonPrefixes []CommonPrefix `xml:"CommonPrefixes"` + + ResultCommon +} + +type ObjectMixProperties ObjectVersionProperties + +func (m ObjectMixProperties) IsDeleteMarker() bool { + if m.VersionId != nil && m.Type == nil { + return true + } + return false +} + +type ObjectDeleteMarkerProperties struct { + // The name of the object. + Key *string `xml:"Key"` + + // The version ID of the object. + VersionId *string `xml:"VersionId"` + + // Indicates whether the version is the current version. + IsLatest bool `xml:"IsLatest"` + + // The time when the returned objects were last modified. + LastModified *time.Time `xml:"LastModified"` + + // The container that stores information about the bucket owner. + Owner *Owner `xml:"Owner"` +} + +type ObjectVersionProperties struct { + // The name of the object. + Key *string `xml:"Key"` + + // The version ID of the object. + VersionId *string `xml:"VersionId"` + + // Indicates whether the version is the current version. + IsLatest bool `xml:"IsLatest"` + + // The time when the returned objects were last modified. + LastModified *time.Time `xml:"LastModified"` + + // The type of the returned object. + Type *string `xml:"Type"` + + // The size of the returned object. Unit: bytes. + Size int64 `xml:"Size"` + + // The entity tag (ETag) that is generated when an object is created. ETags are used to identify the content of objects. + ETag *string `xml:"ETag"` + + // The storage class of the object. 
+ StorageClass *string `xml:"StorageClass"` + + // The container that stores information about the bucket owner. + Owner *Owner `xml:"Owner"` + + // The restoration status of the object. + RestoreInfo *string `xml:"RestoreInfo"` + + // The time when the storage class of the object is converted to Cold Archive or Deep Cold Archive based on lifecycle rules. + TransitionTime *time.Time `xml:"TransitionTime"` +} + +// ListObjectVersions Lists the versions of all objects in a bucket, including delete markers. +func (c *Client) ListObjectVersions(ctx context.Context, request *ListObjectVersionsRequest, optFns ...func(*Options)) (*ListObjectVersionsResult, error) { + var err error + if request == nil { + request = &ListObjectVersionsRequest{} + } + input := &OperationInput{ + OpName: "ListObjectVersions", + Method: "GET", + Parameters: map[string]string{ + "versions": "", + "encoding-type": "url", + }, + Bucket: request.Bucket, + } + if err = c.marshalInput(request, input, updateContentMd5, enableNonStream); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &ListObjectVersionsResult{} + var unmarshalFns []func(result any, output *OperationOutput) error + if request.IsMix { + unmarshalFns = append(unmarshalFns, unmarshalBodyXmlVersions) + } else { + unmarshalFns = append(unmarshalFns, unmarshalBodyXml) + } + unmarshalFns = append(unmarshalFns, unmarshalEncodeType) + if err = c.unmarshalOutput(result, output, unmarshalFns...); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type PutBucketRequestPaymentRequest struct { + // The name of the bucket containing the objects + Bucket *string `input:"host,bucket,required"` + + // The request payment configuration information for the bucket. + PaymentConfiguration *RequestPaymentConfiguration `input:"body,RequestPaymentConfiguration,xml,required"` + + RequestCommon +} + +type RequestPaymentConfiguration struct { + XMLName xml.Name `xml:"RequestPaymentConfiguration"` + + // The payer of the request and traffic fees. + Payer PayerType `xml:"Payer"` +} + +type PutBucketRequestPaymentResult struct { + ResultCommon +} + +// PutBucketRequestPayment You can call this operation to enable pay-by-requester for a bucket. 
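+//
+// Editor's note: a hedged usage sketch, not part of the vendored source. PaymentConfiguration
+// is required; "Requester" is assumed here to be a valid PayerType value (pay-by-requester),
+// and the bucket name is a placeholder:
+//
+//	_, err := client.PutBucketRequestPayment(context.TODO(), &PutBucketRequestPaymentRequest{
+//		Bucket:               Ptr("examplebucket"),
+//		PaymentConfiguration: &RequestPaymentConfiguration{Payer: "Requester"},
+//	})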
+func (c *Client) PutBucketRequestPayment(ctx context.Context, request *PutBucketRequestPaymentRequest, optFns ...func(*Options)) (*PutBucketRequestPaymentResult, error) { + var err error + if request == nil { + request = &PutBucketRequestPaymentRequest{} + } + input := &OperationInput{ + OpName: "PutBucketRequestPayment", + Method: "PUT", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeDefault, + }, + Parameters: map[string]string{ + "requestPayment": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"requestPayment"}) + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + result := &PutBucketRequestPaymentResult{} + if err = c.unmarshalOutput(result, output, unmarshalBodyXml); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + return result, err +} + +type GetBucketRequestPaymentRequest struct { + // The name of the bucket containing the objects + Bucket *string `input:"host,bucket,required"` + + RequestCommon +} + +type GetBucketRequestPaymentResult struct { + // Indicates who pays the download and request fees. + Payer *string `xml:"Payer"` + + ResultCommon +} + +// GetBucketRequestPayment You can call this operation to obtain pay-by-requester configurations for a bucket. +func (c *Client) GetBucketRequestPayment(ctx context.Context, request *GetBucketRequestPaymentRequest, optFns ...func(*Options)) (*GetBucketRequestPaymentResult, error) { + var err error + if request == nil { + request = &GetBucketRequestPaymentRequest{} + } + input := &OperationInput{ + OpName: "GetBucketRequestPayment", + Method: "GET", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeDefault, + }, + Parameters: map[string]string{ + "requestPayment": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"requestPayment"}) + if err = c.marshalInput(request, input); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + result := &GetBucketRequestPaymentResult{} + if err = c.unmarshalOutput(result, output, unmarshalBodyXml); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + return result, err +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_accessmonitor.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_accessmonitor.go new file mode 100644 index 000000000..3226aff4c --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_accessmonitor.go @@ -0,0 +1,106 @@ +package oss + +import ( + "context" +) + +type AccessMonitorConfiguration struct { + // The access tracking status of the bucket. Valid values:- Enabled: Access tracking is enabled.- Disabled: Access tracking is disabled. + Status AccessMonitorStatusType `xml:"Status"` +} + +type PutBucketAccessMonitorRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + // The request body schema. + AccessMonitorConfiguration *AccessMonitorConfiguration `input:"body,AccessMonitorConfiguration,xml,required"` + + RequestCommon +} + +type PutBucketAccessMonitorResult struct { + ResultCommon +} + +// PutBucketAccessMonitor Modifies the access tracking status of a bucket. 
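+//
+// Editor's note: an illustrative sketch, not part of the vendored source. The Status values
+// "Enabled" and "Disabled" come from the AccessMonitorConfiguration doc comment above; the
+// bucket name is a placeholder:
+//
+//	_, err := client.PutBucketAccessMonitor(context.TODO(), &PutBucketAccessMonitorRequest{
+//		Bucket:                     Ptr("examplebucket"),
+//		AccessMonitorConfiguration: &AccessMonitorConfiguration{Status: "Enabled"},
+//	})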
+func (c *Client) PutBucketAccessMonitor(ctx context.Context, request *PutBucketAccessMonitorRequest, optFns ...func(*Options)) (*PutBucketAccessMonitorResult, error) { + var err error + if request == nil { + request = &PutBucketAccessMonitorRequest{} + } + input := &OperationInput{ + OpName: "PutBucketAccessMonitor", + Method: "PUT", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "accessmonitor": "", + }, + Bucket: request.Bucket, + } + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &PutBucketAccessMonitorResult{} + + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type GetBucketAccessMonitorRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + RequestCommon +} + +type GetBucketAccessMonitorResult struct { + // The container that stores access monitor configuration. + AccessMonitorConfiguration *AccessMonitorConfiguration `output:"body,AccessMonitorConfiguration,xml"` + + ResultCommon +} + +// GetBucketAccessMonitor Queries the access tracking status of a bucket. +func (c *Client) GetBucketAccessMonitor(ctx context.Context, request *GetBucketAccessMonitorRequest, optFns ...func(*Options)) (*GetBucketAccessMonitorResult, error) { + var err error + if request == nil { + request = &GetBucketAccessMonitorRequest{} + } + input := &OperationInput{ + OpName: "GetBucketAccessMonitor", + Method: "GET", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "accessmonitor": "", + }, + Bucket: request.Bucket, + } + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &GetBucketAccessMonitorResult{} + + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_archivedirectread.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_archivedirectread.go new file mode 100644 index 000000000..fac2aa56e --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_archivedirectread.go @@ -0,0 +1,104 @@ +package oss + +import ( + "context" + + "github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/signer" +) + +type ArchiveDirectReadConfiguration struct { + // Specifies whether to enable real-time access of Archive objects for a bucket. Valid values:- true- false + Enabled *bool `xml:"Enabled"` +} + +type GetBucketArchiveDirectReadRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + RequestCommon +} + +type GetBucketArchiveDirectReadResult struct { + // The container that stores the configurations for real-time access of Archive objects. + ArchiveDirectReadConfiguration *ArchiveDirectReadConfiguration `output:"body,ArchiveDirectReadConfiguration,xml"` + + ResultCommon +} + +// GetBucketArchiveDirectRead Queries whether real-time access of Archive objects is enabled for a bucket. 
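+//
+// Editor's note: a minimal read-side sketch, not part of the vendored source; it uses only
+// the request and result types defined above, with a placeholder bucket name:
+//
+//	result, err := client.GetBucketArchiveDirectRead(context.TODO(), &GetBucketArchiveDirectReadRequest{
+//		Bucket: Ptr("examplebucket"),
+//	})
+//	if err == nil && result.ArchiveDirectReadConfiguration != nil &&
+//		result.ArchiveDirectReadConfiguration.Enabled != nil {
+//		fmt.Println(*result.ArchiveDirectReadConfiguration.Enabled)
+//	}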
+func (c *Client) GetBucketArchiveDirectRead(ctx context.Context, request *GetBucketArchiveDirectReadRequest, optFns ...func(*Options)) (*GetBucketArchiveDirectReadResult, error) { + var err error + if request == nil { + request = &GetBucketArchiveDirectReadRequest{} + } + input := &OperationInput{ + OpName: "GetBucketArchiveDirectRead", + Method: "GET", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "bucketArchiveDirectRead": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"bucketArchiveDirectRead"}) + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + result := &GetBucketArchiveDirectReadResult{} + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + return result, err +} + +type PutBucketArchiveDirectReadRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + // The request body. + ArchiveDirectReadConfiguration *ArchiveDirectReadConfiguration `input:"body,ArchiveDirectReadConfiguration,xml,required"` + + RequestCommon +} + +type PutBucketArchiveDirectReadResult struct { + ResultCommon +} + +// PutBucketArchiveDirectRead Enables or disables real-time access of Archive objects for a bucket. +func (c *Client) PutBucketArchiveDirectRead(ctx context.Context, request *PutBucketArchiveDirectReadRequest, optFns ...func(*Options)) (*PutBucketArchiveDirectReadResult, error) { + var err error + if request == nil { + request = &PutBucketArchiveDirectReadRequest{} + } + input := &OperationInput{ + OpName: "PutBucketArchiveDirectRead", + Method: "PUT", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "bucketArchiveDirectRead": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"bucketArchiveDirectRead"}) + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + result := &PutBucketArchiveDirectReadResult{} + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + return result, err +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_cname.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_cname.go new file mode 100644 index 000000000..20c9dad78 --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_cname.go @@ -0,0 +1,351 @@ +package oss + +import ( + "context" + + "github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/signer" +) + +type CertificateConfiguration struct { + // The ID of the certificate. + CertId *string `xml:"CertId"` + + // The public key of the certificate. + Certificate *string `xml:"Certificate"` + + // The private key of the certificate. + PrivateKey *string `xml:"PrivateKey"` + + // The ID of the certificate. If the Force parameter is not set to true, the OSS server checks whether the value of the Force parameter matches the current certificate ID. 
If the value does not match the certificate ID, an error is returned. Note: if you do not specify the PreviousCertId parameter when you bind a certificate, you must set the Force parameter to true.
+ PreviousCertId *string `xml:"PreviousCertId"`
+
+ // Specifies whether to overwrite the certificate. Valid values:- true: overwrites the certificate.- false: does not overwrite the certificate.
+ Force *bool `xml:"Force"`
+
+ // Specifies whether to delete the certificate. Valid values:- true: deletes the certificate.- false: does not delete the certificate.
+ DeleteCertificate *bool `xml:"DeleteCertificate"`
+}
+
+type BucketCnameConfiguration struct {
+ // The custom domain name.
+ Domain *string `xml:"Cname>Domain"`
+
+ // The container for which the certificate is configured.
+ CertificateConfiguration *CertificateConfiguration `xml:"Cname>CertificateConfiguration"`
+}
+
+type CnameCertificate struct {
+ // The time when the certificate was bound.
+ CreationDate *string `xml:"CreationDate"`
+
+ // The signature of the certificate.
+ Fingerprint *string `xml:"Fingerprint"`
+
+ // The time when the certificate takes effect.
+ ValidStartDate *string `xml:"ValidStartDate"`
+
+ // The time when the certificate expires.
+ ValidEndDate *string `xml:"ValidEndDate"`
+
+ // The source of the certificate. Valid values: * CAS * Upload
+ Type *string `xml:"Type"`
+
+ // The ID of the certificate.
+ CertId *string `xml:"CertId"`
+
+ // The status of the certificate. Valid values: * Enabled * Disabled
+ Status *string `xml:"Status"`
+}
+
+type CnameInfo struct {
+ // The custom domain name.
+ Domain *string `xml:"Domain"`
+
+ // The time when the custom domain name was mapped.
+ LastModified *string `xml:"LastModified"`
+
+ // The status of the domain name. Valid values: * Enabled * Disabled
+ Status *string `xml:"Status"`
+
+ // The container in which the certificate information is stored.
+ Certificate *CnameCertificate `xml:"Certificate"`
+}
+
+type CnameToken struct {
+ // The name of the bucket to which the CNAME record is mapped.
+ Bucket *string `xml:"Bucket"`
+
+ // The name of the CNAME record that is mapped to the bucket.
+ Cname *string `xml:"Cname"`
+
+ // The CNAME token that is returned by OSS.
+ Token *string `xml:"Token"`
+
+ // The time when the CNAME token expires.
+ ExpireTime *string `xml:"ExpireTime"`
+}
+
+type PutCnameRequest struct {
+ // The name of the bucket.
+ Bucket *string `input:"host,bucket,required"`
+
+ // The request body schema.
+ BucketCnameConfiguration *BucketCnameConfiguration `input:"body,BucketCnameConfiguration,xml,required"`
+
+ RequestCommon
+}
+
+type PutCnameResult struct {
+ ResultCommon
+}
+
+// PutCname Maps a CNAME record to a bucket.
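+//
+// Editor's note: an illustrative sketch, not part of the vendored source. The domain is a
+// placeholder, and CertificateConfiguration is omitted, which maps the CNAME record without
+// binding a certificate:
+//
+//	_, err := client.PutCname(context.TODO(), &PutCnameRequest{
+//		Bucket: Ptr("examplebucket"),
+//		BucketCnameConfiguration: &BucketCnameConfiguration{
+//			Domain: Ptr("www.example.com"),
+//		},
+//	})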
+func (c *Client) PutCname(ctx context.Context, request *PutCnameRequest, optFns ...func(*Options)) (*PutCnameResult, error) { + var err error + if request == nil { + request = &PutCnameRequest{} + } + input := &OperationInput{ + OpName: "PutCname", + Method: "POST", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "cname": "", + "comp": "add", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"comp", "cname"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &PutCnameResult{} + + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type ListCnameRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + RequestCommon +} + +type ListCnameResult struct { + // The container that is used to store the information about all CNAME records. + Cnames []CnameInfo `xml:"Cname"` + + // The name of the bucket to which the CNAME records you want to query are mapped. + Bucket *string `xml:"Bucket"` + + // The name of the bucket owner. + Owner *string `xml:"Owner"` + + ResultCommon +} + +// ListCname Queries all CNAME records that are mapped to a bucket. +func (c *Client) ListCname(ctx context.Context, request *ListCnameRequest, optFns ...func(*Options)) (*ListCnameResult, error) { + var err error + if request == nil { + request = &ListCnameRequest{} + } + input := &OperationInput{ + OpName: "ListCname", + Method: "GET", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "cname": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"cname"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &ListCnameResult{} + + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type DeleteCnameRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + // The request body schema. + BucketCnameConfiguration *BucketCnameConfiguration `input:"body,BucketCnameConfiguration,xml,required"` + + RequestCommon +} + +type DeleteCnameResult struct { + ResultCommon +} + +// DeleteCname Deletes a CNAME record that is mapped to a bucket. 
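+//
+// Editor's note: a sketch, not part of the vendored source. The delete operation reuses the
+// same BucketCnameConfiguration body schema, supplying only the Domain to unbind; names are
+// placeholders:
+//
+//	_, err := client.DeleteCname(context.TODO(), &DeleteCnameRequest{
+//		Bucket: Ptr("examplebucket"),
+//		BucketCnameConfiguration: &BucketCnameConfiguration{Domain: Ptr("www.example.com")},
+//	})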
+func (c *Client) DeleteCname(ctx context.Context, request *DeleteCnameRequest, optFns ...func(*Options)) (*DeleteCnameResult, error) { + var err error + if request == nil { + request = &DeleteCnameRequest{} + } + input := &OperationInput{ + OpName: "DeleteCname", + Method: "POST", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "cname": "", + "comp": "delete", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"cname", "comp"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &DeleteCnameResult{} + + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type GetCnameTokenRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + // The name of the CNAME record that is mapped to the bucket. + Cname *string `input:"query,cname,required"` + + RequestCommon +} + +type GetCnameTokenResult struct { + // The container in which the CNAME token is stored. + CnameToken *CnameToken `output:"body,CnameToken,xml"` + + ResultCommon +} + +// GetCnameToken Queries the created CNAME tokens. +func (c *Client) GetCnameToken(ctx context.Context, request *GetCnameTokenRequest, optFns ...func(*Options)) (*GetCnameTokenResult, error) { + var err error + if request == nil { + request = &GetCnameTokenRequest{} + } + input := &OperationInput{ + OpName: "GetCnameToken", + Method: "GET", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "comp": "token", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"comp", "cname"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &GetCnameTokenResult{} + + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type CreateCnameTokenRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + // The request body schema. + BucketCnameConfiguration *BucketCnameConfiguration `input:"body,BucketCnameConfiguration,xml,required"` + + RequestCommon +} + +type CreateCnameTokenResult struct { + // The container in which the CNAME token is stored. + CnameToken *CnameToken `output:"body,CnameToken,xml"` + + ResultCommon +} + +// CreateCnameToken Creates a CNAME token to verify the ownership of a domain name. 
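+//
+// Editor's note: an illustrative sketch, not part of the vendored source. The returned
+// CnameToken.Token is the value used to verify ownership of the domain; names are placeholders:
+//
+//	result, err := client.CreateCnameToken(context.TODO(), &CreateCnameTokenRequest{
+//		Bucket: Ptr("examplebucket"),
+//		BucketCnameConfiguration: &BucketCnameConfiguration{Domain: Ptr("www.example.com")},
+//	})
+//	if err == nil && result.CnameToken != nil && result.CnameToken.Token != nil {
+//		fmt.Println(*result.CnameToken.Token)
+//	}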
+func (c *Client) CreateCnameToken(ctx context.Context, request *CreateCnameTokenRequest, optFns ...func(*Options)) (*CreateCnameTokenResult, error) { + var err error + if request == nil { + request = &CreateCnameTokenRequest{} + } + input := &OperationInput{ + OpName: "CreateCnameToken", + Method: "POST", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "cname": "", + "comp": "token", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"cname", "comp"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &CreateCnameTokenResult{} + + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_cors.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_cors.go new file mode 100644 index 000000000..83b0107aa --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_cors.go @@ -0,0 +1,250 @@ +package oss + +import ( + "context" + + "github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/signer" +) + +type CORSConfiguration struct { + // The container that stores CORS rules. Up to 10 rules can be configured for a bucket. + CORSRules []CORSRule `xml:"CORSRule"` + + // Indicates whether the Vary: Origin header was returned. Default value: false.- true: The Vary: Origin header is returned regardless whether the request is a cross-origin request or whether the cross-origin request succeeds.- false: The Vary: Origin header is not returned. + ResponseVary *bool `xml:"ResponseVary"` +} + +type CORSRule struct { + // The origins from which cross-origin requests are allowed. + AllowedOrigins []string `xml:"AllowedOrigin"` + + // The methods that you can use in cross-origin requests. + AllowedMethods []string `xml:"AllowedMethod"` + + // Specifies whether the headers specified by Access-Control-Request-Headers in the OPTIONS preflight request are allowed. Each header specified by Access-Control-Request-Headers must match the value of an AllowedHeader element. You can use only one asterisk (\*) as the wildcard character. + AllowedHeaders []string `xml:"AllowedHeader"` + + // The response headers for allowed access requests from applications, such as an XMLHttpRequest object in JavaScript. The asterisk (\*) wildcard character is not supported. + ExposeHeaders []string `xml:"ExposeHeader"` + + // The period of time within which the browser can cache the response to an OPTIONS preflight request for the specified resource. Unit: seconds.You can specify only one MaxAgeSeconds element in a CORS rule. + MaxAgeSeconds *int64 `xml:"MaxAgeSeconds"` +} + +type PutBucketCorsRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + // The request body schema. + CORSConfiguration *CORSConfiguration `input:"body,CORSConfiguration,xml,required"` + + RequestCommon +} + +type PutBucketCorsResult struct { + ResultCommon +} + +// PutBucketCors Configures cross-origin resource sharing (CORS) rules for a bucket. 
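+//
+// Editor's note: an illustrative sketch, not part of the vendored source; the rule values are
+// placeholders built only from the CORSConfiguration and CORSRule types defined above:
+//
+//	_, err := client.PutBucketCors(context.TODO(), &PutBucketCorsRequest{
+//		Bucket: Ptr("examplebucket"),
+//		CORSConfiguration: &CORSConfiguration{
+//			CORSRules: []CORSRule{{
+//				AllowedOrigins: []string{"*"},
+//				AllowedMethods: []string{"GET", "HEAD"},
+//				MaxAgeSeconds:  Ptr(int64(3600)),
+//			}},
+//		},
+//	})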
+func (c *Client) PutBucketCors(ctx context.Context, request *PutBucketCorsRequest, optFns ...func(*Options)) (*PutBucketCorsResult, error) { + var err error + if request == nil { + request = &PutBucketCorsRequest{} + } + input := &OperationInput{ + OpName: "PutBucketCors", + Method: "PUT", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "cors": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"cors"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &PutBucketCorsResult{} + + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type GetBucketCorsRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + RequestCommon +} + +type GetBucketCorsResult struct { + // The container that stores CORS configuration. + CORSConfiguration *CORSConfiguration `output:"body,CORSConfiguration,xml"` + + ResultCommon +} + +// GetBucketCors Queries the cross-origin resource sharing (CORS) rules that are configured for a bucket. +func (c *Client) GetBucketCors(ctx context.Context, request *GetBucketCorsRequest, optFns ...func(*Options)) (*GetBucketCorsResult, error) { + var err error + if request == nil { + request = &GetBucketCorsRequest{} + } + input := &OperationInput{ + OpName: "GetBucketCors", + Method: "GET", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "cors": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"cors"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &GetBucketCorsResult{} + + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type DeleteBucketCorsRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + RequestCommon +} + +type DeleteBucketCorsResult struct { + ResultCommon +} + +// DeleteBucketCors Disables the cross-origin resource sharing (CORS) feature and deletes all CORS rules for a bucket. 
+func (c *Client) DeleteBucketCors(ctx context.Context, request *DeleteBucketCorsRequest, optFns ...func(*Options)) (*DeleteBucketCorsResult, error) { + var err error + if request == nil { + request = &DeleteBucketCorsRequest{} + } + input := &OperationInput{ + OpName: "DeleteBucketCors", + Method: "DELETE", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "cors": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"cors"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &DeleteBucketCorsResult{} + + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type OptionObjectRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + // The full path of the object. + Key *string `input:"path,key,required"` + + // The origin of the request. It is used to identify a cross-origin request. You can specify only one Origin header in a cross-origin request. By default, this header is left empty. + Origin *string `input:"header,Origin,required"` + + // The method to be used in the actual cross-origin request. You can specify only one Access-Control-Request-Method header in a cross-origin request. By default, this header is left empty. + AccessControlRequestMethod *string `input:"header,Access-Control-Request-Method,required"` + + // The custom headers to be sent in the actual cross-origin request. You can configure multiple custom headers in a cross-origin request. Custom headers are separated by commas (,). By default, this header is left empty. + AccessControlRequestHeaders *string `input:"header,Access-Control-Request-Headers"` + + RequestCommon +} + +type OptionObjectResult struct { + // The HTTP method of the request. If the request is denied, the response does not contain the header. + AccessControlAllowMethods *string `output:"header,Access-Control-Allow-Methods"` + + // The list of headers included in the request. If the request includes headers that are not allowed, the response does not contain the headers and the request is denied. + AccessControlAllowHeaders *string `output:"header,Access-Control-Allow-Headers"` + + // The list of headers that can be accessed by JavaScript applications on a client. + AccessControlExposeHeaders *string `output:"header,Access-Control-Expose-Headers"` + + // The maximum duration for the browser to cache preflight results. Unit: seconds. + AccessControlMaxAge *int64 `output:"header,Access-Control-Max-Age"` + + // The origin that is included in the request. If the request is denied, the response does not contain the header. + AccessControlAllowOrigin *string `output:"header,Access-Control-Allow-Origin"` + + ResultCommon +} + +// OptionObject Determines whether to send a cross-origin request. Before a cross-origin request is sent, the browser sends a preflight OPTIONS request that includes a specific origin, HTTP method, and header information to Object Storage Service (OSS) to determine whether to send the cross-origin request. 
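+//
+// Editor's note: a sketch of a manual preflight check, not part of the vendored source;
+// bucket, key, and origin are placeholders:
+//
+//	result, err := client.OptionObject(context.TODO(), &OptionObjectRequest{
+//		Bucket:                     Ptr("examplebucket"),
+//		Key:                        Ptr("exampleobject.txt"),
+//		Origin:                     Ptr("https://www.example.com"),
+//		AccessControlRequestMethod: Ptr("PUT"),
+//	})
+//
+// On success, result.AccessControlAllowMethods reports the methods the CORS rules allow;
+// if the request is denied, the header is absent and the field stays nil.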
+func (c *Client) OptionObject(ctx context.Context, request *OptionObjectRequest, optFns ...func(*Options)) (*OptionObjectResult, error) { + var err error + if request == nil { + request = &OptionObjectRequest{} + } + input := &OperationInput{ + OpName: "OptionObject", + Method: "OPTIONS", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Bucket: request.Bucket, + Key: request.Key, + } + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &OptionObjectResult{} + + if err = c.unmarshalOutput(result, output, unmarshalHeader, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_encryption.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_encryption.go new file mode 100644 index 000000000..4af046ccf --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_encryption.go @@ -0,0 +1,170 @@ +package oss + +import ( + "context" + + "github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/signer" +) + +type ApplyServerSideEncryptionByDefault struct { + // The default server-side encryption method. Valid values: KMS, AES256, and SM4. You are charged when you call API operations to encrypt or decrypt data by using CMKs managed by KMS. For more information, see [Billing of KMS](~~52608~~). If the default server-side encryption method is configured for the destination bucket and ReplicaCMKID is configured in the CRR rule:* If objects in the source bucket are not encrypted, they are encrypted by using the default encryption method of the destination bucket after they are replicated.* If objects in the source bucket are encrypted by using SSE-KMS or SSE-OSS, they are encrypted by using the same method after they are replicated.For more information, see [Use data replication with server-side encryption](~~177216~~). + SSEAlgorithm *string `xml:"SSEAlgorithm"` + + // The CMK ID that is specified when SSEAlgorithm is set to KMS and a specified CMK is used for encryption. In other cases, leave this parameter empty. + KMSMasterKeyID *string `xml:"KMSMasterKeyID"` + + // The algorithm that is used to encrypt objects. If this parameter is not specified, objects are encrypted by using AES256. This parameter is valid only when SSEAlgorithm is set to KMS. Valid value: SM4. + KMSDataEncryption *string `xml:"KMSDataEncryption"` +} + +type ServerSideEncryptionRule struct { + // The container that stores the default server-side encryption method. + ApplyServerSideEncryptionByDefault *ApplyServerSideEncryptionByDefault `xml:"ApplyServerSideEncryptionByDefault"` +} + +type PutBucketEncryptionRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + // The request body schema. + ServerSideEncryptionRule *ServerSideEncryptionRule `input:"body,ServerSideEncryptionRule,xml,required"` + + RequestCommon +} + +type PutBucketEncryptionResult struct { + ResultCommon +} + +// PutBucketEncryption Configures encryption rules for a bucket. 
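+//
+// Editor's note: an illustrative sketch, not part of the vendored source. "AES256" is one of
+// the SSEAlgorithm values listed in the ApplyServerSideEncryptionByDefault doc comment above;
+// the bucket name is a placeholder:
+//
+//	_, err := client.PutBucketEncryption(context.TODO(), &PutBucketEncryptionRequest{
+//		Bucket: Ptr("examplebucket"),
+//		ServerSideEncryptionRule: &ServerSideEncryptionRule{
+//			ApplyServerSideEncryptionByDefault: &ApplyServerSideEncryptionByDefault{
+//				SSEAlgorithm: Ptr("AES256"),
+//			},
+//		},
+//	})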
+func (c *Client) PutBucketEncryption(ctx context.Context, request *PutBucketEncryptionRequest, optFns ...func(*Options)) (*PutBucketEncryptionResult, error) { + var err error + if request == nil { + request = &PutBucketEncryptionRequest{} + } + input := &OperationInput{ + OpName: "PutBucketEncryption", + Method: "PUT", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "encryption": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"encryption"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &PutBucketEncryptionResult{} + + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type GetBucketEncryptionRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + RequestCommon +} + +type GetBucketEncryptionResult struct { + // The container that stores server-side encryption rules. + ServerSideEncryptionRule *ServerSideEncryptionRule `output:"body,ServerSideEncryptionRule,xml"` + + ResultCommon +} + +// GetBucketEncryption Queries the encryption rules configured for a bucket. +func (c *Client) GetBucketEncryption(ctx context.Context, request *GetBucketEncryptionRequest, optFns ...func(*Options)) (*GetBucketEncryptionResult, error) { + var err error + if request == nil { + request = &GetBucketEncryptionRequest{} + } + input := &OperationInput{ + OpName: "GetBucketEncryption", + Method: "GET", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "encryption": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"encryption"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &GetBucketEncryptionResult{} + + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type DeleteBucketEncryptionRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + RequestCommon +} + +type DeleteBucketEncryptionResult struct { + ResultCommon +} + +// DeleteBucketEncryption Deletes encryption rules for a bucket. 
+func (c *Client) DeleteBucketEncryption(ctx context.Context, request *DeleteBucketEncryptionRequest, optFns ...func(*Options)) (*DeleteBucketEncryptionResult, error) { + var err error + if request == nil { + request = &DeleteBucketEncryptionRequest{} + } + input := &OperationInput{ + OpName: "DeleteBucketEncryption", + Method: "DELETE", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "encryption": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"encryption"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &DeleteBucketEncryptionResult{} + + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_httpsconfig.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_httpsconfig.go new file mode 100644 index 000000000..08f4f9ab8 --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_httpsconfig.go @@ -0,0 +1,114 @@ +package oss + +import ( + "context" + + "github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/signer" +) + +type TLS struct { + // Specifies whether to enable TLS version management for the bucket.Valid values:* true * false + Enable *bool `xml:"Enable"` + + // The TLS versions. + TLSVersions []string `xml:"TLSVersion"` +} + +type HttpsConfiguration struct { + // The container that stores TLS version configurations. + TLS *TLS `xml:"TLS"` +} + +type GetBucketHttpsConfigRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + RequestCommon +} + +type GetBucketHttpsConfigResult struct { + // The container that stores HTTPS configurations. + HttpsConfiguration *HttpsConfiguration `output:"body,HttpsConfiguration,xml"` + + ResultCommon +} + +// GetBucketHttpsConfig Queries the Transport Layer Security (TLS) version configurations of a bucket. +func (c *Client) GetBucketHttpsConfig(ctx context.Context, request *GetBucketHttpsConfigRequest, optFns ...func(*Options)) (*GetBucketHttpsConfigResult, error) { + var err error + if request == nil { + request = &GetBucketHttpsConfigRequest{} + } + input := &OperationInput{ + OpName: "GetBucketHttpsConfig", + Method: "GET", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "httpsConfig": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"httpsConfig"}) + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + result := &GetBucketHttpsConfigResult{} + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + return result, err +} + +type PutBucketHttpsConfigRequest struct { + // This name of the bucket. + Bucket *string `input:"host,bucket,required"` + + // The request body schema. 
+ HttpsConfiguration *HttpsConfiguration `input:"body,HttpsConfiguration,xml,required"` + + RequestCommon +} + +type PutBucketHttpsConfigResult struct { + ResultCommon +} + +// PutBucketHttpsConfig Enables or disables Transport Layer Security (TLS) version management for a bucket. +func (c *Client) PutBucketHttpsConfig(ctx context.Context, request *PutBucketHttpsConfigRequest, optFns ...func(*Options)) (*PutBucketHttpsConfigResult, error) { + var err error + if request == nil { + request = &PutBucketHttpsConfigRequest{} + } + input := &OperationInput{ + OpName: "PutBucketHttpsConfig", + Method: "PUT", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "httpsConfig": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"httpsConfig"}) + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &PutBucketHttpsConfigResult{} + + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + return result, err +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_inventory.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_inventory.go new file mode 100644 index 000000000..54d83529f --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_inventory.go @@ -0,0 +1,318 @@ +package oss + +import ( + "context" + + "github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/signer" +) + +type InventoryOSSBucketDestination struct { + // The format of exported inventory lists. The exported inventory lists are CSV objects compressed by using GZIP. + Format InventoryFormatType `xml:"Format"` + + // The ID of the account to which permissions are granted by the bucket owner. + AccountId *string `xml:"AccountId"` + + // The Alibaba Cloud Resource Name (ARN) of the role that has the permissions to read all objects from the source bucket and write objects to the destination bucket. Format: `acs:ram::uid:role/rolename`. + RoleArn *string `xml:"RoleArn"` + + // The name of the bucket in which exported inventory lists are stored. + Bucket *string `xml:"Bucket"` + + // The prefix of the path in which the exported inventory lists are stored. + Prefix *string `xml:"Prefix"` + + // The container that stores the encryption method of the exported inventory lists. + Encryption *InventoryEncryption `xml:"Encryption"` +} + +type InventoryDestination struct { + // The container that stores information about the bucket in which exported inventory lists are stored. + OSSBucketDestination *InventoryOSSBucketDestination `xml:"OSSBucketDestination"` +} + +type InventorySchedule struct { + // The frequency at which the inventory list is exported. Valid values:- Daily: The inventory list is exported on a daily basis. - Weekly: The inventory list is exported on a weekly basis. + Frequency InventoryFrequencyType `xml:"Frequency"` +} + +type InventoryFilter struct { + // The beginning of the time range during which the object was last modified. Unit: seconds.Valid values: [1262275200, 253402271999] + LastModifyBeginTimeStamp *int64 `xml:"LastModifyBeginTimeStamp"` + + // The end of the time range during which the object was last modified. 
Unit: seconds. Valid values: [1262275200, 253402271999]
+ LastModifyEndTimeStamp *int64 `xml:"LastModifyEndTimeStamp"`
+
+ // The minimum size of the specified object. Unit: B. Valid values: [0 B, 48.8 TB]
+ LowerSizeBound *int64 `xml:"LowerSizeBound"`
+
+ // The maximum size of the specified object. Unit: B. Valid values: (0 B, 48.8 TB]
+ UpperSizeBound *int64 `xml:"UpperSizeBound"`
+
+ // The storage class of the object. You can specify multiple storage classes. Valid values: Standard, IA, Archive, ColdArchive, All
+ StorageClass *string `xml:"StorageClass"`
+
+ // The prefix that is specified in the inventory.
+ Prefix *string `xml:"Prefix"`
+}
+
+type SSEKMS struct {
+ // The ID of the key that is managed by Key Management Service (KMS).
+ KeyId *string `xml:"KeyId"`
+}
+
+type InventoryEncryption struct {
+ // The container that stores information about the SSE-OSS encryption method.
+ SseOss *string `xml:"SSE-OSS"`
+
+ // The container that stores the customer master key (CMK) used for SSE-KMS encryption.
+ SseKms *SSEKMS `xml:"SSE-KMS"`
+}
+
+type InventoryConfiguration struct {
+ // The name of the inventory. The name must be unique in the bucket.
+ Id *string `xml:"Id"`
+
+ // Specifies whether to enable the bucket inventory feature. Valid values: * true * false
+ IsEnabled *bool `xml:"IsEnabled"`
+
+ // The container that stores the exported inventory lists.
+ Destination *InventoryDestination `xml:"Destination"`
+
+ // The container that stores information about the frequency at which inventory lists are exported.
+ Schedule *InventorySchedule `xml:"Schedule"`
+
+ // The container that stores the prefix used to filter objects. Only objects whose names contain the specified prefix are included in the inventory.
+ Filter *InventoryFilter `xml:"Filter"`
+
+ // Specifies whether to include the version information about the objects in inventory lists. Valid values:* All: The information about all versions of the objects is exported.* Current: Only the information about the current versions of the objects is exported.
+ IncludedObjectVersions *string `xml:"IncludedObjectVersions"`
+
+ // The container that stores the configuration fields in inventory lists.
+ OptionalFields *OptionalFields `xml:"OptionalFields"`
+}
+
+type ListInventoryConfigurationsResult struct {
+ // The container that stores inventory configurations.
+ InventoryConfigurations []InventoryConfiguration `xml:"InventoryConfiguration"`
+
+ // Specifies whether to list all inventory tasks configured for the bucket. Valid values: true and false. - The value of false indicates that all inventory tasks configured for the bucket are listed. - The value of true indicates that not all inventory tasks configured for the bucket are listed. To list the next page of inventory configurations, set the continuation-token parameter in the next request to the value of the NextContinuationToken header in the response to the current request.
+ IsTruncated *bool `xml:"IsTruncated"`
+
+ // If the value of IsTruncated in the response is true and value of this header is not null, set the continuation-token parameter in the next request to the value of this header.
+ NextContinuationToken *string `xml:"NextContinuationToken"`
+}
+
+type OptionalFields struct {
+ // The configuration fields that are included in inventory lists. Available configuration fields:* Size: the size of the object.* LastModifiedDate: the time when the object was last modified.* ETag: the ETag of the object.
It is used to identify the content of the object.* StorageClass: the storage class of the object.* IsMultipartUploaded: specifies whether the object is uploaded by using multipart upload.* EncryptionStatus: the encryption status of the object. + Fields []InventoryOptionalFieldType `xml:"Field"` +} + +type PutBucketInventoryRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + // The name of the inventory. + InventoryId *string `input:"query,inventoryId,required"` + + // Request body schema. + InventoryConfiguration *InventoryConfiguration `input:"body,InventoryConfiguration,xml,required"` + + RequestCommon +} + +type PutBucketInventoryResult struct { + ResultCommon +} + +// PutBucketInventory Configures an inventory for a bucket. +func (c *Client) PutBucketInventory(ctx context.Context, request *PutBucketInventoryRequest, optFns ...func(*Options)) (*PutBucketInventoryResult, error) { + var err error + if request == nil { + request = &PutBucketInventoryRequest{} + } + input := &OperationInput{ + OpName: "PutBucketInventory", + Method: "PUT", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "inventory": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"inventory", "inventoryId"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &PutBucketInventoryResult{} + + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type GetBucketInventoryRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + // The name of the inventory to be queried. + InventoryId *string `input:"query,inventoryId,required"` + + RequestCommon +} + +type GetBucketInventoryResult struct { + // The inventory task configured for a bucket. + InventoryConfiguration *InventoryConfiguration `output:"body,InventoryConfiguration,xml"` + + ResultCommon +} + +// GetBucketInventory Queries the inventories that are configured for a bucket. +func (c *Client) GetBucketInventory(ctx context.Context, request *GetBucketInventoryRequest, optFns ...func(*Options)) (*GetBucketInventoryResult, error) { + var err error + if request == nil { + request = &GetBucketInventoryRequest{} + } + input := &OperationInput{ + OpName: "GetBucketInventory", + Method: "GET", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "inventory": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"inventory", "inventoryId"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &GetBucketInventoryResult{} + + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type ListBucketInventoryRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + // Specify the start position of the list operation. You can obtain this token from the NextContinuationToken field of last ListBucketInventory's result. 
+ ContinuationToken *string `input:"query,continuation-token"` + + RequestCommon +} + +type ListBucketInventoryResult struct { + // The container that stores inventory configuration list. + ListInventoryConfigurationsResult *ListInventoryConfigurationsResult `output:"body,ListInventoryConfigurationsResult,xml"` + + ResultCommon +} + +// ListBucketInventory Queries all inventories in a bucket at a time. +func (c *Client) ListBucketInventory(ctx context.Context, request *ListBucketInventoryRequest, optFns ...func(*Options)) (*ListBucketInventoryResult, error) { + var err error + if request == nil { + request = &ListBucketInventoryRequest{} + } + input := &OperationInput{ + OpName: "ListBucketInventory", + Method: "GET", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "inventory": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"inventory"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &ListBucketInventoryResult{} + + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type DeleteBucketInventoryRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + // The name of the inventory that you want to delete. + InventoryId *string `input:"query,inventoryId,required"` + + RequestCommon +} + +type DeleteBucketInventoryResult struct { + ResultCommon +} + +// DeleteBucketInventory Deletes an inventory for a bucket. +func (c *Client) DeleteBucketInventory(ctx context.Context, request *DeleteBucketInventoryRequest, optFns ...func(*Options)) (*DeleteBucketInventoryResult, error) { + var err error + if request == nil { + request = &DeleteBucketInventoryRequest{} + } + input := &OperationInput{ + OpName: "DeleteBucketInventory", + Method: "DELETE", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "inventory": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"inventory", "inventoryId"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &DeleteBucketInventoryResult{} + + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_lifecycle.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_lifecycle.go new file mode 100644 index 000000000..66952e7a8 --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_lifecycle.go @@ -0,0 +1,280 @@ +package oss + +import ( + "context" + + "github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/signer" +) + +type LifecycleRule struct { + // Specifies whether to enable the rule. Valid values:* Enabled: enables the rule. OSS periodically executes the rule.* Disabled: does not enable the rule. OSS ignores the rule. 
+ Status *string `xml:"Status"` + + // The delete operation that you want OSS to perform on the parts that are uploaded in incomplete multipart upload tasks when the parts expire. + AbortMultipartUpload *LifecycleRuleAbortMultipartUpload `xml:"AbortMultipartUpload"` + + // Timestamp for when access tracking was enabled. + AtimeBase *int64 `xml:"AtimeBase"` + + // The conversion of the storage class of previous versions of the objects that match the lifecycle rule when the previous versions expire. The storage class of the previous versions can be converted to IA or Archive. The period of time from when the previous versions expire to when the storage class of the previous versions is converted to Archive must be longer than the period of time from when the previous versions expire to when the storage class of the previous versions is converted to IA. + NoncurrentVersionTransitions []NoncurrentVersionTransition `xml:"NoncurrentVersionTransition"` + + // The container that stores the Not parameter that is used to filter objects. + Filter *LifecycleRuleFilter `xml:"Filter"` + + // The ID of the lifecycle rule. The ID can contain up to 255 characters. If you do not specify the ID, OSS automatically generates a unique ID for the lifecycle rule. + ID *string `xml:"ID"` + + // The prefix in the names of the objects to which the rule applies. The prefixes specified by different rules cannot overlap.* If Prefix is specified, this rule applies only to objects whose names contain the specified prefix in the bucket.* If Prefix is not specified, this rule applies to all objects in the bucket. + Prefix *string `xml:"Prefix"` + + // The delete operation to perform on objects based on the lifecycle rule. For an object in a versioning-enabled bucket, the delete operation specified by this parameter is performed only on the current version of the object.The period of time from when the objects expire to when the objects are deleted must be longer than the period of time from when the objects expire to when the storage class of the objects is converted to IA or Archive. + Expiration *LifecycleRuleExpiration `xml:"Expiration"` + + // The conversion of the storage class of objects that match the lifecycle rule when the objects expire. The storage class of the objects can be converted to IA, Archive, and ColdArchive. The storage class of Standard objects in a Standard bucket can be converted to IA, Archive, or Cold Archive. The period of time from when the objects expire to when the storage class of the objects is converted to Archive must be longer than the period of time from when the objects expire to when the storage class of the objects is converted to IA. For example, if the validity period is set to 30 for objects whose storage class is converted to IA after the validity period, the validity period must be set to a value greater than 30 for objects whose storage class is converted to Archive. Either Days or CreatedBeforeDate is required. + Transitions []LifecycleRuleTransition `xml:"Transition"` + + // The tag of the objects to which the lifecycle rule applies. You can specify multiple tags. + Tags []Tag `xml:"Tag"` + + // The delete operation that you want OSS to perform on the previous versions of the objects that match the lifecycle rule when the previous versions expire. 
+	NoncurrentVersionExpiration *NoncurrentVersionExpiration `xml:"NoncurrentVersionExpiration"`
+}
+
+type LifecycleRuleAbortMultipartUpload struct {
+	// The number of days from when the objects were last modified to when the lifecycle rule takes effect.
+	Days *int32 `xml:"Days"`
+
+	// The date based on which the lifecycle rule takes effect. OSS performs the specified operation on data whose last modified date is earlier than this date. Specify the time in the ISO 8601 standard. The time must be at 00:00:00 in UTC.
+	CreatedBeforeDate *string `xml:"CreatedBeforeDate"`
+
+	// Deprecated: please use Days or CreatedBeforeDate.
+	// The date after which the lifecycle rule takes effect. If the specified time is earlier than the current moment, it takes effect immediately. (This field is NOT RECOMMENDED; please use Days or CreatedBeforeDate.)
+	Date *string `xml:"Date"`
+}
+
+type LifecycleRuleNot struct {
+	// The tag of the objects to which the lifecycle rule does not apply.
+	Tag *Tag `xml:"Tag"`
+
+	// The prefix in the names of the objects to which the lifecycle rule does not apply.
+	Prefix *string `xml:"Prefix"`
+}
+
+type LifecycleRuleFilter struct {
+	// The condition that is matched by objects to which the lifecycle rule does not apply.
+	Not *LifecycleRuleNot `xml:"Not"`
+
+	// This lifecycle rule only applies to files larger than this size.
+	ObjectSizeGreaterThan *int64 `xml:"ObjectSizeGreaterThan"`
+
+	// This lifecycle rule only applies to files smaller than this size.
+	ObjectSizeLessThan *int64 `xml:"ObjectSizeLessThan"`
+}
+
+type LifecycleRuleExpiration struct {
+	// The date based on which the lifecycle rule takes effect. OSS performs the specified operation on data whose last modified date is earlier than this date. The value of this parameter is in the yyyy-MM-ddT00:00:00.000Z format. Specify the time in the ISO 8601 standard. The time must be at 00:00:00 in UTC.
+	CreatedBeforeDate *string `xml:"CreatedBeforeDate"`
+
+	// The number of days from when the objects were last modified to when the lifecycle rule takes effect.
+	Days *int32 `xml:"Days"`
+
+	// Specifies whether to automatically remove expired delete markers.* true: Expired delete markers are automatically removed. If you set this parameter to true, you cannot specify the Days or CreatedBeforeDate parameter.* false: Expired delete markers are not automatically removed. If you set this parameter to false, you must specify the Days or CreatedBeforeDate parameter.
+	ExpiredObjectDeleteMarker *bool `xml:"ExpiredObjectDeleteMarker"`
+
+	// Deprecated: please use Days or CreatedBeforeDate.
+	// The date after which the lifecycle rule takes effect. If the specified time is earlier than the current moment, it takes effect immediately. (This field is NOT RECOMMENDED; please use Days or CreatedBeforeDate.)
+	Date *string `xml:"Date"`
+}
+
+type NoncurrentVersionExpiration struct {
+	// The number of days from when the objects became previous versions to when the lifecycle rule takes effect.
+	NoncurrentDays *int32 `xml:"NoncurrentDays"`
+}
+
+type NoncurrentVersionTransition struct {
+	// Specifies whether the lifecycle rule applies to objects based on their last access time. Valid values:* true: The rule applies to objects based on their last access time.* false: The rule applies to objects based on their last modified time.
+	IsAccessTime *bool `xml:"IsAccessTime"`
+
+	// Specifies whether to convert the storage class of non-Standard objects back to Standard after the objects are accessed.
This parameter takes effect only when the IsAccessTime parameter is set to true. Valid values:* true: converts the storage class of the objects to Standard.* false: does not convert the storage class of the objects to Standard. + ReturnToStdWhenVisit *bool `xml:"ReturnToStdWhenVisit"` + + // Specifies whether to convert the storage class of objects whose sizes are less than 64 KB to IA, Archive, or Cold Archive based on their last access time. Valid values:* true: converts the storage class of objects that are smaller than 64 KB to IA, Archive, or Cold Archive. Objects that are smaller than 64 KB are charged as 64 KB. Objects that are greater than or equal to 64 KB are charged based on their actual sizes. If you set this parameter to true, the storage fees may increase.* false: does not convert the storage class of an object that is smaller than 64 KB. + AllowSmallFile *bool `xml:"AllowSmallFile"` + + // The number of days from when the objects became previous versions to when the lifecycle rule takes effect. + NoncurrentDays *int32 `xml:"NoncurrentDays"` + + // The storage class to which objects are converted. Valid values:* IA* Archive* ColdArchive You can convert the storage class of objects in an IA bucket to only Archive or Cold Archive. + StorageClass StorageClassType `xml:"StorageClass"` +} + +type LifecycleRuleTransition struct { + // The date based on which the lifecycle rule takes effect. OSS performs the specified operation on data whose last modified date is earlier than this date. Specify the time in the ISO 8601 standard. The time must be at 00:00:00 in UTC. + CreatedBeforeDate *string `xml:"CreatedBeforeDate"` + + // The number of days from when the objects were last modified to when the lifecycle rule takes effect. + Days *int32 `xml:"Days"` + + // The storage class to which objects are converted. Valid values:* IA* Archive* ColdArchive You can convert the storage class of objects in an IA bucket to only Archive or Cold Archive. + StorageClass StorageClassType `xml:"StorageClass"` + + // Specifies whether the lifecycle rule applies to objects based on their last access time. Valid values:* true: The rule applies to objects based on their last access time.* false: The rule applies to objects based on their last modified time. + IsAccessTime *bool `xml:"IsAccessTime"` + + // Specifies whether to convert the storage class of non-Standard objects back to Standard after the objects are accessed. This parameter takes effect only when the IsAccessTime parameter is set to true. Valid values:* true: converts the storage class of the objects to Standard.* false: does not convert the storage class of the objects to Standard. + ReturnToStdWhenVisit *bool `xml:"ReturnToStdWhenVisit"` + + // Specifies whether to convert the storage class of objects whose sizes are less than 64 KB to IA, Archive, or Cold Archive based on their last access time. Valid values:* true: converts the storage class of objects that are smaller than 64 KB to IA, Archive, or Cold Archive. Objects that are smaller than 64 KB are charged as 64 KB. Objects that are greater than or equal to 64 KB are charged based on their actual sizes. If you set this parameter to true, the storage fees may increase.* false: does not convert the storage class of an object that is smaller than 64 KB. + AllowSmallFile *bool `xml:"AllowSmallFile"` +} + +type LifecycleConfiguration struct { + // The container that stores the lifecycle rules. 
+ Rules []LifecycleRule `xml:"Rule"` +} + +type PutBucketLifecycleRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + // Specifies whether to allow overlapped prefixes. Valid values:true: Overlapped prefixes are allowed.false: Overlapped prefixes are not allowed. + AllowSameActionOverlap *string `input:"header,x-oss-allow-same-action-overlap"` + + // The container of the request body. + LifecycleConfiguration *LifecycleConfiguration `input:"body,LifecycleConfiguration,xml,required"` + + RequestCommon +} + +type PutBucketLifecycleResult struct { + ResultCommon +} + +// PutBucketLifecycle Configures a lifecycle rule for a bucket. After you configure a lifecycle rule for a bucket, Object Storage Service (OSS) automatically deletes the objects that match the rule or converts the storage type of the objects based on the point in time that is specified in the lifecycle rule. +func (c *Client) PutBucketLifecycle(ctx context.Context, request *PutBucketLifecycleRequest, optFns ...func(*Options)) (*PutBucketLifecycleResult, error) { + var err error + if request == nil { + request = &PutBucketLifecycleRequest{} + } + input := &OperationInput{ + OpName: "PutBucketLifecycle", + Method: "PUT", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "lifecycle": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"lifecycle"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &PutBucketLifecycleResult{} + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + return result, err +} + +type GetBucketLifecycleRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + RequestCommon +} + +type GetBucketLifecycleResult struct { + // The container that stores the lifecycle rules configured for the bucket. + LifecycleConfiguration *LifecycleConfiguration `output:"body,LifecycleConfiguration,xml"` + + ResultCommon +} + +// GetBucketLifecycle Queries the lifecycle rules configured for a bucket. Only the owner of a bucket has the permissions to query the lifecycle rules configured for the bucket. +func (c *Client) GetBucketLifecycle(ctx context.Context, request *GetBucketLifecycleRequest, optFns ...func(*Options)) (*GetBucketLifecycleResult, error) { + var err error + if request == nil { + request = &GetBucketLifecycleRequest{} + } + input := &OperationInput{ + OpName: "GetBucketLifecycle", + Method: "GET", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "lifecycle": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"lifecycle"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &GetBucketLifecycleResult{} + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + return result, err +} + +type DeleteBucketLifecycleRequest struct { + // The name of the bucket. 
+ Bucket *string `input:"host,bucket,required"` + + RequestCommon +} + +type DeleteBucketLifecycleResult struct { + ResultCommon +} + +// DeleteBucketLifecycle Deletes the lifecycle rules of a bucket. +func (c *Client) DeleteBucketLifecycle(ctx context.Context, request *DeleteBucketLifecycleRequest, optFns ...func(*Options)) (*DeleteBucketLifecycleResult, error) { + var err error + if request == nil { + request = &DeleteBucketLifecycleRequest{} + } + input := &OperationInput{ + OpName: "DeleteBucketLifecycle", + Method: "DELETE", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "lifecycle": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"lifecycle"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &DeleteBucketLifecycleResult{} + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + return result, err +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_logging.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_logging.go new file mode 100644 index 000000000..640d9d0c6 --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_logging.go @@ -0,0 +1,320 @@ +package oss + +import ( + "context" + + "github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/signer" +) + +type LoggingEnabled struct { + // The bucket that stores access logs. + TargetBucket *string `xml:"TargetBucket"` + + // The prefix of the log objects. This parameter can be left empty. + TargetPrefix *string `xml:"TargetPrefix"` +} + +type BucketLoggingStatus struct { + // Indicates the container used to store access logging information. This element is returned if it is enabled and is not returned if it is disabled. + LoggingEnabled *LoggingEnabled `xml:"LoggingEnabled"` +} + +type LoggingHeaderSet struct { + // The list of the custom request headers. + Headers []string `xml:"header"` +} + +type LoggingParamSet struct { + // The list of the custom URL parameters. + Parameters []string `xml:"parameter"` +} + +type UserDefinedLogFieldsConfiguration struct { + // The container that stores the configurations of custom request headers. + HeaderSet *LoggingHeaderSet `xml:"HeaderSet"` + + // The container that stores the configurations of custom URL parameters. + ParamSet *LoggingParamSet `xml:"ParamSet"` +} + +type PutBucketLoggingRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + // The request body schema. + BucketLoggingStatus *BucketLoggingStatus `input:"body,BucketLoggingStatus,xml,required"` + + RequestCommon +} + +type PutBucketLoggingResult struct { + ResultCommon +} + +// PutBucketLogging Enables logging for a bucket. After you enable logging for a bucket, Object Storage Service (OSS) generates logs every hour based on the defined naming rule and stores the logs as objects in the specified destination bucket. 
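+//
+// Example (a minimal usage sketch, not part of the upstream docs; "examplebucket"
+// and the "log/" prefix are placeholders, client is an assumed already-initialized
+// *oss.Client, and Ptr is this SDK's generic pointer helper from to_ptr.go):
+//
+//	_, err := client.PutBucketLogging(context.TODO(), &oss.PutBucketLoggingRequest{
+//		Bucket: oss.Ptr("examplebucket"),
+//		BucketLoggingStatus: &oss.BucketLoggingStatus{
+//			LoggingEnabled: &oss.LoggingEnabled{
+//				TargetBucket: oss.Ptr("examplebucket"),
+//				TargetPrefix: oss.Ptr("log/"),
+//			},
+//		},
+//	})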
+func (c *Client) PutBucketLogging(ctx context.Context, request *PutBucketLoggingRequest, optFns ...func(*Options)) (*PutBucketLoggingResult, error) { + var err error + if request == nil { + request = &PutBucketLoggingRequest{} + } + input := &OperationInput{ + OpName: "PutBucketLogging", + Method: "PUT", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "logging": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"logging"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &PutBucketLoggingResult{} + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + return result, err +} + +type GetBucketLoggingRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + RequestCommon +} + +type GetBucketLoggingResult struct { + // Indicates the container used to store access logging configuration of a bucket. + BucketLoggingStatus *BucketLoggingStatus `output:"body,BucketLoggingStatus,xml"` + + ResultCommon +} + +// GetBucketLogging Queries the configurations of access log collection of a bucket. Only the owner of a bucket can query the configurations of access log collection of the bucket. +func (c *Client) GetBucketLogging(ctx context.Context, request *GetBucketLoggingRequest, optFns ...func(*Options)) (*GetBucketLoggingResult, error) { + var err error + if request == nil { + request = &GetBucketLoggingRequest{} + } + input := &OperationInput{ + OpName: "GetBucketLogging", + Method: "GET", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "logging": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"logging"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &GetBucketLoggingResult{} + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + return result, err +} + +type DeleteBucketLoggingRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + RequestCommon +} + +type DeleteBucketLoggingResult struct { + ResultCommon +} + +// DeleteBucketLogging Disables the logging feature for a bucket. 
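+//
+// Example (a minimal usage sketch, not part of the upstream docs; "examplebucket"
+// is a placeholder and client is an assumed already-initialized *oss.Client):
+//
+//	_, err := client.DeleteBucketLogging(context.TODO(), &oss.DeleteBucketLoggingRequest{
+//		Bucket: oss.Ptr("examplebucket"),
+//	})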
+func (c *Client) DeleteBucketLogging(ctx context.Context, request *DeleteBucketLoggingRequest, optFns ...func(*Options)) (*DeleteBucketLoggingResult, error) { + var err error + if request == nil { + request = &DeleteBucketLoggingRequest{} + } + input := &OperationInput{ + OpName: "DeleteBucketLogging", + Method: "DELETE", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "logging": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"logging"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &DeleteBucketLoggingResult{} + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + return result, err +} + +type PutUserDefinedLogFieldsConfigRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + // The container that stores the specified log configurations. + UserDefinedLogFieldsConfiguration *UserDefinedLogFieldsConfiguration `input:"body,UserDefinedLogFieldsConfiguration,xml,required"` + + RequestCommon +} + +type PutUserDefinedLogFieldsConfigResult struct { + ResultCommon +} + +// PutUserDefinedLogFieldsConfig Customizes the user_defined_log_fields field in real-time logs by adding custom request headers or query parameters to the field for subsequent analysis of requests. +func (c *Client) PutUserDefinedLogFieldsConfig(ctx context.Context, request *PutUserDefinedLogFieldsConfigRequest, optFns ...func(*Options)) (*PutUserDefinedLogFieldsConfigResult, error) { + var err error + if request == nil { + request = &PutUserDefinedLogFieldsConfigRequest{} + } + input := &OperationInput{ + OpName: "PutUserDefinedLogFieldsConfig", + Method: "PUT", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "userDefinedLogFieldsConfig": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"userDefinedLogFieldsConfig"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &PutUserDefinedLogFieldsConfigResult{} + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + return result, err +} + +type GetUserDefinedLogFieldsConfigRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + RequestCommon +} + +type GetUserDefinedLogFieldsConfigResult struct { + // The container for the user-defined logging configuration. + UserDefinedLogFieldsConfiguration *UserDefinedLogFieldsConfiguration `output:"body,UserDefinedLogFieldsConfiguration,xml"` + + ResultCommon +} + +// GetUserDefinedLogFieldsConfig Queries the custom configurations of the user_defined_log_fields field in the real-time logs of a bucket. 
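+//
+// Example (a minimal usage sketch, not part of the upstream docs; "examplebucket"
+// is a placeholder and client is an assumed already-initialized *oss.Client):
+//
+//	res, err := client.GetUserDefinedLogFieldsConfig(context.TODO(), &oss.GetUserDefinedLogFieldsConfigRequest{
+//		Bucket: oss.Ptr("examplebucket"),
+//	})
+//	if err == nil && res.UserDefinedLogFieldsConfiguration != nil {
+//		// HeaderSet and ParamSet list the configured custom headers and query parameters.
+//	}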
+func (c *Client) GetUserDefinedLogFieldsConfig(ctx context.Context, request *GetUserDefinedLogFieldsConfigRequest, optFns ...func(*Options)) (*GetUserDefinedLogFieldsConfigResult, error) { + var err error + if request == nil { + request = &GetUserDefinedLogFieldsConfigRequest{} + } + input := &OperationInput{ + OpName: "GetUserDefinedLogFieldsConfig", + Method: "GET", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "userDefinedLogFieldsConfig": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"userDefinedLogFieldsConfig"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &GetUserDefinedLogFieldsConfigResult{} + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + return result, err +} + +type DeleteUserDefinedLogFieldsConfigRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + RequestCommon +} + +type DeleteUserDefinedLogFieldsConfigResult struct { + ResultCommon +} + +// DeleteUserDefinedLogFieldsConfig Deletes the custom configurations of the user_defined_log_fields field in the real-time logs of a bucket. +func (c *Client) DeleteUserDefinedLogFieldsConfig(ctx context.Context, request *DeleteUserDefinedLogFieldsConfigRequest, optFns ...func(*Options)) (*DeleteUserDefinedLogFieldsConfigResult, error) { + var err error + if request == nil { + request = &DeleteUserDefinedLogFieldsConfigRequest{} + } + input := &OperationInput{ + OpName: "DeleteUserDefinedLogFieldsConfig", + Method: "DELETE", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "userDefinedLogFieldsConfig": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"userDefinedLogFieldsConfig"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &DeleteUserDefinedLogFieldsConfigResult{} + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + return result, err +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_metaquery.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_metaquery.go new file mode 100644 index 000000000..27f0df73c --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_metaquery.go @@ -0,0 +1,534 @@ +package oss + +import ( + "context" + + "github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/signer" +) + +type MetaQueryAggregation struct { + // The field name. + Field *string `xml:"Field"` + + // The operator for aggregate operations.* min* max* average* sum* count* distinct* group + Operation *string `xml:"Operation"` + + // The result of the aggregate operation. + Value *float64 `xml:"Value"` + + // The grouped aggregations. + Groups *MetaQueryGroups `xml:"Groups"` +} + +type MetaQueryGroups struct { + // The grouped aggregations. + Groups []MetaQueryGroup `xml:"Group"` +} + +type MetaQueryGroup struct { + // The value for the grouped aggregation. 
+ Value *string `xml:"Value"` + + // The number of results in the grouped aggregation. + Count *int64 `xml:"Count"` +} + +type MetaQueryAggregations struct { + // The container that stores the information about a single aggregate operation. + Aggregations []MetaQueryAggregation `xml:"Aggregation"` +} + +type MetaQueryUserMeta struct { + // The key of the user metadata item. + Key *string `xml:"Key"` + + // The value of the user metadata item. + Value *string `xml:"Value"` +} + +type MetaQueryFile struct { + // The time when the object was last modified. + FileModifiedTime *string `xml:"FileModifiedTime"` + + // The type of the object.Valid values:* Multipart : The object is uploaded by using multipart upload .* Symlink : The object is a symbolic link that was created by calling the PutSymlink operation. * Appendable : The object is uploaded by using AppendObject .* Normal : The object is uploaded by using PutObject. + OSSObjectType *string `xml:"OSSObjectType"` + + // The ETag of the object. + ETag *string `xml:"ETag"` + + // The server-side encryption algorithm used when the object was created. + ServerSideEncryptionCustomerAlgorithm *string `xml:"ServerSideEncryptionCustomerAlgorithm"` + + // The number of the tags of the object. + OSSTaggingCount *int64 `xml:"OSSTaggingCount"` + + // The tags. + OSSTagging []MetaQueryTagging `xml:"OSSTagging>Tagging"` + + // The user metadata items. + OSSUserMeta []MetaQueryUserMeta `xml:"OSSUserMeta>UserMeta"` + + // The full path of the object. + Filename *string `xml:"Filename"` + + // The storage class of the object.Valid values:* Archive : the Archive storage class .* ColdArchive : the Cold Archive storage class .* IA : the Infrequent Access (IA) storage class .* Standard : The Standard storage class . + OSSStorageClass *string `xml:"OSSStorageClass"` + + // The access control list (ACL) of the object.Valid values:* default : the ACL of the bucket .* private : private .* public-read : public-read .* public-read-write : public-read-write . + ObjectACL *string `xml:"ObjectACL"` + + // The CRC-64 value of the object. + OSSCRC64 *string `xml:"OSSCRC64"` + + // The server-side encryption of the object. + ServerSideEncryption *string `xml:"ServerSideEncryption"` + + // The object size. + Size *int64 `xml:"Size"` + + // The list of audio streams. + AudioStreams []MetaQueryAudioStream `xml:"AudioStreams>AudioStream"` + + // The algorithm used to encrypt objects. + ServerSideDataEncryption *string `xml:"ServerSideDataEncryption"` + + // The cross-origin request methods that are allowed. + AccessControlRequestMethod *string `xml:"AccessControlRequestMethod"` + + // The artist. + Artist *string `xml:"Artist"` + + // The total duration of the video. Unit: seconds. + Duration *float64 `xml:"Duration"` + + // The longitude and latitude information. + LatLong *string `xml:"LatLong"` + + // The list of subtitle streams. + Subtitles []MetaQuerySubtitle `xml:"Subtitles>Subtitle"` + + // The time when the image or video was taken. + ProduceTime *string `xml:"ProduceTime"` + + // The origins allowed in cross-origin requests. + AccessControlAllowOrigin *string `xml:"AccessControlAllowOrigin"` + + // The name of the object when it is downloaded. + ContentDisposition *string `xml:"ContentDisposition"` + + // The player. + Performer *string `xml:"Performer"` + + // The album. + Album *string `xml:"Album"` + + // The addresses. + Addresses []MetaQueryAddress `xml:"Addresses>Address"` + + // The Multipurpose Internet Mail Extensions (MIME) type of the object. 
+	ContentType *string `xml:"ContentType"`
+
+	// The content encoding format of the object when the object is downloaded.
+	ContentEncoding *string `xml:"ContentEncoding"`
+
+	// The language of the object content.
+	ContentLanguage *string `xml:"ContentLanguage"`
+
+	// The height of the image. Unit: pixel.
+	ImageHeight *int64 `xml:"ImageHeight"`
+
+	// The type of multimedia.
+	MediaType *string `xml:"MediaType"`
+
+	// The time when the object expires.
+	OSSExpiration *string `xml:"OSSExpiration"`
+
+	// The width of the image. Unit: pixel.
+	ImageWidth *int64 `xml:"ImageWidth"`
+
+	// The width of the video image. Unit: pixel.
+	VideoWidth *int64 `xml:"VideoWidth"`
+
+	// The composer.
+	Composer *string `xml:"Composer"`
+
+	// The full path of the object.
+	URI *string `xml:"URI"`
+
+	// The height of the video image. Unit: pixel.
+	VideoHeight *int64 `xml:"VideoHeight"`
+
+	// The list of video streams.
+	VideoStreams []MetaQueryVideoStream `xml:"VideoStreams>VideoStream"`
+
+	// The web page caching behavior that is performed when the object is downloaded.
+	CacheControl *string `xml:"CacheControl"`
+
+	// The bitrate. Unit: bit/s.
+	Bitrate *int64 `xml:"Bitrate"`
+
+	// The singer.
+	AlbumArtist *string `xml:"AlbumArtist"`
+
+	// The title of the object.
+	Title *string `xml:"Title"`
+
+	// The ID of the customer master key (CMK) that is managed by Key Management Service (KMS).
+	ServerSideEncryptionKeyId *string `xml:"ServerSideEncryptionKeyId"`
+}
+
+type MetaQueryVideoStream struct {
+	// The bitrate. Unit: bit/s.
+	Bitrate *int64 `xml:"Bitrate"`
+
+	// The start time of the video stream in seconds.
+	StartTime *float64 `xml:"StartTime"`
+
+	// The duration of the video stream in seconds.
+	Duration *float64 `xml:"Duration"`
+
+	// The pixel format of the video stream.
+	PixelFormat *string `xml:"PixelFormat"`
+
+	// The image height of the video stream. Unit: pixel.
+	Height *int64 `xml:"Height"`
+
+	// The color space.
+	ColorSpace *string `xml:"ColorSpace"`
+
+	// The image width of the video stream. Unit: pixel.
+	Width *int64 `xml:"Width"`
+
+	// The abbreviated name of the codec.
+	CodecName *string `xml:"CodecName"`
+
+	// The language used in the video stream. The value follows the BCP 47 format.
+	Language *string `xml:"Language"`
+
+	// The frame rate of the video stream.
+	FrameRate *string `xml:"FrameRate"`
+
+	// The number of video frames.
+	FrameCount *int64 `xml:"FrameCount"`
+
+	// The bit depth.
+	BitDepth *int64 `xml:"BitDepth"`
+}
+
+type MetaQueryAddress struct {
+	// The country.
+	Country *string `xml:"Country"`
+
+	// The city.
+	City *string `xml:"City"`
+
+	// The district.
+	District *string `xml:"District"`
+
+	// The language of the address. The value follows the BCP 47 format.
+	Language *string `xml:"Language"`
+
+	// The province.
+	Province *string `xml:"Province"`
+
+	// The street.
+	Township *string `xml:"Township"`
+
+	// The full address.
+	AddressLine *string `xml:"AddressLine"`
+}
+
+type MetaQuerySubtitle struct {
+	// The start time of the subtitle stream in seconds.
+	StartTime *float64 `xml:"StartTime"`
+
+	// The duration of the subtitle stream in seconds.
+	Duration *float64 `xml:"Duration"`
+
+	// The abbreviated name of the codec.
+	CodecName *string `xml:"CodecName"`
+
+	// The language of the subtitle. The value follows the BCP 47 format.
+	Language *string `xml:"Language"`
+}
+
+type MetaQueryAudioStream struct {
+	// The sampling rate.
+	SampleRate *int64 `xml:"SampleRate"`
+
+	// The start time of the audio stream.
+	StartTime *float64 `xml:"StartTime"`
+
+	// The duration of the audio stream.
+	Duration *float64 `xml:"Duration"`
+
+	// The number of sound channels.
+	Channels *int64 `xml:"Channels"`
+
+	// The language used in the audio stream. The value follows the BCP 47 format.
+	Language *string `xml:"Language"`
+
+	// The abbreviated name of the codec.
+	CodecName *string `xml:"CodecName"`
+
+	// The bitrate. Unit: bit/s.
+	Bitrate *int64 `xml:"Bitrate"`
+}
+
+type MetaQuery struct {
+	// The maximum number of objects to return. Valid values: 0 to 100. If this parameter is not set or is set to 0, up to 100 objects are returned.
+	MaxResults *int64 `xml:"MaxResults"`
+
+	// The query conditions. A query condition includes the following elements:* Operation: the operator. Valid values: eq (equal to), gt (greater than), gte (greater than or equal to), lt (less than), lte (less than or equal to), match (fuzzy query), prefix (prefix query), and (AND), or (OR), and not (NOT).* Field: the field name.* Value: the field value.* SubQueries: the subquery conditions. Options that are included in this element are the same as those of simple query. You need to set subquery conditions only when Operation is set to and, or, or not.
+	Query *string `xml:"Query"`
+
+	// The field based on which the results are sorted.
+	Sort *string `xml:"Sort"`
+
+	// The sort order.
+	Order *MetaQueryOrderType `xml:"Order"`
+
+	// The container that stores the information about aggregate operations.
+	Aggregations *MetaQueryAggregations `xml:"Aggregations"`
+
+	// The pagination token used to obtain information in the next request. The object information is returned in alphabetical order starting from the value of NextToken.
+	NextToken *string `xml:"NextToken"`
+
+	// The type of multimedia that you want to query. Valid values: image, video, audio, document.
+	MediaType *string `xml:"MediaTypes>MediaType"`
+
+	// The query conditions.
+	SimpleQuery *string `xml:"SimpleQuery"`
+}
+
+type MetaQueryStatus struct {
+	// The time when the metadata index library was created. The value follows the RFC 3339 standard in the YYYY-MM-DDTHH:mm:ss+TIMEZONE format. YYYY-MM-DD indicates the year, month, and day. T indicates the beginning of the time element. HH:mm:ss indicates the hour, minute, and second. TIMEZONE indicates the time zone.
+	CreateTime *string `xml:"CreateTime"`
+
+	// The time when the metadata index library was updated. The value follows the RFC 3339 standard in the YYYY-MM-DDTHH:mm:ss+TIMEZONE format. YYYY-MM-DD indicates the year, month, and day. T indicates the beginning of the time element. HH:mm:ss indicates the hour, minute, and second. TIMEZONE indicates the time zone.
+	UpdateTime *string `xml:"UpdateTime"`
+
+	// The status of the metadata index library. Valid values:- Ready: The metadata index library is being prepared after it is created. In this case, the metadata index library cannot be used to query data.- Stop: The metadata index library is paused.- Running: The metadata index library is running.- Retrying: The metadata index library failed to be created and is being created again.- Failed: The metadata index library failed to be created.- Deleted: The metadata index library is deleted.
+	State *string `xml:"State"`
+
+	// The scan type. Valid values:- FullScanning: Full scanning is in progress.- IncrementalScanning: Incremental scanning is in progress.
+	Phase *string `xml:"Phase"`
+}
+
+type MetaQueryTagging struct {
+	// The tag key.
+	Key *string `xml:"Key"`
+
+	// The tag value.
+ Value *string `xml:"Value"` +} + +type GetMetaQueryStatusRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + RequestCommon +} + +type GetMetaQueryStatusResult struct { + // The container that stores the metadata information. + MetaQueryStatus *MetaQueryStatus `output:"body,MetaQueryStatus,xml"` + + ResultCommon +} + +// GetMetaQueryStatus Queries the information about the metadata index library of a bucket. +func (c *Client) GetMetaQueryStatus(ctx context.Context, request *GetMetaQueryStatusRequest, optFns ...func(*Options)) (*GetMetaQueryStatusResult, error) { + var err error + if request == nil { + request = &GetMetaQueryStatusRequest{} + } + input := &OperationInput{ + OpName: "GetMetaQueryStatus", + Method: "GET", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "metaQuery": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"metaQuery"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &GetMetaQueryStatusResult{} + + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type CloseMetaQueryRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + RequestCommon +} + +type CloseMetaQueryResult struct { + ResultCommon +} + +// CloseMetaQuery Disables the metadata management feature for an Object Storage Service (OSS) bucket. After the metadata management feature is disabled for a bucket, OSS automatically deletes the metadata index library of the bucket and you cannot perform metadata indexing. +func (c *Client) CloseMetaQuery(ctx context.Context, request *CloseMetaQueryRequest, optFns ...func(*Options)) (*CloseMetaQueryResult, error) { + var err error + if request == nil { + request = &CloseMetaQueryRequest{} + } + input := &OperationInput{ + OpName: "CloseMetaQuery", + Method: "POST", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "comp": "delete", + "metaQuery": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"metaQuery", "comp"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &CloseMetaQueryResult{} + + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type DoMetaQueryRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + Mode *string `input:"query,mode"` + + // The request body schema. + MetaQuery *MetaQuery `input:"body,MetaQuery,xml,required"` + + RequestCommon +} + +type DoMetaQueryResult struct { + // The token that is used for the next query when the total number of objects exceeds the value of MaxResults.The value of NextToken is used to return the unreturned results in the next query.This parameter has a value only when not all objects are returned. + NextToken *string `xml:"NextToken"` + + // The list of file information. 
+	Files []MetaQueryFile `xml:"Files>File"`
+
+	// The list of aggregation results.
+	Aggregations []MetaQueryAggregation `xml:"Aggregations>Aggregation"`
+
+	ResultCommon
+}
+
+// DoMetaQuery Queries the objects in a bucket that meet the specified conditions by using the data indexing feature. The information about the objects is listed based on the specified fields and sorting methods.
+func (c *Client) DoMetaQuery(ctx context.Context, request *DoMetaQueryRequest, optFns ...func(*Options)) (*DoMetaQueryResult, error) {
+	var err error
+	if request == nil {
+		request = &DoMetaQueryRequest{}
+	}
+	input := &OperationInput{
+		OpName: "DoMetaQuery",
+		Method: "POST",
+		Headers: map[string]string{
+			HTTPHeaderContentType: contentTypeXML,
+		},
+		Parameters: map[string]string{
+			"comp":      "query",
+			"metaQuery": "",
+		},
+		Bucket: request.Bucket,
+	}
+	input.OpMetadata.Set(signer.SubResource, []string{"metaQuery", "comp"})
+
+	if err = c.marshalInput(request, input, updateContentMd5); err != nil {
+		return nil, err
+	}
+	output, err := c.invokeOperation(ctx, input, optFns)
+	if err != nil {
+		return nil, err
+	}
+
+	result := &DoMetaQueryResult{}
+
+	if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil {
+		return nil, c.toClientError(err, "UnmarshalOutputFail", output)
+	}
+
+	return result, err
+}
+
+type OpenMetaQueryRequest struct {
+	// The name of the bucket.
+	Bucket *string `input:"host,bucket,required"`
+
+	Mode *string `input:"query,mode"`
+
+	RequestCommon
+}
+
+type OpenMetaQueryResult struct {
+	ResultCommon
+}
+
+// OpenMetaQuery Enables metadata management for a bucket. After you enable the metadata management feature for a bucket, Object Storage Service (OSS) creates a metadata index library for the bucket and creates metadata indexes for all objects in the bucket. After the metadata index library is created, OSS continues to perform quasi-real-time scans on incremental objects in the bucket and creates metadata indexes for the incremental objects.
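+//
+// Example (a minimal usage sketch, not part of the upstream docs; "examplebucket"
+// is a placeholder, client is an assumed already-initialized *oss.Client, and Ptr
+// is this SDK's generic pointer helper from to_ptr.go):
+//
+//	_, err := client.OpenMetaQuery(context.TODO(), &oss.OpenMetaQueryRequest{
+//		Bucket: oss.Ptr("examplebucket"),
+//	})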
+func (c *Client) OpenMetaQuery(ctx context.Context, request *OpenMetaQueryRequest, optFns ...func(*Options)) (*OpenMetaQueryResult, error) {
+	var err error
+	if request == nil {
+		request = &OpenMetaQueryRequest{}
+	}
+	input := &OperationInput{
+		OpName: "OpenMetaQuery",
+		Method: "POST",
+		Headers: map[string]string{
+			HTTPHeaderContentType: contentTypeXML,
+		},
+		Parameters: map[string]string{
+			"comp":      "add",
+			"metaQuery": "",
+		},
+		Bucket: request.Bucket,
+	}
+	input.OpMetadata.Set(signer.SubResource, []string{"metaQuery", "comp"})
+
+	if err = c.marshalInput(request, input, updateContentMd5); err != nil {
+		return nil, err
+	}
+	output, err := c.invokeOperation(ctx, input, optFns)
+	if err != nil {
+		return nil, err
+	}
+
+	result := &OpenMetaQueryResult{}
+
+	if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil {
+		return nil, c.toClientError(err, "UnmarshalOutputFail", output)
+	}
+
+	return result, err
+}
diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_objectfcaccesspoint.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_objectfcaccesspoint.go
new file mode 100644
index 000000000..8f162a35c
--- /dev/null
+++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_objectfcaccesspoint.go
@@ -0,0 +1,710 @@
+package oss
+
+import (
+	"context"
+	"io"
+
+	"github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/signer"
+)
+
+type AccessPointsForObjectProcess struct {
+	// The container that stores information about a single Object FC Access Point.
+	AccessPointForObjectProcesss []AccessPointForObjectProcess `xml:"AccessPointForObjectProcess"`
+}
+
+type TransformationConfiguration struct {
+	// The container that stores the operations.
+	Actions *AccessPointActions `xml:"Actions"`
+
+	// The container that stores the content of the transformation configurations.
+	ContentTransformation *ContentTransformation `xml:"ContentTransformation"`
+}
+
+type ObjectProcessConfiguration struct {
+	// Specifies that Function Compute supports Range GetObject requests.
+	AllowedFeatures []string `xml:"AllowedFeatures>AllowedFeature"`
+
+	// The container that stores the transformation configurations.
+	TransformationConfigurations []TransformationConfiguration `xml:"TransformationConfigurations>TransformationConfiguration"`
+}
+
+type CreateAccessPointForObjectProcessConfiguration struct {
+	// Whether to allow anonymous users to access this Object FC Access Point.
+	AllowAnonymousAccessForObjectProcess *string `xml:"AllowAnonymousAccessForObjectProcess"`
+
+	// The name of the access point.
+	AccessPointName *string `xml:"AccessPointName"`
+
+	// The container that stores the processing information about the Object FC Access Point.
+	ObjectProcessConfiguration *ObjectProcessConfiguration `xml:"ObjectProcessConfiguration"`
+}
+
+type AccessPointEndpoints struct {
+	// The internal endpoint of the Object FC Access Point.
+	InternalEndpoint *string `xml:"InternalEndpoint"`
+
+	// The public endpoint of the Object FC Access Point.
+	PublicEndpoint *string `xml:"PublicEndpoint"`
+}
+
+type AccessPointForObjectProcess struct {
+	// The status of the Object FC Access Point. Valid values:enable: The Object FC Access Point is created.disable: The Object FC Access Point is disabled.creating: The Object FC Access Point is being created.deleting: The Object FC Access Point is deleted.
+	Status *string `xml:"Status"`
+
+	// Whether to allow anonymous users to access this Object FC Access Point.
+	AllowAnonymousAccessForObjectProcess *string `xml:"AllowAnonymousAccessForObjectProcess"`
+
+	// The name of the Object FC Access Point.
+	AccessPointNameForObjectProcess *string `xml:"AccessPointNameForObjectProcess"`
+
+	// The alias of the Object FC Access Point.
+	AccessPointForObjectProcessAlias *string `xml:"AccessPointForObjectProcessAlias"`
+
+	// The name of the access point.
+	AccessPointName *string `xml:"AccessPointName"`
+}
+
+type AccessPointActions struct {
+	// The supported OSS API operations. Only the GetObject operation is supported.
+	Actions []string `xml:"Action"`
+}
+
+type CustomForwardHeaders struct {
+	CustomForwardHeaders []string `xml:"CustomForwardHeader"`
+}
+
+type ContentTransformation struct {
+	// The Alibaba Cloud Resource Name (ARN) of the role that Function Compute uses to access your resources in other cloud services. The default role is AliyunFCDefaultRole.
+	FunctionAssumeRoleArn *string `xml:"FunctionCompute>FunctionAssumeRoleArn"`
+
+	// The ARN of the function. For more information,
+	FunctionArn *string `xml:"FunctionCompute>FunctionArn"`
+
+	//CustomForwardHeaders *CustomForwardHeaders `xml:"AdditionalFeatures>CustomForwardHeaders"`
+}
+
+type PutAccessPointConfigForObjectProcessConfiguration struct {
+	// Whether to allow anonymous users to access this Object FC Access Point.
+	AllowAnonymousAccessForObjectProcess *string `xml:"AllowAnonymousAccessForObjectProcess"`
+
+	// The container in which the Block Public Access configurations are stored.
+	PublicAccessBlockConfiguration *PublicAccessBlockConfiguration `xml:"PublicAccessBlockConfiguration"`
+
+	// The container that stores the processing information about the Object FC Access Point.
+	ObjectProcessConfiguration *ObjectProcessConfiguration `xml:"ObjectProcessConfiguration"`
+}
+
+type CreateAccessPointForObjectProcessRequest struct {
+	// The name of the bucket.
+	Bucket *string `input:"host,bucket,required"`
+
+	// The name of the Object FC Access Point.
+	AccessPointForObjectProcessName *string `input:"header,x-oss-access-point-for-object-process-name,required"`
+
+	// The request body.
+	CreateAccessPointForObjectProcessConfiguration *CreateAccessPointForObjectProcessConfiguration `input:"body,CreateAccessPointForObjectProcessConfiguration,xml,required"`
+
+	RequestCommon
+}
+
+type CreateAccessPointForObjectProcessResult struct {
+	// The ARN of the Object FC Access Point.
+	AccessPointForObjectProcessArn *string `xml:"AccessPointForObjectProcessArn"`
+
+	// The alias of the Object FC Access Point.
+	AccessPointForObjectProcessAlias *string `xml:"AccessPointForObjectProcessAlias"`
+
+	ResultCommon
+}
+
+// CreateAccessPointForObjectProcess Creates an Object FC Access Point.
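+//
+// Example (a minimal usage sketch, not part of the upstream docs; the bucket and
+// access point names and the Function Compute ARN are hypothetical placeholders,
+// and client is an assumed already-initialized *oss.Client):
+//
+//	_, err := client.CreateAccessPointForObjectProcess(context.TODO(), &oss.CreateAccessPointForObjectProcessRequest{
+//		Bucket:                          oss.Ptr("examplebucket"),
+//		AccessPointForObjectProcessName: oss.Ptr("fc-ap-example-01"),
+//		CreateAccessPointForObjectProcessConfiguration: &oss.CreateAccessPointForObjectProcessConfiguration{
+//			AccessPointName: oss.Ptr("ap-example-01"),
+//			ObjectProcessConfiguration: &oss.ObjectProcessConfiguration{
+//				TransformationConfigurations: []oss.TransformationConfiguration{{
+//					Actions: &oss.AccessPointActions{Actions: []string{"GetObject"}},
+//					ContentTransformation: &oss.ContentTransformation{
+//						FunctionArn: oss.Ptr("acs:fc:..."), // hypothetical function ARN
+//					},
+//				}},
+//			},
+//		},
+//	})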
+func (c *Client) CreateAccessPointForObjectProcess(ctx context.Context, request *CreateAccessPointForObjectProcessRequest, optFns ...func(*Options)) (*CreateAccessPointForObjectProcessResult, error) {
+	var err error
+	if request == nil {
+		request = &CreateAccessPointForObjectProcessRequest{}
+	}
+	input := &OperationInput{
+		OpName: "CreateAccessPointForObjectProcess",
+		Method: "PUT",
+		Headers: map[string]string{
+			HTTPHeaderContentType: contentTypeXML,
+		},
+		Parameters: map[string]string{
+			"accessPointForObjectProcess": "",
+		},
+		Bucket: request.Bucket,
+	}
+
+	input.OpMetadata.Set(signer.SubResource, []string{"accessPointForObjectProcess"})
+
+	if err = c.marshalInput(request, input, updateContentMd5); err != nil {
+		return nil, err
+	}
+	output, err := c.invokeOperation(ctx, input, optFns)
+	if err != nil {
+		return nil, err
+	}
+
+	result := &CreateAccessPointForObjectProcessResult{}
+
+	if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil {
+		return nil, c.toClientError(err, "UnmarshalOutputFail", output)
+	}
+
+	return result, err
+}
+
+type GetAccessPointForObjectProcessRequest struct {
+	// The name of the bucket.
+	Bucket *string `input:"host,bucket,required"`
+
+	// The name of the Object FC Access Point. The name of an Object FC Access Point must meet the following requirements:The name cannot exceed 63 characters in length.The name can contain only lowercase letters, digits, and hyphens (-) and cannot start or end with a hyphen (-).The name must be unique in the current region.
+	AccessPointForObjectProcessName *string `input:"header,x-oss-access-point-for-object-process-name,required"`
+
+	RequestCommon
+}
+
+type GetAccessPointForObjectProcessResult struct {
+	// Whether to allow anonymous users to access this Object FC Access Point.
+	AllowAnonymousAccessForObjectProcess *string `xml:"AllowAnonymousAccessForObjectProcess"`
+
+	// The container in which the Block Public Access configurations are stored.
+	PublicAccessBlockConfiguration *PublicAccessBlockConfiguration `xml:"PublicAccessBlockConfiguration"`
+
+	// The name of the Object FC Access Point.
+	AccessPointNameForObjectProcess *string `xml:"AccessPointNameForObjectProcess"`
+
+	// The ARN of the Object FC Access Point.
+	AccessPointForObjectProcessArn *string `xml:"AccessPointForObjectProcessArn"`
+
+	// The time when the Object FC Access Point was created. The value is a timestamp.
+	CreationDate *string `xml:"CreationDate"`
+
+	// The status of the Object FC Access Point. Valid values:enable: The Object FC Access Point is created.disable: The Object FC Access Point is disabled.creating: The Object FC Access Point is being created.deleting: The Object FC Access Point is deleted.
+	AccessPointForObjectProcessStatus *string `xml:"Status"`
+
+	// The container that stores the endpoints of the Object FC Access Point.
+	Endpoints *AccessPointEndpoints `xml:"Endpoints"`
+
+	// The alias of the Object FC Access Point.
+	AccessPointForObjectProcessAlias *string `xml:"AccessPointForObjectProcessAlias"`
+
+	// The name of the access point.
+	AccessPointName *string `xml:"AccessPointName"`
+
+	// The UID of the Alibaba Cloud account to which the Object FC Access Point belongs.
+	AccountId *string `xml:"AccountId"`
+
+	ResultCommon
+}
+
+// GetAccessPointForObjectProcess Queries basic information about an Object FC Access Point.
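+//
+// Example (a minimal usage sketch, not part of the upstream docs; names are
+// placeholders and client is an assumed already-initialized *oss.Client):
+//
+//	res, err := client.GetAccessPointForObjectProcess(context.TODO(), &oss.GetAccessPointForObjectProcessRequest{
+//		Bucket:                          oss.Ptr("examplebucket"),
+//		AccessPointForObjectProcessName: oss.Ptr("fc-ap-example-01"),
+//	})
+//	// On success, res.Endpoints carries the public and internal endpoints.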
+func (c *Client) GetAccessPointForObjectProcess(ctx context.Context, request *GetAccessPointForObjectProcessRequest, optFns ...func(*Options)) (*GetAccessPointForObjectProcessResult, error) {
+	var err error
+	if request == nil {
+		request = &GetAccessPointForObjectProcessRequest{}
+	}
+	input := &OperationInput{
+		OpName: "GetAccessPointForObjectProcess",
+		Method: "GET",
+		Headers: map[string]string{
+			HTTPHeaderContentType: contentTypeXML,
+		},
+		Parameters: map[string]string{
+			"accessPointForObjectProcess": "",
+		},
+		Bucket: request.Bucket,
+	}
+
+	input.OpMetadata.Set(signer.SubResource, []string{"accessPointForObjectProcess"})
+
+	if err = c.marshalInput(request, input, updateContentMd5); err != nil {
+		return nil, err
+	}
+	output, err := c.invokeOperation(ctx, input, optFns)
+	if err != nil {
+		return nil, err
+	}
+
+	result := &GetAccessPointForObjectProcessResult{}
+
+	if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil {
+		return nil, c.toClientError(err, "UnmarshalOutputFail", output)
+	}
+
+	return result, err
+}
+
+type ListAccessPointsForObjectProcessRequest struct {
+	// The maximum number of Object FC Access Points to return. Valid values: 1 to 1000. If the list cannot be complete at a time due to the configurations of the max-keys element, the NextContinuationToken element is included in the response as the token for the next list.
+	MaxKeys int64 `input:"query,max-keys"`
+
+	// The token from which the list operation must start. You can obtain this token from the NextContinuationToken element in the returned result.
+	ContinuationToken *string `input:"query,continuation-token"`
+
+	RequestCommon
+}
+
+type ListAccessPointsForObjectProcessResult struct {
+	// The container that stores information about all Object FC Access Points.
+	AccessPointsForObjectProcess *AccessPointsForObjectProcess `xml:"AccessPointsForObjectProcess"`
+
+	// Indicates whether the returned results are truncated. Valid values:true: indicates that not all results are returned for the request.false: indicates that all results are returned for the request.
+	IsTruncated *bool `xml:"IsTruncated"`
+
+	// The token that is used for the next list operation when not all results are returned.
+	NextContinuationToken *string `xml:"NextContinuationToken"`
+
+	// The UID of the Alibaba Cloud account to which the Object FC Access Points belong.
+	AccountId *string `xml:"AccountId"`
+
+	ResultCommon
+}
+
+// ListAccessPointsForObjectProcess Lists information about Object FC Access Points in an Alibaba Cloud account.
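+//
+// Example (a minimal pagination sketch, not part of the upstream docs; client is
+// an assumed already-initialized *oss.Client). Note that this operation is
+// account-level: the request carries no Bucket field.
+//
+//	req := &oss.ListAccessPointsForObjectProcessRequest{MaxKeys: 100}
+//	for {
+//		res, err := client.ListAccessPointsForObjectProcess(context.TODO(), req)
+//		if err != nil {
+//			break // handle the error in real code
+//		}
+//		// consume res.AccessPointsForObjectProcess here
+//		if res.IsTruncated == nil || !*res.IsTruncated {
+//			break
+//		}
+//		req.ContinuationToken = res.NextContinuationToken
+//	}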
+func (c *Client) ListAccessPointsForObjectProcess(ctx context.Context, request *ListAccessPointsForObjectProcessRequest, optFns ...func(*Options)) (*ListAccessPointsForObjectProcessResult, error) {
+	var err error
+	if request == nil {
+		request = &ListAccessPointsForObjectProcessRequest{}
+	}
+	input := &OperationInput{
+		OpName: "ListAccessPointsForObjectProcess",
+		Method: "GET",
+		Headers: map[string]string{
+			HTTPHeaderContentType: contentTypeXML,
+		},
+		Parameters: map[string]string{
+			"accessPointForObjectProcess": "",
+		},
+	}
+
+	input.OpMetadata.Set(signer.SubResource, []string{"accessPointForObjectProcess"})
+
+	if err = c.marshalInput(request, input, updateContentMd5); err != nil {
+		return nil, err
+	}
+	output, err := c.invokeOperation(ctx, input, optFns)
+	if err != nil {
+		return nil, err
+	}
+
+	result := &ListAccessPointsForObjectProcessResult{}
+
+	if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil {
+		return nil, c.toClientError(err, "UnmarshalOutputFail", output)
+	}
+
+	return result, err
+}
+
+type DeleteAccessPointForObjectProcessRequest struct {
+	// The name of the bucket.
+	Bucket *string `input:"host,bucket,required"`
+
+	// The name of the Object FC Access Point.
+	AccessPointForObjectProcessName *string `input:"header,x-oss-access-point-for-object-process-name,required"`
+
+	RequestCommon
+}
+
+type DeleteAccessPointForObjectProcessResult struct {
+	ResultCommon
+}
+
+// DeleteAccessPointForObjectProcess Deletes an Object FC Access Point.
+func (c *Client) DeleteAccessPointForObjectProcess(ctx context.Context, request *DeleteAccessPointForObjectProcessRequest, optFns ...func(*Options)) (*DeleteAccessPointForObjectProcessResult, error) {
+	var err error
+	if request == nil {
+		request = &DeleteAccessPointForObjectProcessRequest{}
+	}
+	input := &OperationInput{
+		OpName: "DeleteAccessPointForObjectProcess",
+		Method: "DELETE",
+		Headers: map[string]string{
+			HTTPHeaderContentType: contentTypeXML,
+		},
+		Parameters: map[string]string{
+			"accessPointForObjectProcess": "",
+		},
+		Bucket: request.Bucket,
+	}
+
+	input.OpMetadata.Set(signer.SubResource, []string{"accessPointForObjectProcess"})
+
+	if err = c.marshalInput(request, input, updateContentMd5); err != nil {
+		return nil, err
+	}
+	output, err := c.invokeOperation(ctx, input, optFns)
+	if err != nil {
+		return nil, err
+	}
+
+	result := &DeleteAccessPointForObjectProcessResult{}
+
+	if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil {
+		return nil, c.toClientError(err, "UnmarshalOutputFail", output)
+	}
+
+	return result, err
+}
+
+type GetAccessPointConfigForObjectProcessRequest struct {
+	// The name of the bucket.
+	Bucket *string `input:"host,bucket,required"`
+
+	// The name of the Object FC Access Point.
+	AccessPointForObjectProcessName *string `input:"header,x-oss-access-point-for-object-process-name,required"`
+
+	RequestCommon
+}
+
+type GetAccessPointConfigForObjectProcessResult struct {
+	// The container that stores the processing information about the Object FC Access Point.
+	ObjectProcessConfiguration *ObjectProcessConfiguration `xml:"ObjectProcessConfiguration"`
+
+	// Whether to allow anonymous users to access this Object FC Access Point.
+	AllowAnonymousAccessForObjectProcess *string `xml:"AllowAnonymousAccessForObjectProcess"`
+
+	// The container in which the Block Public Access configurations are stored.
+ PublicAccessBlockConfiguration *PublicAccessBlockConfiguration `xml:"PublicAccessBlockConfiguration"` + + ResultCommon +} + +// GetAccessPointConfigForObjectProcess Queries the configurations of an Object FC Access Point. +func (c *Client) GetAccessPointConfigForObjectProcess(ctx context.Context, request *GetAccessPointConfigForObjectProcessRequest, optFns ...func(*Options)) (*GetAccessPointConfigForObjectProcessResult, error) { + var err error + if request == nil { + request = &GetAccessPointConfigForObjectProcessRequest{} + } + input := &OperationInput{ + OpName: "GetAccessPointConfigForObjectProcess", + Method: "GET", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "accessPointConfigForObjectProcess": "", + }, + Bucket: request.Bucket, + } + + input.OpMetadata.Set(signer.SubResource, []string{"accessPointConfigForObjectProcess"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &GetAccessPointConfigForObjectProcessResult{} + + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type PutAccessPointConfigForObjectProcessRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + // The name of the Object FC Access Point. The name of an Object FC Access Point must meet the following requirements:The name cannot exceed 63 characters in length.The name can contain only lowercase letters, digits, and hyphens (-) and cannot start or end with a hyphen (-).The name must be unique in the current region. + AccessPointForObjectProcessName *string `input:"header,x-oss-access-point-for-object-process-name,required"` + + // The request body. + PutAccessPointConfigForObjectProcessConfiguration *PutAccessPointConfigForObjectProcessConfiguration `input:"body,PutAccessPointConfigForObjectProcessConfiguration,xml,required"` + + RequestCommon +} + +type PutAccessPointConfigForObjectProcessResult struct { + ResultCommon +} + +// PutAccessPointConfigForObjectProcess Changes the configurations of an Object FC Access Point. +func (c *Client) PutAccessPointConfigForObjectProcess(ctx context.Context, request *PutAccessPointConfigForObjectProcessRequest, optFns ...func(*Options)) (*PutAccessPointConfigForObjectProcessResult, error) { + var err error + if request == nil { + request = &PutAccessPointConfigForObjectProcessRequest{} + } + input := &OperationInput{ + OpName: "PutAccessPointConfigForObjectProcess", + Method: "PUT", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "accessPointConfigForObjectProcess": "", + }, + Bucket: request.Bucket, + } + + input.OpMetadata.Set(signer.SubResource, []string{"accessPointConfigForObjectProcess"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &PutAccessPointConfigForObjectProcessResult{} + + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type PutAccessPointPolicyForObjectProcessRequest struct { + // The name of the bucket. 
+ Bucket *string `input:"host,bucket,required"` + + // The name of the Object FC Access Point. + AccessPointForObjectProcessName *string `input:"header,x-oss-access-point-for-object-process-name,required"` + + // The json format permission policies for an Object FC Access Point. + Body io.Reader `input:"body,nop,required"` + + RequestCommon +} + +type PutAccessPointPolicyForObjectProcessResult struct { + ResultCommon +} + +// PutAccessPointPolicyForObjectProcess Configures policies for an Object FC Access Point. +func (c *Client) PutAccessPointPolicyForObjectProcess(ctx context.Context, request *PutAccessPointPolicyForObjectProcessRequest, optFns ...func(*Options)) (*PutAccessPointPolicyForObjectProcessResult, error) { + var err error + if request == nil { + request = &PutAccessPointPolicyForObjectProcessRequest{} + } + input := &OperationInput{ + OpName: "PutAccessPointPolicyForObjectProcess", + Method: "PUT", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "accessPointPolicyForObjectProcess": "", + }, + Bucket: request.Bucket, + } + + input.OpMetadata.Set(signer.SubResource, []string{"accessPointPolicyForObjectProcess"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &PutAccessPointPolicyForObjectProcessResult{} + + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type GetAccessPointPolicyForObjectProcessRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + // The name of the Object FC Access Point. + AccessPointForObjectProcessName *string `input:"header,x-oss-access-point-for-object-process-name,required"` + + RequestCommon +} + +type GetAccessPointPolicyForObjectProcessResult struct { + // The configurations of the access point policy for object process. + Body string + + ResultCommon +} + +// GetAccessPointPolicyForObjectProcess Queries the policies of an Object FC Access Point. +func (c *Client) GetAccessPointPolicyForObjectProcess(ctx context.Context, request *GetAccessPointPolicyForObjectProcessRequest, optFns ...func(*Options)) (*GetAccessPointPolicyForObjectProcessResult, error) { + var err error + if request == nil { + request = &GetAccessPointPolicyForObjectProcessRequest{} + } + input := &OperationInput{ + OpName: "GetAccessPointPolicyForObjectProcess", + Method: "GET", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "accessPointPolicyForObjectProcess": "", + }, + Bucket: request.Bucket, + } + + input.OpMetadata.Set(signer.SubResource, []string{"accessPointPolicyForObjectProcess"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + body, err := io.ReadAll(output.Body) + defer output.Body.Close() + if err != nil { + return nil, err + } + result := &GetAccessPointPolicyForObjectProcessResult{ + Body: string(body), + } + + if err = c.unmarshalOutput(result, output); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type DeleteAccessPointPolicyForObjectProcessRequest struct { + // The name of the bucket. 
+ Bucket *string `input:"host,bucket,required"` + + // The name of the Object FC Access Point. + AccessPointForObjectProcessName *string `input:"header,x-oss-access-point-for-object-process-name,required"` + + RequestCommon +} + +type DeleteAccessPointPolicyForObjectProcessResult struct { + ResultCommon +} + +// DeleteAccessPointPolicyForObjectProcess Deletes the policies of an Object FC Access Point. +func (c *Client) DeleteAccessPointPolicyForObjectProcess(ctx context.Context, request *DeleteAccessPointPolicyForObjectProcessRequest, optFns ...func(*Options)) (*DeleteAccessPointPolicyForObjectProcessResult, error) { + var err error + if request == nil { + request = &DeleteAccessPointPolicyForObjectProcessRequest{} + } + input := &OperationInput{ + OpName: "DeleteAccessPointPolicyForObjectProcess", + Method: "DELETE", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "accessPointPolicyForObjectProcess": "", + }, + Bucket: request.Bucket, + } + + input.OpMetadata.Set(signer.SubResource, []string{"accessPointPolicyForObjectProcess"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &DeleteAccessPointPolicyForObjectProcessResult{} + + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type WriteGetObjectResponseRequest struct { + // The router forwarding address obtained from the event parameter of Function Compute. + RequestRoute *string `input:"header,x-oss-request-route,required"` + + // The unique forwarding token obtained from the event parameter of Function Compute. + RequestToken *string `input:"header,x-oss-request-token,required"` + + // The HTTP status code returned by the backend server. + FwdStatus *string `input:"header,x-oss-fwd-status,required"` + + // The HTTP response header returned by the backend server. It is used to specify the scope of the resources that you want to query. + FwdHeaderAcceptRanges *string `input:"header,x-oss-fwd-header-Accept-Ranges"` + + // The HTTP response header returned by the backend server. It is used to specify the resource cache method that the client uses. Valid values: no-cache, no-store, public, private, max-age + FwdHeaderCacheControl *string `input:"header,x-oss-fwd-header-Cache-Control"` + + FwdHeaderContentDisposition *string `input:"header,x-oss-fwd-header-Content-Disposition"` + + FwdHeaderContentEncoding *string `input:"header,x-oss-fwd-header-Content-Encoding"` + + FwdHeaderContentLanguage *string `input:"header,x-oss-fwd-header-Content-Language"` + + FwdHeaderContentRange *string `input:"header,x-oss-fwd-header-Content-Range"` + + // The HTTP response header returned by the backend server. It is used to specify the type of the received or sent data. + FwdHeaderContentType *string `input:"header,x-oss-fwd-header-Content-Type"` + + // The HTTP response header returned by the backend server. It uniquely identifies the object. + FwdHeaderEtag *string `input:"header,x-oss-fwd-header-ETag"` + + // The HTTP response header returned by the backend server. It specifies the absolute expiration time of the cache. + FwdHeaderExpires *string `input:"header,x-oss-fwd-header-Expires"` + + // The HTTP response header returned by the backend server. 
It specifies the time when the requested resource was last modified.
+	FwdHeaderLastModified *string `input:"header,x-oss-fwd-header-Last-Modified"`
+
+	Body io.Reader `input:"body,nop"`
+
+	RequestCommon
+}
+
+type WriteGetObjectResponseResult struct {
+	ResultCommon
+}
+
+// WriteGetObjectResponse Customizes the returned data and response headers.
+func (c *Client) WriteGetObjectResponse(ctx context.Context, request *WriteGetObjectResponseRequest, optFns ...func(*Options)) (*WriteGetObjectResponseResult, error) {
+	var err error
+	if request == nil {
+		request = &WriteGetObjectResponseRequest{}
+	}
+	input := &OperationInput{
+		OpName: "WriteGetObjectResponse",
+		Method: "POST",
+		Headers: map[string]string{
+			HTTPHeaderContentType: contentTypeXML,
+		},
+		Parameters: map[string]string{
+			"x-oss-write-get-object-response": "",
+		},
+	}
+
+	input.OpMetadata.Set(signer.SubResource, []string{"x-oss-write-get-object-response"})
+
+	if err = c.marshalInput(request, input, updateContentMd5); err != nil {
+		return nil, err
+	}
+	output, err := c.invokeOperation(ctx, input, optFns)
+	if err != nil {
+		return nil, err
+	}
+
+	result := &WriteGetObjectResponseResult{}
+
+	if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil {
+		return nil, c.toClientError(err, "UnmarshalOutputFail", output)
+	}
+
+	return result, err
+}
diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_policy.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_policy.go
new file mode 100644
index 000000000..9d80157ec
--- /dev/null
+++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_policy.go
@@ -0,0 +1,209 @@
+package oss
+
+import (
+	"context"
+	"io"
+	"io/ioutil"
+
+	"github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/signer"
+)
+
+type PolicyStatus struct {
+	// Indicates whether the current bucket policy allows public access. Valid values: true and false.
+	IsPublic *bool `xml:"IsPublic"`
+}
+
+type PutBucketPolicyRequest struct {
+	// The name of the bucket.
+	Bucket *string `input:"host,bucket,required"`
+
+	// The request body.
+	Body io.Reader `input:"body,nop,required"`
+
+	RequestCommon
+}
+
+type PutBucketPolicyResult struct {
+	ResultCommon
+}
+
+// PutBucketPolicy Configures a policy for a bucket.
+func (c *Client) PutBucketPolicy(ctx context.Context, request *PutBucketPolicyRequest, optFns ...func(*Options)) (*PutBucketPolicyResult, error) {
+	var err error
+	if request == nil {
+		request = &PutBucketPolicyRequest{}
+	}
+	input := &OperationInput{
+		OpName: "PutBucketPolicy",
+		Method: "PUT",
+		Headers: map[string]string{
+			HTTPHeaderContentType: contentTypeXML,
+		},
+		Parameters: map[string]string{
+			"policy": "",
+		},
+		Bucket: request.Bucket,
+	}
+	input.OpMetadata.Set(signer.SubResource, []string{"policy"})
+
+	if err = c.marshalInput(request, input, updateContentMd5); err != nil {
+		return nil, err
+	}
+	output, err := c.invokeOperation(ctx, input, optFns)
+	if err != nil {
+		return nil, err
+	}
+
+	result := &PutBucketPolicyResult{}
+	if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil {
+		return nil, c.toClientError(err, "UnmarshalOutputFail", output)
+	}
+	return result, err
+}
+
+type GetBucketPolicyRequest struct {
+	// The name of the bucket.
+	Bucket *string `input:"host,bucket,required"`
+
+	RequestCommon
+}
+
+type GetBucketPolicyResult struct {
+	// The configurations of the bucket policy.
+	Body string
+
+	RequestCommon
+}
+
+// GetBucketPolicy Queries the policies configured for a bucket.
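+//
+// A minimal usage sketch (illustrative only, not part of the upstream SDK;
+// assumes a configured *Client c, the package's Ptr helper, and a hypothetical
+// bucket name):
+//
+//	res, err := c.GetBucketPolicy(context.TODO(),
+//		&GetBucketPolicyRequest{Bucket: Ptr("example-bucket")})
+//	if err != nil {
+//		// handle error
+//	}
+//	// res.Body holds the policy document as a JSON string.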
+func (c *Client) GetBucketPolicy(ctx context.Context, request *GetBucketPolicyRequest, optFns ...func(*Options)) (*GetBucketPolicyResult, error) { + var err error + if request == nil { + request = &GetBucketPolicyRequest{} + } + input := &OperationInput{ + OpName: "GetBucketPolicy", + Method: "GET", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "policy": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"policy"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + body, err := ioutil.ReadAll(output.Body) + defer output.Body.Close() + if err != nil { + return nil, err + } + result := &GetBucketPolicyResult{ + Body: string(body), + } + if err = c.unmarshalOutput(result, output); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + return result, err +} + +type DeleteBucketPolicyRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + RequestCommon +} + +type DeleteBucketPolicyResult struct { + ResultCommon +} + +// DeleteBucketPolicy Deletes a policy for a bucket. +func (c *Client) DeleteBucketPolicy(ctx context.Context, request *DeleteBucketPolicyRequest, optFns ...func(*Options)) (*DeleteBucketPolicyResult, error) { + var err error + if request == nil { + request = &DeleteBucketPolicyRequest{} + } + input := &OperationInput{ + OpName: "DeleteBucketPolicy", + Method: "DELETE", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "policy": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"policy"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &DeleteBucketPolicyResult{} + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + return result, err +} + +type GetBucketPolicyStatusRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + RequestCommon +} + +type GetBucketPolicyStatusResult struct { + // The container that stores public access information. + PolicyStatus *PolicyStatus `output:"body,PolicyStatus,xml"` + + ResultCommon +} + +// GetBucketPolicyStatus Checks whether the current bucket policy allows public access. 
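+//
+// A minimal usage sketch (illustrative only; assumes a configured *Client c
+// and a hypothetical bucket name):
+//
+//	res, err := c.GetBucketPolicyStatus(context.TODO(),
+//		&GetBucketPolicyStatusRequest{Bucket: Ptr("example-bucket")})
+//	if err == nil && res.PolicyStatus != nil && res.PolicyStatus.IsPublic != nil {
+//		fmt.Println("policy allows public access:", *res.PolicyStatus.IsPublic)
+//	}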
+func (c *Client) GetBucketPolicyStatus(ctx context.Context, request *GetBucketPolicyStatusRequest, optFns ...func(*Options)) (*GetBucketPolicyStatusResult, error) { + var err error + if request == nil { + request = &GetBucketPolicyStatusRequest{} + } + input := &OperationInput{ + OpName: "GetBucketPolicyStatus", + Method: "GET", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "policyStatus": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"policyStatus"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &GetBucketPolicyStatusResult{} + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + return result, err +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_publicaccessblock.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_publicaccessblock.go new file mode 100644 index 000000000..e9aedd44e --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_publicaccessblock.go @@ -0,0 +1,154 @@ +package oss + +import ( + "context" + + "github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/signer" +) + +type GetBucketPublicAccessBlockRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + RequestCommon +} + +type GetBucketPublicAccessBlockResult struct { + // The container in which the Block Public Access configurations are stored. + PublicAccessBlockConfiguration *PublicAccessBlockConfiguration `output:"body,PublicAccessBlockConfiguration,xml"` + + ResultCommon +} + +// GetBucketPublicAccessBlock Queries the Block Public Access configurations of a bucket. +func (c *Client) GetBucketPublicAccessBlock(ctx context.Context, request *GetBucketPublicAccessBlockRequest, optFns ...func(*Options)) (*GetBucketPublicAccessBlockResult, error) { + var err error + if request == nil { + request = &GetBucketPublicAccessBlockRequest{} + } + input := &OperationInput{ + OpName: "GetBucketPublicAccessBlock", + Method: "GET", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "publicAccessBlock": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"publicAccessBlock"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &GetBucketPublicAccessBlockResult{} + + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type PutBucketPublicAccessBlockRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + // Request body. + PublicAccessBlockConfiguration *PublicAccessBlockConfiguration `input:"body,PublicAccessBlockConfiguration,xml,required"` + + RequestCommon +} + +type PutBucketPublicAccessBlockResult struct { + ResultCommon +} + +// PutBucketPublicAccessBlock Enables or disables Block Public Access for a bucket. 
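+//
+// A minimal usage sketch (illustrative only; assumes a configured *Client c
+// and that PublicAccessBlockConfiguration exposes a BlockPublicAccess field,
+// which is defined elsewhere in this SDK):
+//
+//	_, err := c.PutBucketPublicAccessBlock(context.TODO(), &PutBucketPublicAccessBlockRequest{
+//		Bucket: Ptr("example-bucket"),
+//		PublicAccessBlockConfiguration: &PublicAccessBlockConfiguration{
+//			BlockPublicAccess: Ptr(true), // enable Block Public Access for the bucket
+//		},
+//	})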
+func (c *Client) PutBucketPublicAccessBlock(ctx context.Context, request *PutBucketPublicAccessBlockRequest, optFns ...func(*Options)) (*PutBucketPublicAccessBlockResult, error) { + var err error + if request == nil { + request = &PutBucketPublicAccessBlockRequest{} + } + input := &OperationInput{ + OpName: "PutBucketPublicAccessBlock", + Method: "PUT", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "publicAccessBlock": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"publicAccessBlock"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &PutBucketPublicAccessBlockResult{} + + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type DeleteBucketPublicAccessBlockRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + RequestCommon +} + +type DeleteBucketPublicAccessBlockResult struct { + ResultCommon +} + +// DeleteBucketPublicAccessBlock Deletes the Block Public Access configurations of a bucket. +func (c *Client) DeleteBucketPublicAccessBlock(ctx context.Context, request *DeleteBucketPublicAccessBlockRequest, optFns ...func(*Options)) (*DeleteBucketPublicAccessBlockResult, error) { + var err error + if request == nil { + request = &DeleteBucketPublicAccessBlockRequest{} + } + input := &OperationInput{ + OpName: "DeleteBucketPublicAccessBlock", + Method: "DELETE", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "publicAccessBlock": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"publicAccessBlock"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &DeleteBucketPublicAccessBlockResult{} + + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_redundancytransition.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_redundancytransition.go new file mode 100644 index 000000000..d35c0cdc4 --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_redundancytransition.go @@ -0,0 +1,310 @@ +package oss + +import ( + "context" + + "github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/signer" +) + +type BucketDataRedundancyTransition struct { + // The progress of the redundancy type change task in percentage. Valid values: 0 to 100. This element is available when the task is in the Processing or Finished state. + ProcessPercentage *int32 `xml:"ProcessPercentage"` + + // The estimated period of time that is required for the redundancy type change task. Unit: hours. This element is available when the task is in the Processing or Finished state. + EstimatedRemainingTime *int64 `xml:"EstimatedRemainingTime"` + + // The name of the bucket. + Bucket *string `xml:"Bucket"` + + // The ID of the redundancy type change task. 
+ TaskId *string `xml:"TaskId"` + + // The state of the redundancy type change task. Valid values:QueueingProcessingFinished + Status *string `xml:"Status"` + + // The time when the redundancy type change task was created. + CreateTime *string `xml:"CreateTime"` + + // The time when the redundancy type change task was performed. This element is available when the task is in the Processing or Finished state. + StartTime *string `xml:"StartTime"` + + // The time when the redundancy type change task was finished. This element is available when the task is in the Finished state. + EndTime *string `xml:"EndTime"` +} + +type ListBucketDataRedundancyTransition struct { + // Indicates that this ListUserDataRedundancyTransition request contains subsequent results. + // You must set NextContinuationToken to continuation-token to continue obtaining the results. + NextContinuationToken *string `xml:"NextContinuationToken"` + + // The container in which the redundancy type conversion task is stored. + BucketDataRedundancyTransitions []BucketDataRedundancyTransition `xml:"BucketDataRedundancyTransition"` + + // Indicates whether the returned results are truncated. + // Valid values:true: indicates that not all results are returned for the request. + // false: indicates that all results are returned for the request. + IsTruncated *bool `xml:"IsTruncated"` +} + +type ListBucketDataRedundancyTransitionRequest struct { + // The name of the bucket + Bucket *string `input:"host,bucket,required"` + + RequestCommon +} + +type ListBucketDataRedundancyTransitionResult struct { + // The container for listed redundancy type change tasks. + ListBucketDataRedundancyTransition *ListBucketDataRedundancyTransition `output:"body,ListBucketDataRedundancyTransition,xml"` + + ResultCommon +} + +// ListBucketDataRedundancyTransition Lists all redundancy type conversion tasks of a bucket. +func (c *Client) ListBucketDataRedundancyTransition(ctx context.Context, request *ListBucketDataRedundancyTransitionRequest, optFns ...func(*Options)) (*ListBucketDataRedundancyTransitionResult, error) { + var err error + if request == nil { + request = &ListBucketDataRedundancyTransitionRequest{} + } + input := &OperationInput{ + OpName: "ListBucketDataRedundancyTransition", + Method: "GET", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "redundancyTransition": "", + }, + Bucket: request.Bucket, + } + + input.OpMetadata.Set(signer.SubResource, []string{"redundancyTransition"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &ListBucketDataRedundancyTransitionResult{} + + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type GetBucketDataRedundancyTransitionRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + // The ID of the redundancy change task. + RedundancyTransitionTaskid *string `input:"query,x-oss-redundancy-transition-taskid,required"` + + RequestCommon +} + +type GetBucketDataRedundancyTransitionResult struct { + // The container for a specific redundancy type change task. 
+ BucketDataRedundancyTransition *BucketDataRedundancyTransition `output:"body,BucketDataRedundancyTransition,xml"` + + ResultCommon +} + +// GetBucketDataRedundancyTransition Queries the redundancy type conversion tasks of a bucket. +func (c *Client) GetBucketDataRedundancyTransition(ctx context.Context, request *GetBucketDataRedundancyTransitionRequest, optFns ...func(*Options)) (*GetBucketDataRedundancyTransitionResult, error) { + var err error + if request == nil { + request = &GetBucketDataRedundancyTransitionRequest{} + } + input := &OperationInput{ + OpName: "GetBucketDataRedundancyTransition", + Method: "GET", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "redundancyTransition": "", + }, + Bucket: request.Bucket, + } + + input.OpMetadata.Set(signer.SubResource, []string{"redundancyTransition"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &GetBucketDataRedundancyTransitionResult{} + + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type CreateBucketDataRedundancyTransitionRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + // The redundancy type to which you want to convert the bucket. You can only convert the redundancy type of a bucket from LRS to ZRS. + TargetRedundancyType *string `input:"query,x-oss-target-redundancy-type,required"` + + RequestCommon +} + +type CreateBucketDataRedundancyTransitionResult struct { + // The container in which the redundancy type conversion task is stored. + BucketDataRedundancyTransition *BucketDataRedundancyTransition `output:"body,BucketDataRedundancyTransition,xml"` + + ResultCommon +} + +// CreateBucketDataRedundancyTransition Creates a redundancy type conversion task for a bucket. +func (c *Client) CreateBucketDataRedundancyTransition(ctx context.Context, request *CreateBucketDataRedundancyTransitionRequest, optFns ...func(*Options)) (*CreateBucketDataRedundancyTransitionResult, error) { + var err error + if request == nil { + request = &CreateBucketDataRedundancyTransitionRequest{} + } + input := &OperationInput{ + OpName: "CreateBucketDataRedundancyTransition", + Method: "POST", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "redundancyTransition": "", + }, + Bucket: request.Bucket, + } + + input.OpMetadata.Set(signer.SubResource, []string{"redundancyTransition"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &CreateBucketDataRedundancyTransitionResult{} + + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type DeleteBucketDataRedundancyTransitionRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + // The ID of the redundancy type change task. 
+ RedundancyTransitionTaskid *string `input:"query,x-oss-redundancy-transition-taskid,required"` + + RequestCommon +} + +type DeleteBucketDataRedundancyTransitionResult struct { + ResultCommon +} + +// DeleteBucketDataRedundancyTransition Deletes a redundancy type conversion task of a bucket. +func (c *Client) DeleteBucketDataRedundancyTransition(ctx context.Context, request *DeleteBucketDataRedundancyTransitionRequest, optFns ...func(*Options)) (*DeleteBucketDataRedundancyTransitionResult, error) { + var err error + if request == nil { + request = &DeleteBucketDataRedundancyTransitionRequest{} + } + input := &OperationInput{ + OpName: "DeleteBucketDataRedundancyTransition", + Method: "DELETE", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "redundancyTransition": "", + }, + Bucket: request.Bucket, + } + + input.OpMetadata.Set(signer.SubResource, []string{"redundancyTransition"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &DeleteBucketDataRedundancyTransitionResult{} + + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type ListUserDataRedundancyTransitionRequest struct { + // The token from which the list operation must start. + ContinuationToken *string `input:"query,continuation-token"` + + // The maximum number of redundancy type conversion tasks that can be returned. Valid values: 1 to 100. + MaxKeys int32 `input:"query,max-keys"` + + RequestCommon +} + +type ListUserDataRedundancyTransitionResult struct { + // The container in which the listed redundancy type conversion tasks are stored. 
+	ListBucketDataRedundancyTransition *ListBucketDataRedundancyTransition `output:"body,ListBucketDataRedundancyTransition,xml"`
+
+	ResultCommon
+}
+
+// ListUserDataRedundancyTransition Lists all redundancy type conversion tasks of the requester.
+func (c *Client) ListUserDataRedundancyTransition(ctx context.Context, request *ListUserDataRedundancyTransitionRequest, optFns ...func(*Options)) (*ListUserDataRedundancyTransitionResult, error) {
+	var err error
+	if request == nil {
+		request = &ListUserDataRedundancyTransitionRequest{}
+	}
+	input := &OperationInput{
+		OpName: "ListUserDataRedundancyTransition",
+		Method: "GET",
+		Headers: map[string]string{
+			HTTPHeaderContentType: contentTypeXML,
+		},
+		Parameters: map[string]string{
+			"redundancyTransition": "",
+		},
+	}
+
+	input.OpMetadata.Set(signer.SubResource, []string{"redundancyTransition"})
+
+	if err = c.marshalInput(request, input, updateContentMd5); err != nil {
+		return nil, err
+	}
+	output, err := c.invokeOperation(ctx, input, optFns)
+	if err != nil {
+		return nil, err
+	}
+
+	result := &ListUserDataRedundancyTransitionResult{}
+
+	if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil {
+		return nil, c.toClientError(err, "UnmarshalOutputFail", output)
+	}
+
+	return result, err
+}
diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_referer.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_referer.go
new file mode 100644
index 000000000..e907e410d
--- /dev/null
+++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_referer.go
@@ -0,0 +1,133 @@
+package oss
+
+import (
+	"context"
+	"github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/signer"
+)
+
+type RefererList struct {
+	// The addresses in the Referer whitelist.
+	Referers []string `xml:"Referer"`
+}
+
+type RefererBlacklist struct {
+	// The addresses in the Referer blacklist.
+	Referers []string `xml:"Referer"`
+}
+
+type RefererConfiguration struct {
+	// Specifies whether to allow a request whose Referer field is empty. Valid values: true (default), false
+	AllowEmptyReferer *bool `xml:"AllowEmptyReferer"`
+
+	// Specifies whether to truncate the query string in the URL when the Referer is matched. Valid values: true (default), false
+	AllowTruncateQueryString *bool `xml:"AllowTruncateQueryString"`
+
+	// Specifies whether to truncate the path and parts that follow the path in the URL when the Referer is matched. Valid values: true, false
+	TruncatePath *bool `xml:"TruncatePath"`
+
+	// The container that stores the Referer whitelist. The PutBucketReferer operation overwrites the existing Referer whitelist with the Referer whitelist specified in RefererList. If RefererList is not specified in the request (that is, no Referer elements are included), the operation clears the existing Referer whitelist.
+	RefererList *RefererList `xml:"RefererList"`
+
+	// The container that stores the Referer blacklist.
+	RefererBlacklist *RefererBlacklist `xml:"RefererBlacklist"`
+}
+
+type PutBucketRefererRequest struct {
+	// The name of the bucket.
+	Bucket *string `input:"host,bucket,required"`
+
+	// The request body schema.
+	RefererConfiguration *RefererConfiguration `input:"body,RefererConfiguration,xml,required"`
+
+	RequestCommon
+}
+
+type PutBucketRefererResult struct {
+	ResultCommon
+}
+
+// PutBucketReferer Configures a Referer whitelist for an Object Storage Service (OSS) bucket. You can specify whether to allow the requests whose Referer field is empty or whose query strings are truncated.
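+//
+// A minimal usage sketch (illustrative only; assumes a configured *Client c;
+// the bucket name and whitelist entry are hypothetical):
+//
+//	_, err := c.PutBucketReferer(context.TODO(), &PutBucketRefererRequest{
+//		Bucket: Ptr("example-bucket"),
+//		RefererConfiguration: &RefererConfiguration{
+//			AllowEmptyReferer: Ptr(false),
+//			RefererList: &RefererList{
+//				Referers: []string{"https://www.example.com"},
+//			},
+//		},
+//	})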
+func (c *Client) PutBucketReferer(ctx context.Context, request *PutBucketRefererRequest, optFns ...func(*Options)) (*PutBucketRefererResult, error) { + var err error + if request == nil { + request = &PutBucketRefererRequest{} + } + input := &OperationInput{ + OpName: "PutBucketReferer", + Method: "PUT", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "referer": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"referer"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &PutBucketRefererResult{} + + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type GetBucketRefererRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + RequestCommon +} + +type GetBucketRefererResult struct { + // The container that stores the hotlink protection configurations. + RefererConfiguration *RefererConfiguration `output:"body,RefererConfiguration,xml"` + + ResultCommon +} + +// GetBucketReferer Queries the hotlink protection configurations for a bucket. +func (c *Client) GetBucketReferer(ctx context.Context, request *GetBucketRefererRequest, optFns ...func(*Options)) (*GetBucketRefererResult, error) { + var err error + if request == nil { + request = &GetBucketRefererRequest{} + } + input := &OperationInput{ + OpName: "GetBucketReferer", + Method: "GET", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "referer": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"referer"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &GetBucketRefererResult{} + + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_replication.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_replication.go new file mode 100644 index 000000000..50df77fc3 --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_replication.go @@ -0,0 +1,469 @@ +package oss + +import ( + "context" + + "github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/signer" +) + +type RtcConfiguration struct { + // The container that stores the status of RTC. + RTC *ReplicationTimeControl `xml:"RTC"` + + // The ID of the data replication rule for which you want to configure RTC. + ID *string `xml:"ID"` +} + +type ReplicationSourceSelectionCriteria struct { + // The container that is used to filter the source objects that are encrypted by using SSE-KMS. This parameter must be specified if the SourceSelectionCriteria parameter is specified in the data replication rule. + SseKmsEncryptedObjects *SseKmsEncryptedObjects `xml:"SseKmsEncryptedObjects"` +} + +type ReplicationPrefixSet struct { + // The prefix that is used to specify the object that you want to replicate. 
Only objects whose names contain the specified prefix are replicated to the destination bucket.* The value of the Prefix parameter can be up to 1,023 characters in length.* If you specify the Prefix parameter in a data replication rule, OSS synchronizes new data and historical data based on the value of the Prefix parameter. + Prefixs []string `xml:"Prefix"` +} + +type ReplicationProgressRule struct { + // The container that stores the information about the destination bucket. + Destination *ReplicationDestination `xml:"Destination"` + + // The status of the data replication task. Valid values:* starting: OSS creates a data replication task after a data replication rule is configured.* doing: The replication rule is effective and the replication task is in progress.* closing: OSS clears a data replication task after the corresponding data replication rule is deleted. + Status *string `xml:"Status"` + + // Specifies whether to replicate historical data that exists before data replication is enabled from the source bucket to the destination bucket.* enabled (default): replicates historical data to the destination bucket.* disabled: ignores historical data and replicates only data uploaded to the source bucket after data replication is enabled for the source bucket. + HistoricalObjectReplication *string `xml:"HistoricalObjectReplication"` + + // The container that stores the progress of the data replication task. This parameter is returned only when the data replication task is in the doing state. + Progress *ReplicationProgressInformation `xml:"Progress"` + + // The ID of the data replication rule. + ID *string `xml:"ID"` + + // The container that stores prefixes. You can specify up to 10 prefixes in each data replication rule. + PrefixSet *ReplicationPrefixSet `xml:"PrefixSet"` + + // The operations that are synchronized to the destination bucket.* ALL: PUT, DELETE, and ABORT operations are synchronized to the destination bucket.* PUT: Write operations are synchronized to the destination bucket, including PutObject, PostObject, AppendObject, CopyObject, PutObjectACL, InitiateMultipartUpload, UploadPart, UploadPartCopy, and CompleteMultipartUpload. + Action *string `xml:"Action"` +} + +type ReplicationDestination struct { + // The destination bucket to which data is replicated. + Bucket *string `xml:"Bucket"` + + // The region in which the destination bucket is located. + Location *string `xml:"Location"` + + // The link that is used to transfer data during data replication. Valid values:* internal (default): the default data transfer link used in OSS.* oss_acc: the transfer acceleration link. You can set TransferType to oss_acc only when you create CRR rules. + TransferType TransferTypeType `xml:"TransferType"` +} + +type SseKmsEncryptedObjects struct { + // Specifies whether to replicate objects that are encrypted by using SSE-KMS. Valid values:* Enabled* Disabled + Status StatusType `xml:"Status"` +} + +type LocationTransferType struct { + // The regions in which the destination bucket can be located. + Location *string `xml:"Location"` + + // The container that stores the transfer type. + TransferTypes *TransferTypes `xml:"TransferTypes"` +} + +type ReplicationTimeControl struct { + // Specifies whether to enable RTC.Valid values:* disabled * enabled + Status *string `xml:"Status"` +} + +type ReplicationRule struct { + // The container that stores the information about the destination bucket. 
+ Destination *ReplicationDestination `xml:"Destination"` + + // The role that you want to authorize OSS to use to replicate data. If you want to use SSE-KMS to encrypt the objects that are replicated to the destination bucket, you must specify this parameter. + SyncRole *string `xml:"SyncRole"` + + // The container that specifies other conditions used to filter the source objects that you want to replicate. Filter conditions can be specified only for source objects encrypted by using SSE-KMS. + SourceSelectionCriteria *ReplicationSourceSelectionCriteria `xml:"SourceSelectionCriteria"` + + // The encryption configuration for the objects replicated to the destination bucket. If the Status parameter is set to Enabled, you must specify this parameter. + EncryptionConfiguration *ReplicationEncryptionConfiguration `xml:"EncryptionConfiguration"` + + // Specifies whether to replicate historical data that exists before data replication is enabled from the source bucket to the destination bucket. Valid values:* enabled (default): replicates historical data to the destination bucket.* disabled: does not replicate historical data to the destination bucket. Only data uploaded to the source bucket after data replication is enabled for the source bucket is replicated. + HistoricalObjectReplication HistoricalObjectReplicationType `xml:"HistoricalObjectReplication"` + + // The container that stores the status of the RTC feature. + RTC *ReplicationTimeControl `xml:"RTC"` + + // The ID of the rule. + ID *string `xml:"ID"` + + // The container that stores prefixes. You can specify up to 10 prefixes in each data replication rule. + PrefixSet *ReplicationPrefixSet `xml:"PrefixSet"` + + // The operations that can be synchronized to the destination bucket. If you configure Action in a data replication rule, OSS synchronizes new data and historical data based on the specified value of Action. You can set Action to one or more of the following operation types. Valid values:* ALL (default): PUT, DELETE, and ABORT operations are synchronized to the destination bucket.* PUT: Write operations are synchronized to the destination bucket, including PutObject, PostObject, AppendObject, CopyObject, PutObjectACL, InitiateMultipartUpload, UploadPart, UploadPartCopy, and CompleteMultipartUpload. + Action *string `xml:"Action"` + + // The status of the data replication task. Valid values:* starting: OSS creates a data replication task after a data replication rule is configured.* doing: The replication rule is effective and the replication task is in progress.* closing: OSS clears a data replication task after the corresponding data replication rule is deleted. + Status *string `xml:"Status"` +} + +type ReplicationConfiguration struct { + // The container that stores the data replication rules. + Rules []ReplicationRule `xml:"Rule"` +} + +type LocationTransferTypeConstraint struct { + // The container that stores regions in which the destination bucket can be located with the TransferType information. + LocationTransferTypes []LocationTransferType `xml:"LocationTransferType"` +} + +type LocationRTCConstraint struct { + // The regions where RTC is supported. + Locations []string `xml:"Location"` +} + +type ReplicationLocation struct { + // The regions in which the destination bucket can be located. + Locations []string `xml:"Location"` + + // The container that stores regions in which the destination bucket can be located with TransferType specified. 
+ LocationTransferTypeConstraint *LocationTransferTypeConstraint `xml:"LocationTransferTypeConstraint"` + + // The container that stores regions in which the RTC can be enabled. + LocationRTCConstraint *LocationRTCConstraint `xml:"LocationRTCConstraint"` +} + +type ReplicationProgress struct { + // The container that stores the progress of the data replication task corresponding to each data replication rule. + Rules []ReplicationProgressRule `xml:"Rule"` +} + +type ReplicationEncryptionConfiguration struct { + ReplicaKmsKeyID *string `xml:"ReplicaKmsKeyID"` +} + +type TransferTypes struct { + // The data transfer type that is used to transfer data in data replication. Valid values:* internal (default): the default data transfer link used in OSS.* oss_acc: the link in which data transmission is accelerated. You can set TransferType to oss_acc only when you create CRR rules. + Types []string `xml:"Type"` +} + +type ReplicationProgressInformation struct { + // The percentage of the replicated historical data. This parameter is valid only when HistoricalObjectReplication is set to enabled. + HistoricalObject *string `xml:"HistoricalObject"` + + // The time used to determine whether data is replicated to the destination bucket. Data that is written to the source bucket before the time is replicated to the destination bucket. The value of this parameter is in the GMT format. Example: Thu, 24 Sep 2015 15:39:18 GMT. + NewObject *string `xml:"NewObject"` +} + +type PutBucketRtcRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + // The container of the request body. + RtcConfiguration *RtcConfiguration `input:"body,ReplicationRule,xml,required"` + + RequestCommon +} + +type PutBucketRtcResult struct { + ResultCommon +} + +// PutBucketRtc Enables or disables the Replication Time Control (RTC) feature for existing cross-region replication (CRR) rules. +func (c *Client) PutBucketRtc(ctx context.Context, request *PutBucketRtcRequest, optFns ...func(*Options)) (*PutBucketRtcResult, error) { + var err error + if request == nil { + request = &PutBucketRtcRequest{} + } + input := &OperationInput{ + OpName: "PutBucketRtc", + Method: "PUT", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "rtc": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"rtc"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &PutBucketRtcResult{} + + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type PutBucketReplicationRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + // The container of the request body. + ReplicationConfiguration *ReplicationConfiguration `input:"body,ReplicationConfiguration,xml,required"` + + RequestCommon +} + +type PutBucketReplicationResult struct { + ReplicationRuleId *string `output:"header,x-oss-replication-rule-id"` + + ResultCommon +} + +// PutBucketReplication Configures data replication rules for a bucket. Object Storage Service (OSS) supports cross-region replication (CRR) and same-region replication (SRR). 
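+//
+// A minimal usage sketch (illustrative only; assumes a configured *Client c;
+// the source bucket, destination bucket, and region are hypothetical):
+//
+//	res, err := c.PutBucketReplication(context.TODO(), &PutBucketReplicationRequest{
+//		Bucket: Ptr("src-bucket"),
+//		ReplicationConfiguration: &ReplicationConfiguration{
+//			Rules: []ReplicationRule{{
+//				Destination: &ReplicationDestination{
+//					Bucket:   Ptr("dst-bucket"),
+//					Location: Ptr("oss-cn-hangzhou"),
+//				},
+//			}},
+//		},
+//	})
+//	if err != nil {
+//		// handle error
+//	}
+//	_ = res.ReplicationRuleId // the ID assigned to the new rule, if returned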
+func (c *Client) PutBucketReplication(ctx context.Context, request *PutBucketReplicationRequest, optFns ...func(*Options)) (*PutBucketReplicationResult, error) { + var err error + if request == nil { + request = &PutBucketReplicationRequest{} + } + input := &OperationInput{ + OpName: "PutBucketReplication", + Method: "POST", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "comp": "add", + "replication": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"replication", "comp"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &PutBucketReplicationResult{} + + if err = c.unmarshalOutput(result, output, unmarshalHeader, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type GetBucketReplicationRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + RequestCommon +} + +type GetBucketReplicationResult struct { + // The container that stores data replication configurations. + ReplicationConfiguration *ReplicationConfiguration `output:"body,ReplicationConfiguration,xml"` + + ResultCommon +} + +// GetBucketReplication Queries the data replication rules configured for a bucket. +func (c *Client) GetBucketReplication(ctx context.Context, request *GetBucketReplicationRequest, optFns ...func(*Options)) (*GetBucketReplicationResult, error) { + var err error + if request == nil { + request = &GetBucketReplicationRequest{} + } + input := &OperationInput{ + OpName: "GetBucketReplication", + Method: "GET", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "replication": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"replication"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &GetBucketReplicationResult{} + + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type GetBucketReplicationLocationRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + RequestCommon +} + +type GetBucketReplicationLocationResult struct { + // The container that stores the region in which the destination bucket can be located. + ReplicationLocation *ReplicationLocation `output:"body,ReplicationLocation,xml"` + + ResultCommon +} + +// GetBucketReplicationLocation Queries the regions in which available destination buckets reside. You can determine the region of the destination bucket to which the data in the source bucket are replicated based on the returned response. 
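+//
+// A minimal usage sketch (illustrative only; assumes a configured *Client c
+// and a hypothetical source bucket):
+//
+//	res, err := c.GetBucketReplicationLocation(context.TODO(),
+//		&GetBucketReplicationLocationRequest{Bucket: Ptr("src-bucket")})
+//	if err == nil && res.ReplicationLocation != nil {
+//		fmt.Println(res.ReplicationLocation.Locations) // candidate destination regions
+//	}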
+func (c *Client) GetBucketReplicationLocation(ctx context.Context, request *GetBucketReplicationLocationRequest, optFns ...func(*Options)) (*GetBucketReplicationLocationResult, error) { + var err error + if request == nil { + request = &GetBucketReplicationLocationRequest{} + } + input := &OperationInput{ + OpName: "GetBucketReplicationLocation", + Method: "GET", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "replicationLocation": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"replicationLocation"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &GetBucketReplicationLocationResult{} + + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type GetBucketReplicationProgressRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + // The ID of the data replication rule. You can call the GetBucketReplication operation to query the ID. + RuleId *string `input:"query,rule-id,required"` + + RequestCommon +} + +type GetBucketReplicationProgressResult struct { + // The container that is used to store the progress of data replication tasks. + ReplicationProgress *ReplicationProgress `output:"body,ReplicationProgress,xml"` + + ResultCommon +} + +// GetBucketReplicationProgress Queries the information about the data replication process of a bucket. +func (c *Client) GetBucketReplicationProgress(ctx context.Context, request *GetBucketReplicationProgressRequest, optFns ...func(*Options)) (*GetBucketReplicationProgressResult, error) { + var err error + if request == nil { + request = &GetBucketReplicationProgressRequest{} + } + input := &OperationInput{ + OpName: "GetBucketReplicationProgress", + Method: "GET", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "replicationProgress": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"replicationProgress"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &GetBucketReplicationProgressResult{} + + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type ReplicationRules struct { + // The ID of data replication rules that you want to delete. You can call the GetBucketReplication operation to obtain the ID. + IDs []string `xml:"ID"` +} + +type DeleteBucketReplicationRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + // The container of the request body. + ReplicationRules *ReplicationRules `input:"body,ReplicationRules,xml,required"` + + RequestCommon +} + +type DeleteBucketReplicationResult struct { + ResultCommon +} + +// DeleteBucketReplication Disables data replication for a bucket and deletes the data replication rule configured for the bucket. After you call this operation, all operations performed on the source bucket are not synchronized to the destination bucket. 
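+//
+// A minimal usage sketch (illustrative only; assumes a configured *Client c;
+// the rule ID is hypothetical and would be obtained from GetBucketReplication):
+//
+//	_, err := c.DeleteBucketReplication(context.TODO(), &DeleteBucketReplicationRequest{
+//		Bucket:           Ptr("src-bucket"),
+//		ReplicationRules: &ReplicationRules{IDs: []string{"rule-id"}},
+//	})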
+func (c *Client) DeleteBucketReplication(ctx context.Context, request *DeleteBucketReplicationRequest, optFns ...func(*Options)) (*DeleteBucketReplicationResult, error) { + var err error + if request == nil { + request = &DeleteBucketReplicationRequest{} + } + input := &OperationInput{ + OpName: "DeleteBucketReplication", + Method: "POST", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "comp": "delete", + "replication": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"comp", "replication"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &DeleteBucketReplicationResult{} + + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_resourcegroup.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_resourcegroup.go new file mode 100644 index 000000000..8abc9ec28 --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_resourcegroup.go @@ -0,0 +1,104 @@ +package oss + +import ( + "context" + + "github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/signer" +) + +type BucketResourceGroupConfiguration struct { + // The ID of the resource group to which the bucket belongs. + ResourceGroupId *string `xml:"ResourceGroupId"` +} + +type GetBucketResourceGroupRequest struct { + // The name of the bucket that you want to query. + Bucket *string `input:"host,bucket,required"` + + RequestCommon +} + +type GetBucketResourceGroupResult struct { + // The container that stores the ID of the resource group. + BucketResourceGroupConfiguration *BucketResourceGroupConfiguration `output:"body,BucketResourceGroupConfiguration,xml"` + + ResultCommon +} + +// GetBucketResourceGroup Queries the ID of the resource group to which a bucket belongs. +func (c *Client) GetBucketResourceGroup(ctx context.Context, request *GetBucketResourceGroupRequest, optFns ...func(*Options)) (*GetBucketResourceGroupResult, error) { + var err error + if request == nil { + request = &GetBucketResourceGroupRequest{} + } + input := &OperationInput{ + OpName: "GetBucketResourceGroup", + Method: "GET", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "resourceGroup": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"resourceGroup"}) + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + result := &GetBucketResourceGroupResult{} + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + return result, err +} + +type PutBucketResourceGroupRequest struct { + // The bucket for which you want to modify the ID of the resource group. + Bucket *string `input:"host,bucket,required"` + + // The request body schema. 
+	BucketResourceGroupConfiguration *BucketResourceGroupConfiguration `input:"body,BucketResourceGroupConfiguration,xml,required"`
+
+	RequestCommon
+}
+
+type PutBucketResourceGroupResult struct {
+	ResultCommon
+}
+
+// PutBucketResourceGroup Modifies the ID of the resource group to which a bucket belongs.
+func (c *Client) PutBucketResourceGroup(ctx context.Context, request *PutBucketResourceGroupRequest, optFns ...func(*Options)) (*PutBucketResourceGroupResult, error) {
+	var err error
+	if request == nil {
+		request = &PutBucketResourceGroupRequest{}
+	}
+	input := &OperationInput{
+		OpName: "PutBucketResourceGroup",
+		Method: "PUT",
+		Headers: map[string]string{
+			HTTPHeaderContentType: contentTypeXML,
+		},
+		Parameters: map[string]string{
+			"resourceGroup": "",
+		},
+		Bucket: request.Bucket,
+	}
+	input.OpMetadata.Set(signer.SubResource, []string{"resourceGroup"})
+	if err = c.marshalInput(request, input, updateContentMd5); err != nil {
+		return nil, err
+	}
+	output, err := c.invokeOperation(ctx, input, optFns)
+	if err != nil {
+		return nil, err
+	}
+	result := &PutBucketResourceGroupResult{}
+	if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil {
+		return nil, c.toClientError(err, "UnmarshalOutputFail", output)
+	}
+	return result, err
+}
diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_style.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_style.go
new file mode 100644
index 000000000..e0fa5e811
--- /dev/null
+++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_style.go
@@ -0,0 +1,244 @@
+package oss
+
+import (
+	"context"
+
+	"github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/signer"
+)
+
+type StyleList struct {
+	// The list of styles.
+	Styles []StyleInfo `xml:"Style"`
+}
+
+type StyleInfo struct {
+	// The time when the style was created.
+	CreateTime *string `xml:"CreateTime"`
+
+	// The time when the style was last modified.
+	LastModifyTime *string `xml:"LastModifyTime"`
+
+	// The category of the style. Valid values: image, document, video.
+	Category *string `xml:"Category"`
+
+	// The style name.
+	Name *string `xml:"Name"`
+
+	// The content of the style.
+	Content *string `xml:"Content"`
+}
+
+type StyleContent struct {
+	// The content of the style.
+	Content *string `xml:"Content"`
+}
+
+type PutStyleRequest struct {
+	// The name of the bucket.
+	Bucket *string `input:"host,bucket,required"`
+
+	// The name of the image style.
+	StyleName *string `input:"query,styleName,required"`
+
+	// The category of the style.
+	Category *string `input:"query,category"`
+
+	// The container that stores the content information about the image style.
+	Style *StyleContent `input:"body,Style,xml,required"`
+
+	RequestCommon
+}
+
+type PutStyleResult struct {
+	ResultCommon
+}
+
+// PutStyle Adds an image style to a bucket. An image style contains one or more image processing parameters.
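+//
+// Illustrative usage (a sketch, not part of the upstream SDK source; assumes a
+// configured *Client named client and the package's Ptr helper; the style
+// content "image/resize,p_50" is a placeholder image-processing expression):
+//
+//	_, err := client.PutStyle(context.TODO(), &PutStyleRequest{
+//		Bucket:    Ptr("examplebucket"),
+//		StyleName: Ptr("small"),
+//		Style:     &StyleContent{Content: Ptr("image/resize,p_50")},
+//	})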
+func (c *Client) PutStyle(ctx context.Context, request *PutStyleRequest, optFns ...func(*Options)) (*PutStyleResult, error) { + var err error + if request == nil { + request = &PutStyleRequest{} + } + input := &OperationInput{ + OpName: "PutStyle", + Method: "PUT", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "style": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"style", "styleName"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &PutStyleResult{} + + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type ListStyleRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + RequestCommon +} + +type ListStyleResult struct { + + // The container that was used to query the information about image styles. + StyleList *StyleList `output:"body,StyleList,xml"` + + ResultCommon +} + +// ListStyle Queries all image styles that are created for a bucket. +func (c *Client) ListStyle(ctx context.Context, request *ListStyleRequest, optFns ...func(*Options)) (*ListStyleResult, error) { + var err error + if request == nil { + request = &ListStyleRequest{} + } + input := &OperationInput{ + OpName: "ListStyle", + Method: "GET", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "style": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"style"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &ListStyleResult{} + + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type GetStyleRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + // The name of the image style. + StyleName *string `input:"query,styleName,required"` + + RequestCommon +} + +type GetStyleResult struct { + // The container that stores the information about the image style. + Style *StyleInfo `output:"body,Style,xml"` + + ResultCommon +} + +// GetStyle Queries the information about an image style of a bucket. 
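+//
+// Illustrative usage (a sketch, not part of the upstream SDK source; assumes a
+// configured *Client named client and placeholder names):
+//
+//	result, err := client.GetStyle(context.TODO(), &GetStyleRequest{
+//		Bucket:    Ptr("examplebucket"),
+//		StyleName: Ptr("small"),
+//	})
+//	// On success, result.Style carries the style's name, content and timestamps.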
+func (c *Client) GetStyle(ctx context.Context, request *GetStyleRequest, optFns ...func(*Options)) (*GetStyleResult, error) { + var err error + if request == nil { + request = &GetStyleRequest{} + } + input := &OperationInput{ + OpName: "GetStyle", + Method: "GET", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "style": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"style", "styleName"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &GetStyleResult{} + + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type DeleteStyleRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + // The name of the image style. + StyleName *string `input:"query,styleName,required"` + + RequestCommon +} + +type DeleteStyleResult struct { + ResultCommon +} + +// DeleteStyle Deletes an image style from a bucket. +func (c *Client) DeleteStyle(ctx context.Context, request *DeleteStyleRequest, optFns ...func(*Options)) (*DeleteStyleResult, error) { + var err error + if request == nil { + request = &DeleteStyleRequest{} + } + input := &OperationInput{ + OpName: "DeleteStyle", + Method: "DELETE", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "style": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"style", "styleName"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &DeleteStyleResult{} + + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_tags.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_tags.go new file mode 100644 index 000000000..17c6ade86 --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_tags.go @@ -0,0 +1,146 @@ +package oss + +import ( + "context" + + "github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/signer" +) + +type PutBucketTagsRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + // The request body schema. + Tagging *Tagging `input:"body,Tagging,xml,required"` + + RequestCommon +} + +type PutBucketTagsResult struct { + ResultCommon +} + +// PutBucketTags Adds tags to or modifies the existing tags of a bucket. 
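+//
+// Illustrative usage (a sketch, not part of the upstream SDK source; assumes a
+// configured *Client named client and the Tagging/TagSet/Tag types defined
+// elsewhere in this package):
+//
+//	_, err := client.PutBucketTags(context.TODO(), &PutBucketTagsRequest{
+//		Bucket: Ptr("examplebucket"),
+//		Tagging: &Tagging{TagSet: &TagSet{Tags: []Tag{
+//			{Key: Ptr("owner"), Value: Ptr("backup")},
+//		}}},
+//	})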
+func (c *Client) PutBucketTags(ctx context.Context, request *PutBucketTagsRequest, optFns ...func(*Options)) (*PutBucketTagsResult, error) { + var err error + if request == nil { + request = &PutBucketTagsRequest{} + } + input := &OperationInput{ + OpName: "PutBucketTags", + Method: "PUT", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "tagging": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"tagging"}) + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + result := &PutBucketTagsResult{} + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type GetBucketTagsRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + RequestCommon +} + +type GetBucketTagsResult struct { + // The container that stores the returned tags of the bucket. If no tags are configured for the bucket, an XML message body is returned in which the Tagging element is empty. + Tagging *Tagging `output:"body,Tagging,xml"` + + ResultCommon +} + +// GetBucketTags Queries the tags of a bucket. +func (c *Client) GetBucketTags(ctx context.Context, request *GetBucketTagsRequest, optFns ...func(*Options)) (*GetBucketTagsResult, error) { + var err error + if request == nil { + request = &GetBucketTagsRequest{} + } + input := &OperationInput{ + OpName: "GetBucketTags", + Method: "GET", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "tagging": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"tagging"}) + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + result := &GetBucketTagsResult{} + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + return result, err +} + +type DeleteBucketTagsRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + Tagging *string `input:"query,tagging"` + + RequestCommon +} + +type DeleteBucketTagsResult struct { + ResultCommon +} + +// DeleteBucketTags Deletes tags configured for a bucket. 
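+//
+// Illustrative usage (a sketch, not part of the upstream SDK source; assumes a
+// configured *Client named client and a placeholder bucket name):
+//
+//	_, err := client.DeleteBucketTags(context.TODO(), &DeleteBucketTagsRequest{
+//		Bucket: Ptr("examplebucket"),
+//	})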
+func (c *Client) DeleteBucketTags(ctx context.Context, request *DeleteBucketTagsRequest, optFns ...func(*Options)) (*DeleteBucketTagsResult, error) { + var err error + if request == nil { + request = &DeleteBucketTagsRequest{} + } + input := &OperationInput{ + OpName: "DeleteBucketTags", + Method: "DELETE", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "tagging": "", + }, + Bucket: request.Bucket, + } + + input.OpMetadata.Set(signer.SubResource, []string{"tagging"}) + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + result := &DeleteBucketTagsResult{} + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + return result, err +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_transferacceleration.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_transferacceleration.go new file mode 100644 index 000000000..e2fdb2d33 --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_transferacceleration.go @@ -0,0 +1,113 @@ +package oss + +import ( + "context" + + "github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/signer" +) + +type TransferAccelerationConfiguration struct { + // Whether the transfer acceleration is enabled for this bucket. + Enabled *bool `xml:"Enabled"` +} + +type PutBucketTransferAccelerationRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + // The container of the request body. + TransferAccelerationConfiguration *TransferAccelerationConfiguration `input:"body,TransferAccelerationConfiguration,xml,required"` + + RequestCommon +} + +type PutBucketTransferAccelerationResult struct { + ResultCommon +} + +// PutBucketTransferAcceleration Configures transfer acceleration for a bucket. After you enable transfer acceleration for a bucket, the object access speed is accelerated for users worldwide. The transfer acceleration feature is applicable to scenarios where data needs to be transferred over long geographical distances. This feature can also be used to download or upload objects that are gigabytes or terabytes in size. +func (c *Client) PutBucketTransferAcceleration(ctx context.Context, request *PutBucketTransferAccelerationRequest, optFns ...func(*Options)) (*PutBucketTransferAccelerationResult, error) { + var err error + if request == nil { + request = &PutBucketTransferAccelerationRequest{} + } + input := &OperationInput{ + OpName: "PutBucketTransferAcceleration", + Method: "PUT", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "transferAcceleration": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"transferAcceleration"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &PutBucketTransferAccelerationResult{} + + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type GetBucketTransferAccelerationRequest struct { + // The name of the bucket. 
+ Bucket *string `input:"host,bucket,required"` + + RequestCommon +} + +type GetBucketTransferAccelerationResult struct { + + // The container that stores the transfer acceleration configurations. + TransferAccelerationConfiguration *TransferAccelerationConfiguration `output:"body,TransferAccelerationConfiguration,xml"` + + ResultCommon +} + +// GetBucketTransferAcceleration Queries the transfer acceleration configurations of a bucket. +func (c *Client) GetBucketTransferAcceleration(ctx context.Context, request *GetBucketTransferAccelerationRequest, optFns ...func(*Options)) (*GetBucketTransferAccelerationResult, error) { + var err error + if request == nil { + request = &GetBucketTransferAccelerationRequest{} + } + input := &OperationInput{ + OpName: "GetBucketTransferAcceleration", + Method: "GET", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "transferAcceleration": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"transferAcceleration"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &GetBucketTransferAccelerationResult{} + + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_website.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_website.go new file mode 100644 index 000000000..4a80ae9f5 --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_website.go @@ -0,0 +1,280 @@ +package oss + +import ( + "context" + + "github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/signer" +) + +type MirrorHeaders struct { + // Specifies whether to pass through all request headers other than the following headers to the origin. This parameter takes effect only when the value of RedirectType is Mirror.* Headers such as content-length, authorization2, authorization, range, and date* Headers that start with oss-, x-oss-, and x-drs-Default value: false.Valid values:* true * false + PassAll *bool `xml:"PassAll"` + + // The headers to pass through to the origin. This parameter takes effect only when the value of RedirectType is Mirror. Each specified header can be up to 1,024 bytes in length and can contain only letters, digits, and hyphens (-). You can specify up to 10 headers. + Passs []string `xml:"Pass"` + + // The headers that are not allowed to pass through to the origin. This parameter takes effect only when the value of RedirectType is Mirror. Each header can be up to 1,024 bytes in length and can contain only letters, digits, and hyphens (-). You can specify up to 10 headers. This parameter is used together with PassAll. + Removes []string `xml:"Remove"` + + // The headers that are sent to the origin. The specified headers are configured in the data returned by the origin regardless of whether the headers are contained in the request. This parameter takes effect only when the value of RedirectType is Mirror. You can specify up to 10 headers. + Sets []MirrorHeadersSet `xml:"Set"` +} + +type RoutingRule struct { + // The sequence number that is used to match and run the redirection rules. OSS matches redirection rules based on this parameter. 
If a match succeeds, only the rule is run and the subsequent rules are not run. This parameter must be specified if RoutingRule is specified. + RuleNumber *int64 `xml:"RuleNumber"` + + // The matching condition. If all of the specified conditions are met, the rule is run. A rule is considered matched only when the rule meets the conditions that are specified by all nodes in Condition. This parameter must be specified if RoutingRule is specified. + Condition *RoutingRuleCondition `xml:"Condition"` + + // The operation to perform after the rule is matched. This parameter must be specified if RoutingRule is specified. + Redirect *RoutingRuleRedirect `xml:"Redirect"` +} + +type WebsiteConfiguration struct { + // The container that stores the default homepage. You must specify at least one of the following containers: IndexDocument, ErrorDocument, and RoutingRules. + IndexDocument *IndexDocument `xml:"IndexDocument"` + + // The container that stores the default 404 page. You must specify at least one of the following containers: IndexDocument, ErrorDocument, and RoutingRules. + ErrorDocument *ErrorDocument `xml:"ErrorDocument"` + + // The container that stores the redirection rules. You must specify at least one of the following containers: IndexDocument, ErrorDocument, and RoutingRules. + RoutingRules *RoutingRules `xml:"RoutingRules"` +} + +type IndexDocument struct { + // The default homepage. + Suffix *string `xml:"Suffix"` + + // Specifies whether to redirect the access to the default homepage of the subdirectory when the subdirectory is accessed. Valid values:* **true**: The access is redirected to the default homepage of the subdirectory.* **false** (default): The access is redirected to the default homepage of the root directory.For example, the default homepage is set to index.html, and `bucket.oss-cn-hangzhou.aliyuncs.com/subdir/` is the site that you want to access. If **SupportSubDir** is set to false, the access is redirected to `bucket.oss-cn-hangzhou.aliyuncs.com/index.html`. If **SupportSubDir** is set to true, the access is redirected to `bucket.oss-cn-hangzhou.aliyuncs.com/subdir/index.html`. + SupportSubDir *bool `xml:"SupportSubDir"` + + // The operation to perform when the default homepage is set, the name of the accessed object does not end with a forward slash (/), and the object does not exist. This parameter takes effect only when **SupportSubDir** is set to true. It takes effect after RoutingRule but before ErrorFile. For example, the default homepage is set to index.html, `bucket.oss-cn-hangzhou.aliyuncs.com/abc` is the site that you want to access, and the abc object does not exist. In this case, different operations are performed based on the value of **Type**.* **0** (default): OSS checks whether the object named abc/index.html, which is in the `Object + Forward slash (/) + Homepage` format, exists. If the object exists, OSS returns HTTP status code 302 and the Location header value that contains URL-encoded `/abc/`. The URL-encoded /abc/ is in the `Forward slash (/) + Object + Forward slash (/)` format. If the object does not exist, OSS returns HTTP status code 404 and continues to check ErrorFile.* **1**: OSS returns HTTP status code 404 and the NoSuchKey error code and continues to check ErrorFile.* **2**: OSS checks whether abc/index.html exists. If abc/index.html exists, the content of the object is returned. If abc/index.html does not exist, OSS returns HTTP status code 404 and continues to check ErrorFile. 
+	Type *int64 `xml:"Type"`
+}
+
+type ErrorDocument struct {
+	// The error page.
+	Key *string `xml:"Key"`
+
+	// The HTTP status code returned with the error page.
+	HttpStatus *int64 `xml:"HttpStatus"`
+}
+
+type RoutingRuleIncludeHeader struct {
+	// The key of the header. The rule is matched only when the specified header is included in the request and the header value equals the value specified by Equals.
+	Key *string `xml:"Key"`
+
+	// The value of the header. The rule is matched only when the header specified by Key is included in the request and the header value equals the specified value.
+	Equals *string `xml:"Equals"`
+}
+
+type RoutingRuleCondition struct {
+	// The prefix of object names. Only objects whose names contain the specified prefix match the rule.
+	KeyPrefixEquals *string `xml:"KeyPrefixEquals"`
+
+	// The suffix of object names. Only objects whose names end with the specified suffix match the rule.
+	KeySuffixEquals *string `xml:"KeySuffixEquals"`
+
+	// The HTTP status code. The rule is matched only when the specified object is accessed and the specified HTTP status code is returned. If the redirection rule is the mirroring-based back-to-origin rule, the value of this parameter is 404.
+	HttpErrorCodeReturnedEquals *int64 `xml:"HttpErrorCodeReturnedEquals"`
+
+	// The rule is matched only when the request contains the specified header and the header value equals the specified value. You can specify up to 10 such conditions.
+	IncludeHeaders []RoutingRuleIncludeHeader `xml:"IncludeHeader"`
+}
+
+type MirrorHeadersSet struct {
+	// The key of the header. The key can be up to 1,024 bytes in length and can contain only letters, digits, and hyphens (-). This parameter takes effect only when the value of RedirectType is Mirror. This parameter must be specified if Set is specified.
+	Key *string `xml:"Key"`
+
+	// The value of the header. The value can be up to 1,024 bytes in length and cannot contain `\r\n`. This parameter takes effect only when the value of RedirectType is Mirror. This parameter must be specified if Set is specified.
+	Value *string `xml:"Value"`
+}
+
+type RoutingRuleRedirect struct {
+	// The origin URL for mirroring-based back-to-origin. This parameter takes effect only when the value of RedirectType is Mirror. The origin URL must start with **http://** or **https://** and end with a forward slash (/). OSS adds an object name to the end of the URL to generate a back-to-origin URL. For example, the name of the object to access is myobject. If MirrorURL is set to `http://example.com/`, the back-to-origin URL is `http://example.com/myobject`. If MirrorURL is set to `http://example.com/dir1/`, the back-to-origin URL is `http://example.com/dir1/myobject`. This parameter must be specified if RedirectType is set to Mirror.
+	MirrorURL *string `xml:"MirrorURL"`
+
+	// Specifies whether to redirect the access to the address specified by Location if the origin returns an HTTP 3xx status code. This parameter takes effect only when the value of RedirectType is Mirror. For example, when a mirroring-based back-to-origin request is initiated, the origin returns 302 and Location is specified.* If you set MirrorFollowRedirect to true, OSS continues requesting the resource at the address specified by Location. The access can be redirected up to 10 times. If the access is redirected more than 10 times, the mirroring-based back-to-origin request fails.* If you set MirrorFollowRedirect to false, OSS returns 302 and passes through Location.Default value: true.
+	MirrorFollowRedirect *bool `xml:"MirrorFollowRedirect"`
+
+	// If this parameter is set to true, the prefix of the object names is replaced with the value specified by ReplaceKeyPrefixWith. If this parameter is not specified or empty, the prefix of object names is truncated. When the ReplaceKeyWith parameter is not empty, the EnableReplacePrefix parameter cannot be set to true.Default value: false.
+	EnableReplacePrefix *bool `xml:"EnableReplacePrefix"`
+
+	// The string that is used to replace the requested object name when the request is redirected. This parameter can be set to the ${key} variable, which indicates the object name in the request. For example, if ReplaceKeyWith is set to `prefix/${key}.suffix` and the object to access is test, the value of the Location header is `http://example.com/prefix/test.suffix`.
+	ReplaceKeyWith *string `xml:"ReplaceKeyWith"`
+
+	// The domain name used for redirection. The domain name must comply with the domain naming rules. For example, if you access an object named test, Protocol is set to https, and Hostname is set to `example.com`, the value of the Location header is `https://example.com/test`.
+	HostName *string `xml:"HostName"`
+
+	// Specifies whether to include parameters of the original request in the redirection request when the system runs the redirection rule or mirroring-based back-to-origin rule. For example, if the **PassQueryString** parameter is set to true, the `?a=b&c=d` parameter string is included in a request sent to OSS, and the redirection mode is 302, this parameter is added to the Location header. For example, if the request is `Location:example.com?a=b&c=d` and the redirection type is mirroring-based back-to-origin, the ?a=b\&c=d parameter string is also included in the back-to-origin request. Valid values: true and false (default).
+	PassQueryString *bool `xml:"PassQueryString"`
+
+	// The headers contained in the response that is returned when you use mirroring-based back-to-origin. This parameter takes effect only when the value of RedirectType is Mirror.
+	MirrorHeaders *MirrorHeaders `xml:"MirrorHeaders"`
+
+	// The string that is used to replace the prefix of the object name during redirection. If the prefix of an object name is empty, the string precedes the object name. You can specify only one of the ReplaceKeyWith and ReplaceKeyPrefixWith parameters in a rule. For example, if you access an object named abc/test.txt, KeyPrefixEquals is set to abc/, ReplaceKeyPrefixWith is set to def/, the value of the Location header is `http://example.com/def/test.txt`.
+	ReplaceKeyPrefixWith *string `xml:"ReplaceKeyPrefixWith"`
+
+	// The redirection type. Valid values:* **Mirror**: mirroring-based back-to-origin.* **External**: external redirection. OSS returns an HTTP 3xx status code and returns an address for you to redirect to.* **AliCDN**: redirection based on Alibaba Cloud CDN. Compared with external redirection, OSS adds an additional header to the request. After Alibaba Cloud CDN identifies the header, Alibaba Cloud CDN redirects the access to the specified address and returns the obtained data instead of the HTTP 3xx status code that redirects the access to another address. This parameter must be specified if Redirect is specified.
+	RedirectType *string `xml:"RedirectType"`
+
+	// Specifies whether to pass through SNI (Server Name Indication) to the origin during mirroring-based back-to-origin.
+	MirrorSNI *bool `xml:"MirrorSNI"`
+
+	// The protocol used for redirection. This parameter takes effect only when RedirectType is set to External or AliCDN. For example, if you access an object named test, Protocol is set to https, and Hostname is set to `example.com`, the value of the Location header is `https://example.com/test`. Valid values: **http** and **https**.
+	Protocol *string `xml:"Protocol"`
+
+	// Specifies whether to check the MD5 hash of the body of the response returned by the origin. This parameter takes effect only when the value of RedirectType is Mirror. When **MirrorCheckMd5** is set to true and the response returned by the origin includes the Content-Md5 header, OSS checks whether the MD5 hash of the obtained data matches the header value. If the MD5 hash of the obtained data does not match the header value, the obtained data is not stored in OSS. Default value: false.
+	MirrorCheckMd5 *bool `xml:"MirrorCheckMd5"`
+
+	// The HTTP redirect code in the response. This parameter takes effect only when RedirectType is set to External or AliCDN. Valid values: 301, 302, and 307.
+	HttpRedirectCode *int64 `xml:"HttpRedirectCode"`
+
+	// Specifies whether to pass through the forward slashes (/) in the request URI to the origin during mirroring-based back-to-origin.
+	MirrorPassOriginalSlashes *bool `xml:"MirrorPassOriginalSlashes"`
+
+	// This parameter plays the same role as PassQueryString and has a higher priority than PassQueryString. This parameter takes effect only when the value of RedirectType is Mirror. Default value: false.Valid values:* true * false
+	MirrorPassQueryString *bool `xml:"MirrorPassQueryString"`
+}
+
+type RoutingRules struct {
+	// The specified redirection rule or mirroring-based back-to-origin rule. You can specify up to 20 rules.
+	RoutingRules []RoutingRule `xml:"RoutingRule"`
+}
+
+type GetBucketWebsiteRequest struct {
+	// The name of the bucket.
+	Bucket *string `input:"host,bucket,required"`
+
+	RequestCommon
+}
+
+type GetBucketWebsiteResult struct {
+	// The containers of the website configuration.
+	WebsiteConfiguration *WebsiteConfiguration `output:"body,WebsiteConfiguration,xml"`
+
+	ResultCommon
+}
+
+// GetBucketWebsite Queries the static website hosting status and redirection rules configured for a bucket.
+func (c *Client) GetBucketWebsite(ctx context.Context, request *GetBucketWebsiteRequest, optFns ...func(*Options)) (*GetBucketWebsiteResult, error) {
+	var err error
+	if request == nil {
+		request = &GetBucketWebsiteRequest{}
+	}
+	input := &OperationInput{
+		OpName: "GetBucketWebsite",
+		Method: "GET",
+		Headers: map[string]string{
+			HTTPHeaderContentType: contentTypeXML,
+		},
+		Parameters: map[string]string{
+			"website": "",
+		},
+		Bucket: request.Bucket,
+	}
+	input.OpMetadata.Set(signer.SubResource, []string{"website"})
+	if err = c.marshalInput(request, input, updateContentMd5); err != nil {
+		return nil, err
+	}
+	output, err := c.invokeOperation(ctx, input, optFns)
+	if err != nil {
+		return nil, err
+	}
+	result := &GetBucketWebsiteResult{}
+	if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil {
+		return nil, c.toClientError(err, "UnmarshalOutputFail", output)
+	}
+
+	return result, err
+}
+
+type PutBucketWebsiteRequest struct {
+	// The name of the bucket.
+	Bucket *string `input:"host,bucket,required"`
+
+	// The request body schema.
+	WebsiteConfiguration *WebsiteConfiguration `input:"body,WebsiteConfiguration,xml,required"`
+
+	RequestCommon
+}
+
+type PutBucketWebsiteResult struct {
+	ResultCommon
+}
+
+// PutBucketWebsite Enables the static website hosting mode for a bucket and configures redirection rules for the bucket.
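+//
+// Illustrative usage (a sketch, not part of the upstream SDK source; assumes a
+// configured *Client named client and placeholder page names):
+//
+//	_, err := client.PutBucketWebsite(context.TODO(), &PutBucketWebsiteRequest{
+//		Bucket: Ptr("examplebucket"),
+//		WebsiteConfiguration: &WebsiteConfiguration{
+//			IndexDocument: &IndexDocument{Suffix: Ptr("index.html")},
+//			ErrorDocument: &ErrorDocument{Key: Ptr("error.html"), HttpStatus: Ptr(int64(404))},
+//		},
+//	})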
+func (c *Client) PutBucketWebsite(ctx context.Context, request *PutBucketWebsiteRequest, optFns ...func(*Options)) (*PutBucketWebsiteResult, error) { + var err error + if request == nil { + request = &PutBucketWebsiteRequest{} + } + input := &OperationInput{ + OpName: "PutBucketWebsite", + Method: "PUT", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "website": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"website"}) + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + result := &PutBucketWebsiteResult{} + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + return result, err +} + +type DeleteBucketWebsiteRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + RequestCommon +} + +type DeleteBucketWebsiteResult struct { + ResultCommon +} + +// DeleteBucketWebsite Disables the static website hosting mode and deletes the redirection rules for a bucket. +func (c *Client) DeleteBucketWebsite(ctx context.Context, request *DeleteBucketWebsiteRequest, optFns ...func(*Options)) (*DeleteBucketWebsiteResult, error) { + var err error + if request == nil { + request = &DeleteBucketWebsiteRequest{} + } + input := &OperationInput{ + OpName: "DeleteBucketWebsite", + Method: "DELETE", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "website": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"website"}) + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + result := &DeleteBucketWebsiteResult{} + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + return result, err +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_worm.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_worm.go new file mode 100644 index 000000000..48f40e21a --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_bucket_worm.go @@ -0,0 +1,273 @@ +package oss + +import ( + "context" + + "github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/signer" +) + +type InitiateWormConfiguration struct { + // The number of days for which objects can be retained. + RetentionPeriodInDays *int32 `xml:"RetentionPeriodInDays"` +} + +type ExtendWormConfiguration struct { + // The number of days for which objects can be retained. + RetentionPeriodInDays *int32 `xml:"RetentionPeriodInDays"` +} + +type WormConfiguration struct { + // The ID of the retention policy.>Note If the specified retention policy ID that is used to query the retention policy configurations of the bucket does not exist, OSS returns the 404 error code. + WormId *string `xml:"WormId"` + + // The status of the retention policy. Valid values:- InProgress: indicates that the retention policy is in the InProgress state. By default, a retention policy is in the InProgress state after it is created. 
The policy remains in this state for 24 hours.- Locked: indicates that the retention policy is in the Locked state. + State BucketWormStateType `xml:"State"` + + // The number of days for which objects can be retained. + RetentionPeriodInDays *int32 `xml:"RetentionPeriodInDays"` + + // The time at which the retention policy was created. + CreationDate *string `xml:"CreationDate"` + + // The time at which the retention policy will be expired. + //ExpirationDate *string `xml:"ExpirationDate"` +} + +type InitiateBucketWormRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + // The container of the request body. + InitiateWormConfiguration *InitiateWormConfiguration `input:"body,InitiateWormConfiguration,xml,required"` + + RequestCommon +} + +type InitiateBucketWormResult struct { + // The ID of the retention policy. + WormId *string `output:"header,x-oss-worm-id"` + + ResultCommon +} + +// InitiateBucketWorm Creates a retention policy. +func (c *Client) InitiateBucketWorm(ctx context.Context, request *InitiateBucketWormRequest, optFns ...func(*Options)) (*InitiateBucketWormResult, error) { + var err error + if request == nil { + request = &InitiateBucketWormRequest{} + } + input := &OperationInput{ + OpName: "InitiateBucketWorm", + Method: "POST", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "worm": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"worm"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &InitiateBucketWormResult{} + if err = c.unmarshalOutput(result, output, unmarshalHeader, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + return result, err +} + +type AbortBucketWormRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + RequestCommon +} + +type AbortBucketWormResult struct { + ResultCommon +} + +// AbortBucketWorm Deletes an unlocked retention policy for a bucket. +func (c *Client) AbortBucketWorm(ctx context.Context, request *AbortBucketWormRequest, optFns ...func(*Options)) (*AbortBucketWormResult, error) { + var err error + if request == nil { + request = &AbortBucketWormRequest{} + } + input := &OperationInput{ + OpName: "AbortBucketWorm", + Method: "DELETE", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "worm": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"worm"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &AbortBucketWormResult{} + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + return result, err +} + +type CompleteBucketWormRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + // The ID of the retention policy. + WormId *string `input:"query,wormId,required"` + + RequestCommon +} + +type CompleteBucketWormResult struct { + ResultCommon +} + +// CompleteBucketWorm Locks a retention policy. 
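+//
+// Illustrative usage (a sketch, not part of the upstream SDK source; assumes a
+// configured *Client named client and a WormId previously returned by
+// InitiateBucketWorm as initResult):
+//
+//	_, err := client.CompleteBucketWorm(context.TODO(), &CompleteBucketWormRequest{
+//		Bucket: Ptr("examplebucket"),
+//		WormId: initResult.WormId,
+//	})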
+func (c *Client) CompleteBucketWorm(ctx context.Context, request *CompleteBucketWormRequest, optFns ...func(*Options)) (*CompleteBucketWormResult, error) { + var err error + if request == nil { + request = &CompleteBucketWormRequest{} + } + input := &OperationInput{ + OpName: "CompleteBucketWorm", + Method: "POST", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"wormId"}) + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &CompleteBucketWormResult{} + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + return result, err +} + +type ExtendBucketWormRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + // The ID of the retention policy.> If the ID of the retention policy that specifies the number of days for which objects can be retained does not exist, the HTTP status code 404 is returned. + WormId *string `input:"query,wormId,required"` + + // The container of the request body. + ExtendWormConfiguration *ExtendWormConfiguration `input:"body,ExtendWormConfiguration,xml,required"` + + RequestCommon +} + +type ExtendBucketWormResult struct { + ResultCommon +} + +// ExtendBucketWorm Extends the retention period of objects in a bucket for which a retention policy is locked. +func (c *Client) ExtendBucketWorm(ctx context.Context, request *ExtendBucketWormRequest, optFns ...func(*Options)) (*ExtendBucketWormResult, error) { + var err error + if request == nil { + request = &ExtendBucketWormRequest{} + } + input := &OperationInput{ + OpName: "ExtendBucketWorm", + Method: "POST", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "wormExtend": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"wormExtend", "wormId"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &ExtendBucketWormResult{} + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + return result, err +} + +type GetBucketWormRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + RequestCommon +} + +type GetBucketWormResult struct { + // The container that stores the information about retention policies of the bucket. + WormConfiguration *WormConfiguration `output:"body,WormConfiguration,xml"` + + ResultCommon +} + +// GetBucketWorm Queries the retention policy configured for a bucket. 
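+//
+// Illustrative usage (a sketch, not part of the upstream SDK source; assumes a
+// configured *Client named client):
+//
+//	result, err := client.GetBucketWorm(context.TODO(), &GetBucketWormRequest{
+//		Bucket: Ptr("examplebucket"),
+//	})
+//	// On success, result.WormConfiguration reports the policy ID, state and
+//	// retention period.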
+func (c *Client) GetBucketWorm(ctx context.Context, request *GetBucketWormRequest, optFns ...func(*Options)) (*GetBucketWormResult, error) { + var err error + if request == nil { + request = &GetBucketWormRequest{} + } + input := &OperationInput{ + OpName: "GetBucketWorm", + Method: "GET", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "worm": "", + }, + Bucket: request.Bucket, + } + input.OpMetadata.Set(signer.SubResource, []string{"worm"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &GetBucketWormResult{} + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + return result, err +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_cloud_box.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_cloud_box.go new file mode 100644 index 000000000..3ca4a62d2 --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_cloud_box.go @@ -0,0 +1,89 @@ +package oss + +import ( + "context" + "github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/signer" +) + +type ListCloudBoxesRequest struct { + // The name of the bucket from which the list operation begins. + Marker *string `input:"query,marker"` + + // The maximum number of buckets that can be returned in the single query. + // Valid values: 1 to 1000. + MaxKeys int32 `input:"query,max-keys"` + + // The prefix that the names of returned buckets must contain. + Prefix *string `input:"query,prefix"` + + RequestCommon +} + +type ListCloudBoxesResult struct { + // The prefix contained in the names of the returned bucket. + Prefix *string `xml:"Prefix"` + + // The name of the bucket after which the ListBuckets operation starts. + Marker *string `xml:"Marker"` // The marker filter. + + // The maximum number of buckets that can be returned for the request. + MaxKeys int32 `xml:"MaxKeys"` + + // Indicates whether all results are returned. + // true: Only part of the results are returned for the request. + // false: All results are returned for the request. + IsTruncated bool `xml:"IsTruncated"` + + // The marker for the next ListBuckets request, which can be used to return the remaining results. + NextMarker *string `xml:"NextMarker"` + + // The container that stores information about the bucket owner. + Owner *Owner `xml:"Owner"` + + // The container that stores information about cloud box bucket. + CloudBoxes []CloudBoxProperties `xml:"CloudBoxes>CloudBox"` + + ResultCommon +} + +type CloudBoxProperties struct { + ID *string `xml:"ID"` + Name *string `xml:"Name"` + Region *string `xml:"Region"` + ControlEndpoint *string `xml:"ControlEndpoint"` + DataEndpoint *string `xml:"DataEndpoint"` +} + +// ListCloudBoxes Lists cloud box buckets that belong to the current account. 
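+//
+// Illustrative usage (a sketch, not part of the upstream SDK source; assumes a
+// configured *Client named client; paging follows the Marker/NextMarker
+// convention shown in the result struct above):
+//
+//	result, err := client.ListCloudBoxes(context.TODO(), &ListCloudBoxesRequest{})
+//	if err == nil {
+//		for _, box := range result.CloudBoxes {
+//			_ = box // e.g. inspect box.Name and box.DataEndpoint
+//		}
+//	}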
+func (c *Client) ListCloudBoxes(ctx context.Context, request *ListCloudBoxesRequest, optFns ...func(*Options)) (*ListCloudBoxesResult, error) { + var err error + if request == nil { + request = &ListCloudBoxesRequest{} + } + input := &OperationInput{ + OpName: "ListCloudBoxes", + Method: "GET", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "cloudboxes": "", + }, + } + + input.OpMetadata.Set(signer.SubResource, []string{"cloudboxes"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &ListCloudBoxesResult{} + if err = c.unmarshalOutput(result, output, unmarshalBodyXml); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + return result, err +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_common.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_common.go new file mode 100644 index 000000000..e38bc8e53 --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_common.go @@ -0,0 +1,12 @@ +package oss + +import ( + "context" +) + +func (c *Client) InvokeOperation(ctx context.Context, input *OperationInput, optFns ...func(*Options)) (*OperationOutput, error) { + if err := validateInput(input); err != nil { + return nil, err + } + return c.invokeOperation(ctx, input, optFns) +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_object.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_object.go new file mode 100644 index 000000000..52ce1cc23 --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_object.go @@ -0,0 +1,2572 @@ +package oss + +import ( + "context" + "fmt" + "hash" + "io" + "sort" + "strconv" + "time" + + "github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/signer" +) + +type PutObjectRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + // The name of the object. + Key *string `input:"path,key,required"` + + // The caching behavior of the web page when the object is downloaded. + CacheControl *string `input:"header,Cache-Control"` + + // The method that is used to access the object. + ContentDisposition *string `input:"header,Content-Disposition"` + + // The method that is used to encode the object. + ContentEncoding *string `input:"header,Content-Encoding"` + + // The size of the data in the HTTP message body. Unit: bytes. + ContentLength *int64 `input:"header,Content-Length"` + + // The MD5 hash of the object that you want to upload. + ContentMD5 *string `input:"header,Content-MD5"` + + // A standard MIME type describing the format of the contents. + ContentType *string `input:"header,Content-Type"` + + // The expiration time of the cache in UTC. + Expires *string `input:"header,Expires"` + + // Specifies whether the object that is uploaded by calling the PutObject operation overwrites the existing object that has the same name. + // When versioning is enabled or suspended for the bucket to which you want to upload the object, the x-oss-forbid-overwrite header does not take effect. In this case, the object that is uploaded by calling the PutObject operation overwrites the existing object that has the same name. Default value: false. 
+	// If you do not specify the x-oss-forbid-overwrite header or you set the x-oss-forbid-overwrite header to false, the object that is uploaded by calling the PutObject operation overwrites the existing object that has the same name.
+	// If you set the x-oss-forbid-overwrite header to true, an existing object that has the same name cannot be overwritten.
+	ForbidOverwrite *string `input:"header,x-oss-forbid-overwrite"`
+
+	// The encryption method on the server side when an object is created. Valid values: AES256, KMS, SM4.
+	// If you specify the header, the header is returned in the response.
+	// OSS uses the method that is specified by this header to encrypt the uploaded object.
+	// When you download the encrypted object, the x-oss-server-side-encryption header is included in the response and the header value is set to the algorithm that is used to encrypt the object.
+	ServerSideEncryption *string `input:"header,x-oss-server-side-encryption"`
+
+	// Specify the encryption algorithm for the object. Valid values: SM4.
+	// If this option is not specified, it indicates that the object uses the AES256 encryption algorithm.
+	// This option is only valid when x-oss-server-side-encryption is KMS.
+	ServerSideDataEncryption *string `input:"header,x-oss-server-side-data-encryption"`
+
+	// Deprecated: Please use ServerSideEncryptionKeyId
+	SSEKMSKeyId *string `input:"header,x-oss-server-side-encryption-key-id"`
+
+	// The ID of the customer master key (CMK) that is managed by Key Management Service (KMS).
+	// This header is valid only when the x-oss-server-side-encryption header is set to KMS.
+	ServerSideEncryptionKeyId *string `input:"header,x-oss-server-side-encryption-key-id"`
+
+	// The access control list (ACL) of the object.
+	Acl ObjectACLType `input:"header,x-oss-object-acl"`
+
+	// The storage class of the object.
+	StorageClass StorageClassType `input:"header,x-oss-storage-class"`
+
+	// The metadata of the object that you want to upload.
+	Metadata map[string]string `input:"header,x-oss-meta-,usermeta"`
+
+	// The tags that are specified for the object by using a key-value pair.
+	// You can specify multiple tags for an object. Example: TagA=A&TagB=B.
+	Tagging *string `input:"header,x-oss-tagging"`
+
+	// A callback parameter is a Base64-encoded string that contains multiple fields in the JSON format.
+	Callback *string `input:"header,x-oss-callback"`
+
+	// Configure custom parameters by using the callback-var parameter.
+	CallbackVar *string `input:"header,x-oss-callback-var"`
+
+	// Specify the speed limit value. The speed limit value ranges from 245760 to 838860800, with a unit of bit/s.
+	TrafficLimit int64 `input:"header,x-oss-traffic-limit"`
+
+	// Object data.
+	Body io.Reader `input:"body,nop"`
+
+	// Progress callback function
+	ProgressFn ProgressFunc
+
+	// To indicate that the requester is aware that the request and data download will incur costs
+	RequestPayer *string `input:"header,x-oss-request-payer"`
+
+	RequestCommon
+}
+
+type PutObjectResult struct {
+	// Content-Md5 for the uploaded object.
+	ContentMD5 *string `output:"header,Content-MD5"`
+
+	// Entity tag for the uploaded object.
+	ETag *string `output:"header,ETag"`
+
+	// The 64-bit CRC value of the object.
+	// This value is calculated based on the ECMA-182 standard.
+	HashCRC64 *string `output:"header,x-oss-hash-crc64ecma"`
+
+	// Version of the object.
+	VersionId *string `output:"header,x-oss-version-id"`
+
+	CallbackResult map[string]any
+
+	ResultCommon
+}
+
+// PutObject Uploads an object.
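+//
+// Illustrative usage (a sketch, not part of the upstream SDK source; assumes a
+// configured *Client named client and the standard library strings package):
+//
+//	result, err := client.PutObject(context.TODO(), &PutObjectRequest{
+//		Bucket: Ptr("examplebucket"),
+//		Key:    Ptr("exampleobject"),
+//		Body:   strings.NewReader("hello oss"),
+//	})
+//	// On success, result.ETag and result.HashCRC64 identify the stored object.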
+func (c *Client) PutObject(ctx context.Context, request *PutObjectRequest, optFns ...func(*Options)) (*PutObjectResult, error) { + var err error + if request == nil { + request = &PutObjectRequest{} + } + input := &OperationInput{ + OpName: "PutObject", + Method: "PUT", + Bucket: request.Bucket, + Key: request.Key, + } + + marshalFns := []func(any, *OperationInput) error{ + addProgress, + c.updateContentType, + c.addCrcCheck, + } + unmarshalFns := []func(result any, output *OperationOutput) error{ + unmarshalHeader, + } + + if request.Callback != nil { + marshalFns = append(marshalFns, addCallback) + unmarshalFns = append(unmarshalFns, unmarshalCallbackBody) + } else { + unmarshalFns = append(unmarshalFns, discardBody) + } + + if err = c.marshalInput(request, input, marshalFns...); err != nil { + return nil, err + } + + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &PutObjectResult{} + if err = c.unmarshalOutput(result, output, unmarshalFns...); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type HTTPRange struct { + Offset int64 + Count int64 +} + +func (r HTTPRange) FormatHTTPRange() *string { + if r.Offset == 0 && r.Count == 0 { + return nil // No specified range + } + endOffset := "" // if count == CountToEnd (0) + if r.Count > 0 { + endOffset = strconv.FormatInt((r.Offset+r.Count)-1, 10) + } + dataRange := fmt.Sprintf("bytes=%v-%s", r.Offset, endOffset) + return &dataRange +} + +type HTTPContentRange struct { + Offset int64 + Count int64 + Total int64 +} + +func (r HTTPContentRange) FormatHTTPContentRange() *string { + if r.Offset == 0 && r.Count == 0 { + return nil // No specified range + } + endOffset := "" // if count == CountToEnd (0) + if r.Count > 0 { + endOffset = strconv.FormatInt((r.Offset+r.Count)-1, 10) + } + dataRange := fmt.Sprintf("bytes %v-%s/%s", r.Offset, endOffset, strconv.FormatInt(r.Total, 10)) + return &dataRange +} + +type GetObjectRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + // The name of the object. + Key *string `input:"path,key,required"` + + // If the ETag specified in the request matches the ETag value of the object, + // the object and 200 OK are returned. Otherwise, 412 Precondition Failed is returned. + IfMatch *string `input:"header,If-Match"` + + // If the ETag specified in the request does not match the ETag value of the object, + // the object and 200 OK are returned. Otherwise, 304 Not Modified is returned. + IfNoneMatch *string `input:"header,If-None-Match"` + + // If the time specified in this header is earlier than the object modified time or is invalid, + // the object and 200 OK are returned. Otherwise, 304 Not Modified is returned. + // The time must be in GMT. Example: Fri, 13 Nov 2015 14:47:53 GMT. + IfModifiedSince *string `input:"header,If-Modified-Since"` + + // If the time specified in this header is the same as or later than the object modified time, + // the object and 200 OK are returned. Otherwise, 412 Precondition Failed is returned. + // The time must be in GMT. Example: Fri, 13 Nov 2015 14:47:53 GMT. + IfUnmodifiedSince *string `input:"header,If-Unmodified-Since"` + + // The content range of the object to be returned. + // If the value of Range is valid, the total size of the object and the content range are returned. 
+	// For example, Content-Range: bytes 0-9/44 indicates that the total size of the object is 44 bytes,
+	// and the range of data returned is the first 10 bytes.
+	// However, if the value of Range is invalid, the entire object is returned,
+	// and the response does not include the Content-Range parameter.
+	Range *string `input:"header,Range"`
+
+	// Specifies standard behavior for downloading data by range.
+	// If the value is "standard", the download behavior is modified when the specified range is not within the valid range.
+	// For an object whose size is 1,000 bytes:
+	// 1) If you set Range: bytes to 500-2000, the value at the end of the range is invalid.
+	// In this case, OSS returns HTTP status code 206 and the data that is within the range of byte 500 to byte 999.
+	// 2) If you set Range: bytes to 1000-2000, the value at the start of the range is invalid.
+	// In this case, OSS returns HTTP status code 416 and the InvalidRange error code.
+	RangeBehavior *string `input:"header,x-oss-range-behavior"`
+
+	// The cache-control header to be returned in the response.
+	ResponseCacheControl *string `input:"query,response-cache-control"`
+
+	// The content-disposition header to be returned in the response.
+	ResponseContentDisposition *string `input:"query,response-content-disposition"`
+
+	// The content-encoding header to be returned in the response.
+	ResponseContentEncoding *string `input:"query,response-content-encoding"`
+
+	// The content-language header to be returned in the response.
+	ResponseContentLanguage *string `input:"query,response-content-language"`
+
+	// The content-type header to be returned in the response.
+	ResponseContentType *string `input:"query,response-content-type"`
+
+	// The expires header to be returned in the response.
+	ResponseExpires *string `input:"query,response-expires"`
+
+	// VersionId used to reference a specific version of the object.
+	VersionId *string `input:"query,versionId"`
+
+	// Specify the speed limit value. The speed limit value ranges from 245760 to 838860800, with a unit of bit/s.
+	TrafficLimit int64 `input:"header,x-oss-traffic-limit"`
+
+	// Progress callback function
+	ProgressFn ProgressFunc
+
+	// Image processing parameters
+	Process *string `input:"query,x-oss-process"`
+
+	// To indicate that the requester is aware that the request and data download will incur costs
+	RequestPayer *string `input:"header,x-oss-request-payer"`
+
+	RequestCommon
+}
+
+type GetObjectResult struct {
+	// Size of the body in bytes. -1 indicates that the Content-Length does not exist.
+	ContentLength int64 `output:"header,Content-Length"`
+
+	// The portion of the object returned in the response.
+	ContentRange *string `output:"header,Content-Range"`
+
+	// A standard MIME type describing the format of the object data.
+	ContentType *string `output:"header,Content-Type"`
+
+	// The entity tag (ETag). An ETag is created when an object is created to identify the content of the object.
+	ETag *string `output:"header,ETag"`
+
+	// The time when the returned objects were last modified.
+	LastModified *time.Time `output:"header,Last-Modified,time"`
+
+	// The storage class of the object.
+	StorageClass *string `output:"header,x-oss-storage-class"`
+
+	// Content-Md5 for the uploaded object.
+	ContentMD5 *string `output:"header,Content-MD5"`
+
+	// A map of metadata to store with the object.
+ Metadata map[string]string `output:"header,x-oss-meta-,usermeta"`
+
+ // If the requested object is encrypted by using a server-side encryption algorithm based on entropy encoding,
+ // OSS automatically decrypts the object and returns the decrypted object after OSS receives the GetObject request.
+ // The x-oss-server-side-encryption header is included in the response to indicate
+ // the encryption algorithm used to encrypt the object on the server.
+ ServerSideEncryption *string `output:"header,x-oss-server-side-encryption"`
+
+ // The server side data encryption algorithm.
+ ServerSideDataEncryption *string `output:"header,x-oss-server-side-data-encryption"`
+
+ // Deprecated: Please use ServerSideEncryptionKeyId
+ SSEKMSKeyId *string `output:"header,x-oss-server-side-encryption-key-id"`
+
+ // The ID of the customer master key (CMK) that is managed by Key Management Service (KMS).
+ // This header is valid only when the x-oss-server-side-encryption header is set to KMS.
+ ServerSideEncryptionKeyId *string `output:"header,x-oss-server-side-encryption-key-id"`
+
+ // The type of the object.
+ ObjectType *string `output:"header,x-oss-object-type"`
+
+ // The position for the next append operation.
+ // If the type of the object is Appendable, this header is included in the response.
+ NextAppendPosition *string `output:"header,x-oss-next-append-position"`
+
+ // The 64-bit CRC value of the object.
+ // This value is calculated based on the ECMA-182 standard.
+ HashCRC64 *string `output:"header,x-oss-hash-crc64ecma"`
+
+ // The lifecycle information about the object.
+ // If lifecycle rules are configured for the object, this header is included in the response.
+ // This header contains the following parameters: expiry-date that indicates the expiration time of the object,
+ // and rule-id that indicates the ID of the matched lifecycle rule.
+ Expiration *string `output:"header,x-oss-expiration"`
+
+ // The status of the object when you restore an object.
+ // If the storage class of the bucket is Archive and a RestoreObject request is submitted, this header is included in the response.
+ Restore *string `output:"header,x-oss-restore"`
+
+ // The result of an event notification that is triggered for the object.
+ ProcessStatus *string `output:"header,x-oss-process-status"`
+
+ // The number of tags added to the object.
+ // This header is included in the response only when you have read permissions on tags.
+ TaggingCount int32 `output:"header,x-oss-tagging-count"`
+
+ // Specifies whether the object retrieved was (true) or was not (false) a Delete Marker.
+ DeleteMarker bool `output:"header,x-oss-delete-marker"`
+
+ // Version of the object.
+ VersionId *string `output:"header,x-oss-version-id"`
+
+ // Object data.
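+ // The caller is responsible for closing Body once the stream has been consumed.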
+ Body io.ReadCloser + + ResultCommon +} + +func (c *Client) GetObject(ctx context.Context, request *GetObjectRequest, optFns ...func(*Options)) (*GetObjectResult, error) { + var err error + if request == nil { + request = &GetObjectRequest{} + } + input := &OperationInput{ + OpName: "GetObject", + Method: "GET", + Bucket: request.Bucket, + Key: request.Key, + } + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &GetObjectResult{ + Body: output.Body, + } + if err = c.unmarshalOutput(result, output, unmarshalHeader); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type CopyObjectRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + // The name of the object. + Key *string `input:"path,key,required"` + + // The name of the source bucket. + SourceBucket *string `input:"nop,bucket"` + + // The path of the source object. + SourceKey *string `input:"nop,key,required"` + + // The version ID of the source object. + SourceVersionId *string `input:"nop,versionId"` + + // Specifies whether the CopyObject operation overwrites objects with the same name. The x-oss-forbid-overwrite request header does not take effect when versioning is enabled or suspended for the destination bucket. In this case, the CopyObject operation overwrites the existing object that has the same name as the destination object. + // If you do not specify the x-oss-forbid-overwrite header or set the header to false, an existing object that has the same name as the object that you want to copy is overwritten. + // If you set the x-oss-forbid-overwrite header to true, an existing object that has the same name as the object that you want to copy is not overwritten. + ForbidOverwrite *string `input:"header,x-oss-forbid-overwrite"` + + // If the ETag specified in the request matches the ETag value of the object, + // the object and 200 OK are returned. Otherwise, 412 Precondition Failed is returned. + IfMatch *string `input:"header,x-oss-copy-source-if-match"` + + // If the ETag specified in the request does not match the ETag value of the object, + // the object and 200 OK are returned. Otherwise, 304 Not Modified is returned. + IfNoneMatch *string `input:"header,x-oss-copy-source-if-none-match"` + + // If the time specified in this header is earlier than the object modified time or is invalid, + // the object and 200 OK are returned. Otherwise, 304 Not Modified is returned. + // The time must be in GMT. Example: Fri, 13 Nov 2015 14:47:53 GMT. + IfModifiedSince *string `input:"header,x-oss-copy-source-if-modified-since"` + + // If the time specified in this header is the same as or later than the object modified time, + // the object and 200 OK are returned. Otherwise, 412 Precondition Failed is returned. + // The time must be in GMT. Example: Fri, 13 Nov 2015 14:47:53 GMT. + IfUnmodifiedSince *string `input:"header,x-oss-copy-source-if-unmodified-since"` + + // The method that is used to configure the metadata of the destination object. + // COPY (default): The metadata of the source object is copied to the destination object. + // The configurations of the x-oss-server-side-encryption + // header of the source object are not copied to the destination object. 
+ // The x-oss-server-side-encryption header in the CopyObject request specifies
+ // the method used to encrypt the destination object.
+ // REPLACE: The metadata specified in the request is used as the metadata of the destination object.
+ MetadataDirective *string `input:"header,x-oss-metadata-directive"`
+
+ // The entropy coding-based encryption algorithm that OSS uses to encrypt an object when you create the object.
+ // Valid values: AES256, KMS, SM4
+ ServerSideEncryption *string `input:"header,x-oss-server-side-encryption"`
+
+ // The server side data encryption algorithm. Valid value: SM4
+ ServerSideDataEncryption *string `input:"header,x-oss-server-side-data-encryption"`
+
+ // Deprecated: Please use ServerSideEncryptionKeyId
+ SSEKMSKeyId *string `input:"header,x-oss-server-side-encryption-key-id"`
+
+ // The ID of the customer master key (CMK) that is managed by Key Management Service (KMS).
+ // This header is valid only when the x-oss-server-side-encryption header is set to KMS.
+ ServerSideEncryptionKeyId *string `input:"header,x-oss-server-side-encryption-key-id"`
+
+ // The access control list (ACL) of the object.
+ Acl ObjectACLType `input:"header,x-oss-object-acl"`
+
+ // The storage class of the object.
+ StorageClass StorageClassType `input:"header,x-oss-storage-class"`
+
+ // The caching behavior of the web page when the object is downloaded.
+ CacheControl *string `input:"header,Cache-Control"`
+
+ // The method that is used to access the object.
+ ContentDisposition *string `input:"header,Content-Disposition"`
+
+ // The method that is used to encode the object.
+ ContentEncoding *string `input:"header,Content-Encoding"`
+
+ // A standard MIME type describing the format of the contents.
+ ContentType *string `input:"header,Content-Type"`
+
+ // The expiration time of the cache in UTC.
+ Expires *string `input:"header,Expires"`
+
+ // The metadata of the object that you want to upload.
+ Metadata map[string]string `input:"header,x-oss-meta-,usermeta"`
+
+ // The tags that are specified for the object by using a key-value pair.
+ // You can specify multiple tags for an object. Example: TagA=A&TagB=B.
+ Tagging *string `input:"header,x-oss-tagging"`
+
+ // The method that is used to configure tags for the destination object.
+ // Valid values: Copy (default): The tags of the source object are copied to the destination object.
+ // Replace: The tags specified in the request are configured for the destination object.
+ TaggingDirective *string `input:"header,x-oss-tagging-directive"`
+
+ // Specify the speed limit value. The speed limit value ranges from 245760 to 838860800, with a unit of bit/s.
+ TrafficLimit int64 `input:"header,x-oss-traffic-limit"`
+
+ // Progress callback function; it works in Copier.Copy only.
+ ProgressFn ProgressFunc
+
+ // To indicate that the requester is aware that the request and data download will incur costs
+ RequestPayer *string `input:"header,x-oss-request-payer"`
+
+ RequestCommon
+}
+
+type CopyObjectResult struct {
+ // The 64-bit CRC value of the object.
+ // This value is calculated based on the ECMA-182 standard.
+ HashCRC64 *string `output:"header,x-oss-hash-crc64ecma"`
+
+ // Version of the object.
+ VersionId *string `output:"header,x-oss-version-id"`
+
+ // The version ID of the source object.
+ SourceVersionId *string `output:"header,x-oss-copy-source-version-id"` + + // If the requested object is encrypted by using a server-side encryption algorithm based on entropy encoding, + // OSS automatically decrypts the object and returns the decrypted object after OSS receives the GetObject request. + // The x-oss-server-side-encryption header is included in the response to indicate + // the encryption algorithm used to encrypt the object on the server. + ServerSideEncryption *string `output:"header,x-oss-server-side-encryption"` + + // The server side data encryption algorithm. + ServerSideDataEncryption *string `output:"header,x-oss-server-side-data-encryption"` + + // Deprecated: Please use ServerSideEncryptionKeyId + SSEKMSKeyId *string `output:"header,x-oss-server-side-encryption-key-id"` + + // The ID of the customer master key (CMK) that is managed by Key Management Service (KMS). + // This header is valid only when the x-oss-server-side-encryption header is set to KMS. + ServerSideEncryptionKeyId *string `output:"header,x-oss-server-side-encryption-key-id"` + + // The time when the returned objects were last modified. + LastModified *time.Time `xml:"LastModified"` + + // The entity tag (ETag). An ETag is created when an object is created to identify the content of the object. + ETag *string `xml:"ETag"` + + ResultCommon +} + +// CopyObject Copies objects within a bucket or between buckets in the same region +func (c *Client) CopyObject(ctx context.Context, request *CopyObjectRequest, optFns ...func(*Options)) (*CopyObjectResult, error) { + var err error + if request == nil { + request = &CopyObjectRequest{} + } + + input := &OperationInput{ + OpName: "CopyObject", + Method: "PUT", + Bucket: request.Bucket, + Key: request.Key, + Headers: map[string]string{ + "x-oss-copy-source": encodeSourceObject(request), + }, + } + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &CopyObjectResult{} + if err = c.unmarshalOutput(result, output, unmarshalBodyXml, unmarshalHeader); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type AppendObjectRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + // The name of the object. + Key *string `input:"path,key,required"` + + // The position from which the AppendObject operation starts. + // Each time an AppendObject operation succeeds, the x-oss-next-append-position header is included in + // the response to specify the position from which the next AppendObject operation starts. + Position *int64 `input:"query,position,required"` + + // The caching behavior of the web page when the object is downloaded. + CacheControl *string `input:"header,Cache-Control"` + + // The method that is used to access the object. + ContentDisposition *string `input:"header,Content-Disposition"` + + // The method that is used to encode the object. + ContentEncoding *string `input:"header,Content-Encoding"` + + // The size of the data in the HTTP message body. Unit: bytes. + ContentLength *int64 `input:"header,Content-Length"` + + // The MD5 hash of the object that you want to upload. + ContentMD5 *string `input:"header,Content-MD5"` + + // The expiration time of the cache in UTC. + Expires *string `input:"header,Expires"` + + // A standard MIME type describing the format of the contents. 
+ ContentType *string `input:"header,Content-Type"`
+
+ // Specifies whether the AppendObject operation overwrites objects with the same name. The x-oss-forbid-overwrite request header does not take effect when versioning is enabled or suspended for the destination bucket. In this case, the AppendObject operation overwrites the existing object that has the same name as the destination object.
+ // If you do not specify the x-oss-forbid-overwrite header or set the header to false, an existing object that has the same name as the object that you want to append is overwritten.
+ // If you set the x-oss-forbid-overwrite header to true, an existing object that has the same name as the object that you want to append is not overwritten.
+ ForbidOverwrite *string `input:"header,x-oss-forbid-overwrite"`
+
+ // The method used to encrypt objects on the specified OSS server. Valid values: AES256, KMS, SM4
+ // AES256: Keys managed by OSS are used for encryption and decryption (SSE-OSS).
+ // KMS: Keys managed by Key Management Service (KMS) are used for encryption and decryption.
+ // SM4: The SM4 block cipher algorithm is used for encryption and decryption.
+ ServerSideEncryption *string `input:"header,x-oss-server-side-encryption"`
+
+ // Specify the encryption algorithm for the object. Valid values: SM4.
+ // If this option is not specified, it indicates that the Object uses AES256 encryption algorithm.
+ // This option is only valid when x-oss-server-side-encryption is KMS.
+ ServerSideDataEncryption *string `input:"header,x-oss-server-side-data-encryption"`
+
+ // Deprecated: Please use ServerSideEncryptionKeyId
+ SSEKMSKeyId *string `input:"header,x-oss-server-side-encryption-key-id"`
+
+ // The ID of the customer master key (CMK) that is managed by Key Management Service (KMS).
+ // This header is valid only when the x-oss-server-side-encryption header is set to KMS.
+ ServerSideEncryptionKeyId *string `input:"header,x-oss-server-side-encryption-key-id"`
+
+ // The access control list (ACL) of the object.
+ Acl ObjectACLType `input:"header,x-oss-object-acl"`
+
+ // The storage class of the object.
+ StorageClass StorageClassType `input:"header,x-oss-storage-class"`
+
+ // The metadata of the object that you want to upload.
+ Metadata map[string]string `input:"header,x-oss-meta-,usermeta"`
+
+ // The tags that are specified for the object by using a key-value pair.
+ // You can specify multiple tags for an object. Example: TagA=A&TagB=B.
+ Tagging *string `input:"header,x-oss-tagging"`
+
+ // Specify the speed limit value. The speed limit value ranges from 245760 to 838860800, with a unit of bit/s.
+ TrafficLimit int64 `input:"header,x-oss-traffic-limit"`
+
+ // Object data.
+ Body io.Reader `input:"body,nop"`
+
+ // Specify the initial value of CRC64. If not set, the crc check is ignored.
+ InitHashCRC64 *string
+
+ // Progress callback function
+ ProgressFn ProgressFunc
+
+ // To indicate that the requester is aware that the request and data download will incur costs
+ RequestPayer *string `input:"header,x-oss-request-payer"`
+
+ RequestCommon
+}
+
+type AppendObjectResult struct {
+ // Version of the object.
+ VersionId *string `output:"header,x-oss-version-id"`
+
+ // The 64-bit CRC value of the object.
+ // This value is calculated based on the ECMA-182 standard.
+ HashCRC64 *string `output:"header,x-oss-hash-crc64ecma"`
+
+ // The position that must be provided in the next request, which is the current length of the object.
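+ // Pass this value as the Position parameter of the next AppendObject request.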
+ NextPosition int64 `output:"header,x-oss-next-append-position"` + + // The encryption method on the server side when an object is created. + // Valid values: AES256, KMS, SM4 + ServerSideEncryption *string `output:"header,x-oss-server-side-encryption"` + + // The server side data encryption algorithm. + ServerSideDataEncryption *string `output:"header,x-oss-server-side-data-encryption"` + + // Deprecated: Please use ServerSideEncryptionKeyId + SSEKMSKeyId *string `output:"header,x-oss-server-side-encryption-key-id"` + + // The ID of the customer master key (CMK) that is managed by Key Management Service (KMS). + // This header is valid only when the x-oss-server-side-encryption header is set to KMS. + ServerSideEncryptionKeyId *string `output:"header,x-oss-server-side-encryption-key-id"` + + ResultCommon +} + +// AppendObject Uploads an object by appending the object to an existing object. +// Objects created by using the AppendObject operation are appendable objects. +func (c *Client) AppendObject(ctx context.Context, request *AppendObjectRequest, optFns ...func(*Options)) (*AppendObjectResult, error) { + var err error + if request == nil { + request = &AppendObjectRequest{} + } + input := &OperationInput{ + OpName: "AppendObject", + Method: "POST", + Parameters: map[string]string{"append": ""}, + Bucket: request.Bucket, + Key: request.Key, + } + + marshalFns := []func(any, *OperationInput) error{ + addProgress, + c.updateContentType, + } + + unmarshalFns := []func(any, *OperationOutput) error{ + discardBody, + unmarshalHeader, + } + + // AppendObject is not idempotent, and cannot be retried + if c.hasFeature(FeatureEnableCRC64CheckUpload) && request.InitHashCRC64 != nil { + var init uint64 + init, err = strconv.ParseUint(ToString(request.InitHashCRC64), 10, 64) + if err != nil { + return nil, NewErrParamInvalid("request.InitHashCRC64") + } + var w io.Writer = NewCRC64(init) + input.OpMetadata.Add(OpMetaKeyRequestBodyTracker, w) + unmarshalFns = append(unmarshalFns, func(result any, output *OperationOutput) error { + return checkResponseHeaderCRC64(fmt.Sprint(w.(hash.Hash64).Sum64()), output.Headers) + }) + } + + if err = c.marshalInput(request, input, marshalFns...); err != nil { + return nil, err + } + + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &AppendObjectResult{} + if err = c.unmarshalOutput(result, output, unmarshalFns...); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type DeleteObjectRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + // The name of the object. + Key *string `input:"path,key,required"` + + // The version ID of the source object. + VersionId *string `input:"query,versionId"` + + // To indicate that the requester is aware that the request and data download will incur costs + RequestPayer *string `input:"header,x-oss-request-payer"` + + RequestCommon +} + +type DeleteObjectResult struct { + // Version of the object. + VersionId *string `output:"header,x-oss-version-id"` + + // Specifies whether the object retrieved was (true) or was not (false) a Delete Marker. + DeleteMarker bool `output:"header,x-oss-delete-marker"` + + ResultCommon +} + +// DeleteObject Deletes an object. 
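+//
+// A minimal usage sketch; the client value and the bucket and object names
+// below are hypothetical:
+//
+//	client := NewClient(cfg) // cfg is an already-built *Config
+//	_, err := client.DeleteObject(context.TODO(), &DeleteObjectRequest{
+//		Bucket: Ptr("example-bucket"),
+//		Key:    Ptr("example-object"),
+//	})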
+func (c *Client) DeleteObject(ctx context.Context, request *DeleteObjectRequest, optFns ...func(*Options)) (*DeleteObjectResult, error) {
+ var err error
+ if request == nil {
+ request = &DeleteObjectRequest{}
+ }
+ input := &OperationInput{
+ OpName: "DeleteObject",
+ Method: "DELETE",
+ Bucket: request.Bucket,
+ Key: request.Key,
+ }
+ if err = c.marshalInput(request, input, updateContentMd5); err != nil {
+ return nil, err
+ }
+
+ output, err := c.invokeOperation(ctx, input, optFns)
+ if err != nil {
+ return nil, err
+ }
+ result := &DeleteObjectResult{}
+ if err = c.unmarshalOutput(result, output, discardBody, unmarshalHeader); err != nil {
+ return nil, c.toClientError(err, "UnmarshalOutputFail", output)
+ }
+
+ return result, err
+}
+
+type DeleteMultipleObjectsRequest struct {
+ // The name of the bucket.
+ Bucket *string `input:"host,bucket,required"`
+
+ // The encoding type of the object names in the response. Valid value: url
+ EncodingType *string `input:"query,encoding-type"`
+
+ // The size of the data in the HTTP message body. Unit: bytes.
+ ContentLength int64 `input:"header,Content-Length"`
+
+ // The container that stores information about the objects that you want to delete.
+ Objects []DeleteObject `input:"nop,objects,required"`
+
+ // Specifies whether to enable the Quiet return mode.
+ // The DeleteMultipleObjects operation provides a verbose mode and a quiet mode. Valid values: true, false
+ Quiet bool
+
+ // To indicate that the requester is aware that the request and data download will incur costs
+ RequestPayer *string `input:"header,x-oss-request-payer"`
+
+ RequestCommon
+}
+
+type DeleteObject struct {
+ // The name of the object that you want to delete.
+ Key *string `xml:"Key"`
+
+ // The version ID of the object that you want to delete.
+ VersionId *string `xml:"VersionId"`
+}
+
+type DeleteMultipleObjectsResult struct {
+ // The container that stores information about the deleted objects.
+ DeletedObjects []DeletedInfo `xml:"Deleted"`
+
+ // The encoding type of the name of the deleted object in the response.
+ // If encoding-type is specified in the request, the object name is encoded in the returned result.
+ EncodingType *string `xml:"EncodingType"`
+
+ ResultCommon
+}
+
+type DeletedInfo struct {
+ // The name of the deleted object.
+ Key *string `xml:"Key"`
+
+ // The version ID of the object that you deleted.
+ VersionId *string `xml:"VersionId"`
+
+ // Indicates whether the deleted version is a delete marker.
+ DeleteMarker bool `xml:"DeleteMarker"`
+
+ // The version ID of the delete marker.
+ DeleteMarkerVersionId *string `xml:"DeleteMarkerVersionId"`
+}
+
+// DeleteMultipleObjects Deletes multiple objects from a bucket.
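+//
+// A minimal usage sketch (names are hypothetical); OSS accepts up to 1000
+// objects per request:
+//
+//	result, err := client.DeleteMultipleObjects(context.TODO(), &DeleteMultipleObjectsRequest{
+//		Bucket: Ptr("example-bucket"),
+//		Objects: []DeleteObject{
+//			{Key: Ptr("object-1")},
+//			{Key: Ptr("object-2")},
+//		},
+//	})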
+func (c *Client) DeleteMultipleObjects(ctx context.Context, request *DeleteMultipleObjectsRequest, optFns ...func(*Options)) (*DeleteMultipleObjectsResult, error) {
+ var err error
+ if request == nil {
+ request = &DeleteMultipleObjectsRequest{}
+ }
+ input := &OperationInput{
+ OpName: "DeleteMultipleObjects",
+ Method: "POST",
+ Headers: map[string]string{
+ HTTPHeaderContentType: contentTypeXML,
+ },
+ Parameters: map[string]string{
+ "delete": "",
+ "encoding-type": "url",
+ },
+ Bucket: request.Bucket,
+ }
+ if err = c.marshalInput(request, input, marshalDeleteObjects, updateContentMd5, enableNonStream); err != nil {
+ return nil, err
+ }
+
+ output, err := c.invokeOperation(ctx, input, optFns)
+ if err != nil {
+ return nil, err
+ }
+ result := &DeleteMultipleObjectsResult{}
+ if err = c.unmarshalOutput(result, output, unmarshalBodyXml, unmarshalHeader, unmarshalEncodeType); err != nil {
+ return nil, c.toClientError(err, "UnmarshalOutputFail", output)
+ }
+ return result, err
+}
+
+type HeadObjectRequest struct {
+ // The name of the bucket.
+ Bucket *string `input:"host,bucket,required"`
+
+ // The name of the object.
+ Key *string `input:"path,key,required"`
+
+ // The version ID of the source object.
+ VersionId *string `input:"query,versionId"`
+
+ // If the ETag specified in the request matches the ETag value of the object,
+ // the object and 200 OK are returned. Otherwise, 412 Precondition Failed is returned.
+ IfMatch *string `input:"header,If-Match"`
+
+ // If the ETag specified in the request does not match the ETag value of the object,
+ // the object and 200 OK are returned. Otherwise, 304 Not Modified is returned.
+ IfNoneMatch *string `input:"header,If-None-Match"`
+
+ // If the time specified in this header is earlier than the object modified time or is invalid,
+ // the object and 200 OK are returned. Otherwise, 304 Not Modified is returned.
+ // The time must be in GMT. Example: Fri, 13 Nov 2015 14:47:53 GMT.
+ IfModifiedSince *string `input:"header,If-Modified-Since"`
+
+ // If the time specified in this header is the same as or later than the object modified time,
+ // the object and 200 OK are returned. Otherwise, 412 Precondition Failed is returned.
+ // The time must be in GMT. Example: Fri, 13 Nov 2015 14:47:53 GMT.
+ IfUnmodifiedSince *string `input:"header,If-Unmodified-Since"`
+
+ // To indicate that the requester is aware that the request and data download will incur costs
+ RequestPayer *string `input:"header,x-oss-request-payer"`
+
+ RequestCommon
+}
+
+type HeadObjectResult struct {
+ // Size of the body in bytes. -1 indicates that the Content-Length does not exist.
+ ContentLength int64 `output:"header,Content-Length"`
+
+ // A standard MIME type describing the format of the object data.
+ ContentType *string `output:"header,Content-Type"`
+
+ // The entity tag (ETag). An ETag is created when an object is created to identify the content of the object.
+ ETag *string `output:"header,ETag"`
+
+ // The time when the returned objects were last modified.
+ LastModified *time.Time `output:"header,Last-Modified,time"`
+
+ // The storage class of the object.
+ StorageClass *string `output:"header,x-oss-storage-class"`
+
+ // Content-Md5 for the uploaded object.
+ ContentMD5 *string `output:"header,Content-MD5"`
+
+ // A map of metadata to store with the object.
+ Metadata map[string]string `output:"header,x-oss-meta-,usermeta"`
+
+ // If the requested object is encrypted by using a server-side encryption algorithm based on entropy encoding,
+ // OSS automatically decrypts the object and returns the decrypted object after OSS receives the GetObject request.
+ // The x-oss-server-side-encryption header is included in the response to indicate
+ // the encryption algorithm used to encrypt the object on the server.
+ ServerSideEncryption *string `output:"header,x-oss-server-side-encryption"`
+
+ // The server side data encryption algorithm.
+ ServerSideDataEncryption *string `output:"header,x-oss-server-side-data-encryption"`
+
+ // Deprecated: Please use ServerSideEncryptionKeyId
+ SSEKMSKeyId *string `output:"header,x-oss-server-side-encryption-key-id"`
+
+ // The ID of the customer master key (CMK) that is managed by Key Management Service (KMS).
+ // This header is valid only when the x-oss-server-side-encryption header is set to KMS.
+ ServerSideEncryptionKeyId *string `output:"header,x-oss-server-side-encryption-key-id"`
+
+ // The type of the object.
+ ObjectType *string `output:"header,x-oss-object-type"`
+
+ // The position for the next append operation.
+ // If the type of the object is Appendable, this header is included in the response.
+ NextAppendPosition *string `output:"header,x-oss-next-append-position"`
+
+ // The 64-bit CRC value of the object.
+ // This value is calculated based on the ECMA-182 standard.
+ HashCRC64 *string `output:"header,x-oss-hash-crc64ecma"`
+
+ // The lifecycle information about the object.
+ // If lifecycle rules are configured for the object, this header is included in the response.
+ // This header contains the following parameters: expiry-date that indicates the expiration time of the object,
+ // and rule-id that indicates the ID of the matched lifecycle rule.
+ Expiration *string `output:"header,x-oss-expiration"`
+
+ // The status of the object when you restore an object.
+ // If the storage class of the bucket is Archive and a RestoreObject request is submitted, this header is included in the response.
+ Restore *string `output:"header,x-oss-restore"`
+
+ // The result of an event notification that is triggered for the object.
+ ProcessStatus *string `output:"header,x-oss-process-status"`
+
+ // The requester. This header is included in the response if the pay-by-requester mode
+ // is enabled for the bucket and the requester is not the bucket owner. The value of this header is requester.
+ RequestCharged *string `output:"header,x-oss-request-charged"`
+
+ // The number of tags added to the object.
+ // This header is included in the response only when you have read permissions on tags.
+ TaggingCount int32 `output:"header,x-oss-tagging-count"`
+
+ // Version of the object.
+ VersionId *string `output:"header,x-oss-version-id"`
+
+ // The origins allowed for cross-origin resource sharing (CORS).
+ // If a CORS rule is configured for the bucket that stores the object and the Origin header
+ // in the request meets the CORS rule, this header is included in the response.
+ AllowOrigin *string `output:"header,Access-Control-Allow-Origin"`
+
+ // The methods allowed for CORS. If a CORS rule is configured for the bucket that stores the object
+ // and the Access-Control-Request-Method header in the request meets the CORS rule, this header is included in the response.
+ AllowMethods *string `output:"header,Access-Control-Allow-Methods"`
+
+ // The maximum caching period for CORS. 
If a CORS rule is configured for the bucket that stores
+ // the object and the request meets the CORS rule, this header is included in the response.
+ AllowAge *string `output:"header,Access-Control-Allow-Age"`
+
+ // The headers allowed for CORS. If a CORS rule is configured for the bucket that stores
+ // the object and the request meets the CORS rule, this header is included in the response.
+ AllowHeaders *string `output:"header,Access-Control-Allow-Headers"`
+
+ // The headers that can be accessed by JavaScript applications on the client.
+ // If a CORS rule is configured for the bucket that stores the object and the request meets
+ // the CORS rule, this header is included in the response.
+ ExposeHeaders *string `output:"header,Access-Control-Expose-Headers"`
+
+ // The caching behavior of the web page when the object is downloaded.
+ CacheControl *string `output:"header,Cache-Control"`
+
+ // The method that is used to access the object.
+ ContentDisposition *string `output:"header,Content-Disposition"`
+
+ // The method that is used to encode the object.
+ ContentEncoding *string `output:"header,Content-Encoding"`
+
+ // The expiration time of the cache in UTC.
+ Expires *string `output:"header,Expires"`
+
+ // The time when the storage class of the object is converted to Cold Archive or Deep Cold Archive based on lifecycle rules.
+ TransitionTime *time.Time `output:"header,x-oss-transition-time,time"`
+
+ ResultCommon
+}
+
+// HeadObject Queries the metadata of an object. The content of the object is not returned.
+func (c *Client) HeadObject(ctx context.Context, request *HeadObjectRequest, optFns ...func(*Options)) (*HeadObjectResult, error) {
+ var err error
+ if request == nil {
+ request = &HeadObjectRequest{}
+ }
+ input := &OperationInput{
+ OpName: "HeadObject",
+ Method: "HEAD",
+ Bucket: request.Bucket,
+ Key: request.Key,
+ }
+ if err = c.marshalInput(request, input, updateContentMd5); err != nil {
+ return nil, err
+ }
+
+ output, err := c.invokeOperation(ctx, input, optFns)
+ if err != nil {
+ return nil, err
+ }
+ result := &HeadObjectResult{}
+ if err = c.unmarshalOutput(result, output, discardBody, unmarshalHeader); err != nil {
+ return nil, c.toClientError(err, "UnmarshalOutputFail", output)
+ }
+
+ return result, err
+}
+
+type GetObjectMetaRequest struct {
+ // The name of the bucket.
+ Bucket *string `input:"host,bucket,required"`
+
+ // The name of the object.
+ Key *string `input:"path,key,required"`
+
+ // The version ID of the source object.
+ VersionId *string `input:"query,versionId"`
+
+ // To indicate that the requester is aware that the request and data download will incur costs
+ RequestPayer *string `input:"header,x-oss-request-payer"`
+
+ RequestCommon
+}
+
+type GetObjectMetaResult struct {
+ // Size of the body in bytes. -1 indicates that the Content-Length does not exist.
+ ContentLength int64 `output:"header,Content-Length"`
+
+ // The entity tag (ETag). An ETag is created when an object is created to identify the content of the object.
+ ETag *string `output:"header,ETag"`
+
+ // The time when the returned objects were last modified.
+ LastModified *time.Time `output:"header,Last-Modified,time"`
+
+ // The time when the object was last accessed.
+ LastAccessTime *time.Time `output:"header,x-oss-last-access-time,time"`
+
+ // Version of the object.
+ VersionId *string `output:"header,x-oss-version-id"`
+
+ // The 64-bit CRC value of the object.
+ // This value is calculated based on the ECMA-182 standard.
+ HashCRC64 *string `output:"header,x-oss-hash-crc64ecma"` + + // The time when the storage class of the object is converted to Cold Archive or Deep Cold Archive based on lifecycle rules. + TransitionTime *time.Time `output:"header,x-oss-transition-time,time"` + + ResultCommon +} + +// GetObjectMeta Queries the metadata of an object, including ETag, Size, and LastModified. +// The content of the object is not returned. +func (c *Client) GetObjectMeta(ctx context.Context, request *GetObjectMetaRequest, optFns ...func(*Options)) (*GetObjectMetaResult, error) { + var err error + if request == nil { + request = &GetObjectMetaRequest{} + } + input := &OperationInput{ + OpName: "GetObjectMeta", + Method: "HEAD", + Bucket: request.Bucket, + Key: request.Key, + Parameters: map[string]string{ + "objectMeta": "", + }, + } + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + result := &GetObjectMetaResult{} + if err = c.unmarshalOutput(result, output, discardBody, unmarshalHeader); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type RestoreObjectRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + // The name of the object. + Key *string `input:"path,key,required"` + + // The version ID of the source object. + VersionId *string `input:"query,versionId"` + + // The container that stores information about the RestoreObject request. + RestoreRequest *RestoreRequest `input:"body,RestoreRequest,xml"` + + // To indicate that the requester is aware that the request and data download will incur costs + RequestPayer *string `input:"header,x-oss-request-payer"` + + RequestCommon +} + +type RestoreRequest struct { + // The duration within which the restored object remains in the restored state. + Days int32 `xml:"Days"` + + // The restoration priority of Cold Archive or Deep Cold Archive objects. Valid values:Expedited,Standard,Bulk + Tier *string `xml:"JobParameters>Tier"` +} + +type RestoreObjectResult struct { + // Version of the object. + VersionId *string `output:"header,x-oss-version-id"` + + // The restoration priority. + // This header is displayed only for the Cold Archive or Deep Cold Archive object in the restored state. + RestorePriority *string `output:"header,x-oss-object-restore-priority"` + + ResultCommon +} + +// RestoreObject Restores Archive, Cold Archive, or Deep Cold Archive objects. +func (c *Client) RestoreObject(ctx context.Context, request *RestoreObjectRequest, optFns ...func(*Options)) (*RestoreObjectResult, error) { + var err error + if request == nil { + request = &RestoreObjectRequest{} + } + input := &OperationInput{ + OpName: "RestoreObject", + Method: "POST", + Bucket: request.Bucket, + Key: request.Key, + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "restore": "", + }, + } + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + result := &RestoreObjectResult{} + if err = c.unmarshalOutput(result, output, discardBody, unmarshalHeader); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type PutObjectAclRequest struct { + // The name of the bucket. 
+ Bucket *string `input:"host,bucket,required"` + + // The name of the object. + Key *string `input:"path,key,required"` + + // The access control list (ACL) of the object. + Acl ObjectACLType `input:"header,x-oss-object-acl,required"` + + // The version ID of the source object. + VersionId *string `input:"query,versionId"` + + // To indicate that the requester is aware that the request and data download will incur costs + RequestPayer *string `input:"header,x-oss-request-payer"` + + RequestCommon +} + +type PutObjectAclResult struct { + // Version of the object. + VersionId *string `output:"header,x-oss-version-id"` + + ResultCommon +} + +// PutObjectAcl You can call this operation to modify the access control list (ACL) of an object. +func (c *Client) PutObjectAcl(ctx context.Context, request *PutObjectAclRequest, optFns ...func(*Options)) (*PutObjectAclResult, error) { + var err error + if request == nil { + request = &PutObjectAclRequest{} + } + input := &OperationInput{ + OpName: "PutObjectAcl", + Method: "PUT", + Bucket: request.Bucket, + Key: request.Key, + Parameters: map[string]string{ + "acl": "", + }, + } + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &PutObjectAclResult{} + if err = c.unmarshalOutput(result, output, discardBody, unmarshalHeader); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type GetObjectAclRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + // The name of the object. + Key *string `input:"path,key,required"` + + // The version ID of the source object. + VersionId *string `input:"query,versionId"` + + // To indicate that the requester is aware that the request and data download will incur costs + RequestPayer *string `input:"header,x-oss-request-payer"` + + RequestCommon +} + +type GetObjectAclResult struct { + // The ACL of the object. Default value: default. + ACL *string `xml:"AccessControlList>Grant"` + + // The container that stores information about the object owner. + Owner *Owner `xml:"Owner"` + + // Version of the object. + VersionId *string `output:"header,x-oss-version-id"` + + ResultCommon +} + +// GetObjectAcl Queries the access control list (ACL) of an object in a bucket. +func (c *Client) GetObjectAcl(ctx context.Context, request *GetObjectAclRequest, optFns ...func(*Options)) (*GetObjectAclResult, error) { + var err error + if request == nil { + request = &GetObjectAclRequest{} + } + input := &OperationInput{ + OpName: "GetObjectAcl", + Method: "GET", + Bucket: request.Bucket, + Key: request.Key, + Parameters: map[string]string{ + "acl": "", + }, + } + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &GetObjectAclResult{} + if err = c.unmarshalOutput(result, output, unmarshalBodyXml, unmarshalHeader); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type InitiateMultipartUploadRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + // The name of the object. + Key *string `input:"path,key,required"` + + // The encoding type of the object names in the response. 
Valid value: url
+ EncodingType *string `input:"query,encoding-type"`
+
+ // The caching behavior of the web page when the object is downloaded.
+ CacheControl *string `input:"header,Cache-Control"`
+
+ // The method that is used to access the object.
+ ContentDisposition *string `input:"header,Content-Disposition"`
+
+ // The method that is used to encode the object.
+ ContentEncoding *string `input:"header,Content-Encoding"`
+
+ // A standard MIME type describing the format of the contents.
+ ContentType *string `input:"header,Content-Type"`
+
+ // The expiration time of the cache in UTC.
+ Expires *string `input:"header,Expires"`
+
+ // Specifies whether the InitiateMultipartUpload operation overwrites the existing object that has the same name as the object that you want to upload. If versioning is enabled or suspended for the bucket to which you want to upload the object, the x-oss-forbid-overwrite header does not take effect. As a result, the object that is uploaded by calling the InitiateMultipartUpload operation overwrites the existing object that has the same name.
+ // If you do not specify the x-oss-forbid-overwrite header or you set the x-oss-forbid-overwrite header to false, the operation overwrites an existing object that has the same name.
+ // If you set the x-oss-forbid-overwrite header to true, an existing object that has the same name cannot be overwritten.
+ ForbidOverwrite *string `input:"header,x-oss-forbid-overwrite"`
+
+ // The server-side encryption method that is used to encrypt each part of the object that you want to upload. Valid values: AES256, KMS, SM4.
+ // If you specify this header in the request, this header is included in the response.
+ // OSS uses the method specified by this header to encrypt each uploaded part.
+ // When you download the object, the x-oss-server-side-encryption header is included in the response and the header value is set to the method that is used to encrypt the object.
+ ServerSideEncryption *string `input:"header,x-oss-server-side-encryption"`
+
+ // The server side data encryption algorithm. Valid values: SM4
+ // If this option is not specified, it indicates that the Object uses AES256 encryption algorithm.
+ // This option is only valid when x-oss-server-side-encryption is KMS.
+ ServerSideDataEncryption *string `input:"header,x-oss-server-side-data-encryption"`
+
+ // Deprecated: Please use ServerSideEncryptionKeyId
+ SSEKMSKeyId *string `input:"header,x-oss-server-side-encryption-key-id"`
+
+ // The ID of the customer master key (CMK) that is managed by Key Management Service (KMS).
+ // This header is valid only when the x-oss-server-side-encryption header is set to KMS.
+ ServerSideEncryptionKeyId *string `input:"header,x-oss-server-side-encryption-key-id"`
+
+ // The storage class of the object.
+ StorageClass StorageClassType `input:"header,x-oss-storage-class"`
+
+ // The metadata of the object that you want to upload.
+ Metadata map[string]string `input:"header,x-oss-meta-,usermeta"`
+
+ // The tags that are specified for the object by using a key-value pair.
+ // You can specify multiple tags for an object. Example: TagA=A&TagB=B.
+ Tagging *string `input:"header,x-oss-tagging"`
+
+ // The total size when using client side encryption, only valid in EncryptionClient
+ CSEDataSize *int64
+
+ // The part size when using client side encryption, only valid in EncryptionClient
+ // CSEPartSize must be aligned to the secret iv length
+ CSEPartSize *int64
+
+ // To indicate that the requester is aware that the request and data download will incur costs
+ RequestPayer *string `input:"header,x-oss-request-payer"`
+
+ // To disable the feature that Content-Type is automatically added based on the object name if not specified.
+ DisableAutoDetectMimeType bool
+
+ RequestCommon
+}
+
+type InitiateMultipartUploadResult struct {
+ // The name of the bucket to which the object is uploaded by the multipart upload task.
+ Bucket *string `xml:"Bucket"`
+
+ // The name of the object that is uploaded by the multipart upload task.
+ Key *string `xml:"Key"`
+
+ // The upload ID that uniquely identifies the multipart upload task.
+ UploadId *string `xml:"UploadId"`
+
+ // The encoding type of the object names in the response. Valid value: url
+ EncodingType *string `xml:"EncodingType"`
+
+ // The encryption context for multipart upload when using client side encryption, only valid in EncryptionClient
+ CSEMultiPartContext *EncryptionMultiPartContext
+
+ ResultCommon
+}
+
+// InitiateMultipartUpload Initiates a multipart upload task before you can upload data in parts to Object Storage Service (OSS).
+func (c *Client) InitiateMultipartUpload(ctx context.Context, request *InitiateMultipartUploadRequest, optFns ...func(*Options)) (*InitiateMultipartUploadResult, error) {
+ var err error
+ if request == nil {
+ request = &InitiateMultipartUploadRequest{}
+ }
+ input := &OperationInput{
+ OpName: "InitiateMultipartUpload",
+ Method: "POST",
+ Bucket: request.Bucket,
+ Key: request.Key,
+ Parameters: map[string]string{
+ "uploads": "",
+ "encoding-type": "url",
+ },
+ }
+
+ marshalFns := []func(any, *OperationInput) error{
+ updateContentMd5,
+ }
+ if !request.DisableAutoDetectMimeType {
+ marshalFns = append(marshalFns, c.updateContentType)
+ }
+ if err = c.marshalInput(request, input, marshalFns...); err != nil {
+ return nil, err
+ }
+
+ output, err := c.invokeOperation(ctx, input, optFns)
+ if err != nil {
+ return nil, err
+ }
+
+ result := &InitiateMultipartUploadResult{}
+ if err = c.unmarshalOutput(result, output, unmarshalBodyXml, unmarshalEncodeType); err != nil {
+ return nil, c.toClientError(err, "UnmarshalOutputFail", output)
+ }
+
+ return result, err
+}
+
+type UploadPartRequest struct {
+ // The name of the bucket.
+ Bucket *string `input:"host,bucket,required"`
+
+ // The name of the object.
+ Key *string `input:"path,key,required"`
+
+ // Each uploaded part is identified by a number.
+ // Value: 1-10000
+ // The size limit of a single part is between 100 KB and 5 GB.
+ PartNumber int32 `input:"query,partNumber,required"`
+
+ // The ID of the multipart upload task.
+ UploadId *string `input:"query,uploadId,required"`
+
+ // The MD5 hash of the object that you want to upload.
+ ContentMD5 *string `input:"header,Content-MD5"`
+
+ // Specify the speed limit value. The speed limit value ranges from 245760 to 838860800, with a unit of bit/s.
+ TrafficLimit int64 `input:"header,x-oss-traffic-limit"`
+
+ // Object data.
+ Body io.Reader `input:"body,nop"`
+
+ // Progress callback function
+ ProgressFn ProgressFunc
+
+ // The size of the data in the HTTP message body. Unit: bytes.
+ ContentLength *int64 `input:"header,Content-Length"`
+
+ // The encryption context for multipart upload when using client side encryption, only valid in EncryptionClient
+ CSEMultiPartContext *EncryptionMultiPartContext
+
+ // To indicate that the requester is aware that the request and data download will incur costs
+ RequestPayer *string `input:"header,x-oss-request-payer"`
+
+ RequestCommon
+}
+
+type UploadPartResult struct {
+ // Entity tag for the uploaded part.
+ ETag *string `output:"header,ETag"`
+
+ // The MD5 hash of the part that you want to upload.
+ ContentMD5 *string `output:"header,Content-MD5"`
+
+ // The 64-bit CRC value of the part.
+ // This value is calculated based on the ECMA-182 standard.
+ HashCRC64 *string `output:"header,x-oss-hash-crc64ecma"`
+
+ ResultCommon
+}
+
+// UploadPart Uploads data in parts (blocks) based on the specified object name and upload ID.
+func (c *Client) UploadPart(ctx context.Context, request *UploadPartRequest, optFns ...func(*Options)) (*UploadPartResult, error) {
+ var err error
+ if request == nil {
+ request = &UploadPartRequest{}
+ }
+ input := &OperationInput{
+ OpName: "UploadPart",
+ Method: "PUT",
+ Bucket: request.Bucket,
+ Key: request.Key,
+ }
+
+ marshalFns := []func(any, *OperationInput) error{
+ addProgress,
+ c.addCrcCheck,
+ }
+
+ if err = c.marshalInput(request, input, marshalFns...); err != nil {
+ return nil, err
+ }
+ output, err := c.invokeOperation(ctx, input, optFns)
+ if err != nil {
+ return nil, err
+ }
+
+ result := &UploadPartResult{}
+ if err = c.unmarshalOutput(result, output, discardBody, unmarshalHeader); err != nil {
+ return nil, c.toClientError(err, "UnmarshalOutputFail", output)
+ }
+
+ return result, err
+}
+
+type UploadPartCopyRequest struct {
+ // The name of the bucket.
+ Bucket *string `input:"host,bucket,required"`
+
+ // The name of the object.
+ Key *string `input:"path,key,required"`
+
+ // Each uploaded part is identified by a number.
+ // Value: 1-10000
+ // The size limit of a single part is between 100 KB and 5 GB.
+ PartNumber int32 `input:"query,partNumber,required"`
+
+ // The ID of the multipart upload task.
+ UploadId *string `input:"query,uploadId,required"`
+
+ // The name of the source bucket.
+ SourceBucket *string `input:"nop,bucket"`
+
+ // The path of the source object.
+ SourceKey *string `input:"nop,key,required"`
+
+ // The version ID of the source object.
+ SourceVersionId *string `input:"nop,versionId"`
+
+ // The range of bytes to copy data from the source object.
+ Range *string `input:"header,x-oss-copy-source-range"`
+
+ // The copy operation condition. If the ETag value of the source object is
+ // the same as the ETag value provided by the user, OSS copies data. Otherwise,
+ // OSS returns 412 Precondition Failed.
+ IfMatch *string `input:"header,x-oss-copy-source-if-match"`
+
+ // The object transfer condition. If the input ETag value does not match the ETag value of the object,
+ // the system transfers the object normally and returns 200 OK. Otherwise, OSS returns 304 Not Modified.
+ IfNoneMatch *string `input:"header,x-oss-copy-source-if-none-match"`
+
+ // The object transfer condition. If the specified time is earlier than the actual modified time of the object,
+ // the system transfers the object normally and returns 200 OK. Otherwise, OSS returns 304 Not Modified.
+ // The time must be in GMT. Example: Fri, 13 Nov 2015 14:47:53 GMT.
+ IfModifiedSince *string `input:"header,x-oss-copy-source-if-modified-since"`
+
+ // The object transfer condition. If the specified time is the same as or later than the actual modified time of the object,
+ // OSS transfers the object normally and returns 200 OK. Otherwise, OSS returns 412 Precondition Failed.
+ // The time must be in GMT. Example: Fri, 13 Nov 2015 14:47:53 GMT.
+ IfUnmodifiedSince *string `input:"header,x-oss-copy-source-if-unmodified-since"`
+
+ // Specify the speed limit value. The speed limit value ranges from 245760 to 838860800, with a unit of bit/s.
+ TrafficLimit int64 `input:"header,x-oss-traffic-limit"`
+
+ // To indicate that the requester is aware that the request and data download will incur costs
+ RequestPayer *string `input:"header,x-oss-request-payer"`
+
+ RequestCommon
+}
+
+type UploadPartCopyResult struct {
+ // The time when the returned objects were last modified.
+ LastModified *time.Time `xml:"LastModified"`
+
+ // Entity tag for the uploaded part.
+ ETag *string `xml:"ETag"`
+
+ // The version ID of the source object.
+ VersionId *string `output:"header,x-oss-copy-source-version-id"`
+
+ ResultCommon
+}
+
+// UploadPartCopy You can call this operation to copy data from an existing object to upload a part by adding an x-oss-copy-source header to UploadPart.
+func (c *Client) UploadPartCopy(ctx context.Context, request *UploadPartCopyRequest, optFns ...func(*Options)) (*UploadPartCopyResult, error) {
+ var err error
+ if request == nil {
+ request = &UploadPartCopyRequest{}
+ }
+ input := &OperationInput{
+ OpName: "UploadPartCopy",
+ Method: "PUT",
+ Bucket: request.Bucket,
+ Key: request.Key,
+ Headers: map[string]string{
+ "x-oss-copy-source": encodeSourceObject(request),
+ },
+ }
+ if err = c.marshalInput(request, input, updateContentMd5); err != nil {
+ return nil, err
+ }
+ output, err := c.invokeOperation(ctx, input, optFns)
+ if err != nil {
+ return nil, err
+ }
+
+ result := &UploadPartCopyResult{}
+ if err = c.unmarshalOutput(result, output, unmarshalBodyXml, unmarshalHeader); err != nil {
+ return nil, c.toClientError(err, "UnmarshalOutputFail", output)
+ }
+
+ return result, err
+}
+
+type CompleteMultipartUploadRequest struct {
+ // The name of the bucket.
+ Bucket *string `input:"host,bucket,required"`
+
+ // The name of the object.
+ Key *string `input:"path,key,required"`
+
+ // The ID of the multipart upload task.
+ UploadId *string `input:"query,uploadId,required"`
+
+ // The encoding type of the object names in the response. Valid value: url
+ EncodingType *string `input:"query,encoding-type"`
+
+ // Specifies whether the object with the same object name is overwritten when you call the CompleteMultipartUpload operation.
+ // If x-oss-forbid-overwrite is not specified or set to false, existing objects can be overwritten by objects that have the same names.
+ // If x-oss-forbid-overwrite is set to true, existing objects cannot be overwritten by objects that have the same names.
+ ForbidOverwrite *string `input:"header,x-oss-forbid-overwrite"`
+
+ // Specifies whether to list all parts that are uploaded by using the current upload ID. Valid value: yes
+ CompleteAll *string `input:"header,x-oss-complete-all"`
+
+ // The container that stores the content of the CompleteMultipartUpload request.
+ CompleteMultipartUpload *CompleteMultipartUpload `input:"body,CompleteMultipartUpload,xml"`
+
+ // The access control list (ACL) of the object.
+ Acl ObjectACLType `input:"header,x-oss-object-acl"` + + // A callback parameter is a Base64-encoded string that contains multiple fields in the JSON format. + Callback *string `input:"header,x-oss-callback"` + + // Configure custom parameters by using the callback-var parameter. + CallbackVar *string `input:"header,x-oss-callback-var"` + + // To indicate that the requester is aware that the request and data download will incur costs + RequestPayer *string `input:"header,x-oss-request-payer"` + + RequestCommon +} + +type UploadPart struct { + // The number of parts. + PartNumber int32 `xml:"PartNumber"` + + // The ETag values that are returned by OSS after parts are uploaded. + ETag *string `xml:"ETag"` +} + +type CompleteMultipartUpload struct { + Parts []UploadPart `xml:"Part"` +} +type UploadParts []UploadPart + +func (slice UploadParts) Len() int { + return len(slice) +} +func (slice UploadParts) Less(i, j int) bool { + return slice[i].PartNumber < slice[j].PartNumber +} +func (slice UploadParts) Swap(i, j int) { + slice[i], slice[j] = slice[j], slice[i] +} + +type CompleteMultipartUploadResult struct { + // The version ID of the source object. + VersionId *string `output:"header,x-oss-version-id"` + + // The 64-bit CRC value of the object. + // This value is calculated based on the ECMA-182 standard. + HashCRC64 *string `output:"header,x-oss-hash-crc64ecma"` + + // The encoding type of the name of the deleted object in the response. + // If encoding-type is specified in the request, the object name is encoded in the returned result. + EncodingType *string `xml:"EncodingType"` + + // The URL that is used to access the uploaded object. + Location *string `xml:"Location"` + + // The name of the bucket. + Bucket *string `xml:"Bucket"` + + // The name of the uploaded object. + Key *string `xml:"Key"` + + // The ETag that is generated when an object is created. + // ETags are used to identify the content of objects. + ETag *string `xml:"ETag"` + + CallbackResult map[string]any + + ResultCommon +} + +// CompleteMultipartUpload Completes the multipart upload task of an object after all parts of the object are uploaded. 
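+//
+// A minimal usage sketch (names and part data are hypothetical); part numbers
+// and ETags come from earlier UploadPart calls, and Parts are sorted by part
+// number before the request body is marshalled:
+//
+//	result, err := client.CompleteMultipartUpload(context.TODO(), &CompleteMultipartUploadRequest{
+//		Bucket:   Ptr("example-bucket"),
+//		Key:      Ptr("example-object"),
+//		UploadId: initResult.UploadId, // from InitiateMultipartUpload
+//		CompleteMultipartUpload: &CompleteMultipartUpload{
+//			Parts: []UploadPart{{PartNumber: 1, ETag: partResult.ETag}},
+//		},
+//	})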
+func (c *Client) CompleteMultipartUpload(ctx context.Context, request *CompleteMultipartUploadRequest, optFns ...func(*Options)) (*CompleteMultipartUploadResult, error) {
+ var err error
+ if request == nil {
+ request = &CompleteMultipartUploadRequest{}
+ }
+ input := &OperationInput{
+ OpName: "CompleteMultipartUpload",
+ Method: "POST",
+ Bucket: request.Bucket,
+ Key: request.Key,
+ Parameters: map[string]string{
+ "encoding-type": "url",
+ },
+ }
+
+ if request.CompleteMultipartUpload != nil && len(request.CompleteMultipartUpload.Parts) > 0 {
+ sort.Sort(UploadParts(request.CompleteMultipartUpload.Parts))
+ }
+
+ marshalFns := []func(any, *OperationInput) error{
+ updateContentMd5,
+ }
+ unmarshalFns := []func(result any, output *OperationOutput) error{
+ unmarshalHeader,
+ }
+
+ if request.Callback != nil {
+ marshalFns = append(marshalFns, addCallback)
+ unmarshalFns = append(unmarshalFns, unmarshalCallbackBody)
+ } else {
+ unmarshalFns = append(unmarshalFns, unmarshalBodyXml, unmarshalEncodeType)
+ }
+
+ if err = c.marshalInput(request, input, marshalFns...); err != nil {
+ return nil, err
+ }
+
+ output, err := c.invokeOperation(ctx, input, optFns)
+ if err != nil {
+ return nil, err
+ }
+
+ result := &CompleteMultipartUploadResult{}
+ if err = c.unmarshalOutput(result, output, unmarshalFns...); err != nil {
+ return nil, c.toClientError(err, "UnmarshalOutputFail", output)
+ }
+ return result, err
+}
+
+type AbortMultipartUploadRequest struct {
+ // The name of the bucket.
+ Bucket *string `input:"host,bucket,required"`
+
+ // The name of the object.
+ Key *string `input:"path,key,required"`
+
+ // The ID of the multipart upload task.
+ UploadId *string `input:"query,uploadId,required"`
+
+ // To indicate that the requester is aware that the request and data download will incur costs
+ RequestPayer *string `input:"header,x-oss-request-payer"`
+
+ RequestCommon
+}
+
+type AbortMultipartUploadResult struct {
+ ResultCommon
+}
+
+// AbortMultipartUpload Cancels a multipart upload task and deletes the parts uploaded in the task.
+func (c *Client) AbortMultipartUpload(ctx context.Context, request *AbortMultipartUploadRequest, optFns ...func(*Options)) (*AbortMultipartUploadResult, error) {
+ var err error
+ if request == nil {
+ request = &AbortMultipartUploadRequest{}
+ }
+ input := &OperationInput{
+ OpName: "AbortMultipartUpload",
+ Method: "DELETE",
+ Bucket: request.Bucket,
+ Key: request.Key,
+ }
+ if err = c.marshalInput(request, input, updateContentMd5); err != nil {
+ return nil, err
+ }
+ output, err := c.invokeOperation(ctx, input, optFns)
+ if err != nil {
+ return nil, err
+ }
+
+ result := &AbortMultipartUploadResult{}
+ if err = c.unmarshalOutput(result, output, discardBody); err != nil {
+ return nil, c.toClientError(err, "UnmarshalOutputFail", output)
+ }
+
+ return result, err
+}
+
+type ListMultipartUploadsRequest struct {
+ // The name of the bucket.
+ Bucket *string `input:"host,bucket,required"`
+
+ // The character that is used to group objects by name. If you specify the delimiter parameter in the request,
+ // the response contains the CommonPrefixes parameter. The objects whose names contain the same string from
+ // the prefix to the next occurrence of the delimiter are grouped as a single result element in CommonPrefixes.
+ Delimiter *string `input:"query,delimiter"`
+
+ // The encoding type of the content in the response. 
Valid value: url + EncodingType *string `input:"query,encoding-type"` + + // This parameter is used together with the upload-id-marker parameter to specify + // the position from which the next list begins. + KeyMarker *string `input:"query,key-marker"` + + // The maximum number of multipart upload tasks that can be returned for the current request. + // Default value: 1000. Maximum value: 1000. + MaxUploads int32 `input:"query,max-uploads"` + + // The prefix that the names of the returned objects must contain. + Prefix *string `input:"query,prefix"` + + // The upload ID of the multipart upload task after which the list begins. + // This parameter is used together with the key-marker parameter. + UploadIdMarker *string `input:"query,upload-id-marker"` + + // To indicate that the requester is aware that the request and data download will incur costs + RequestPayer *string `input:"header,x-oss-request-payer"` + + RequestCommon +} + +type ListMultipartUploadsResult struct { + // The method used to encode the object name in the response. + // If encoding-type is specified in the request, values of those elements including + // Delimiter, KeyMarker, Prefix, NextKeyMarker, and Key are encoded in the returned result. + EncodingType *string `xml:"EncodingType"` + + // The name of the bucket. + Bucket *string `xml:"Bucket"` + + // The name of the object that corresponds to the multipart upload task after which the list begins. + KeyMarker *string `xml:"KeyMarker"` + + // The upload ID of the multipart upload task after which the list begins. + UploadIdMarker *string `xml:"UploadIdMarker"` + + // The upload ID of the multipart upload task after which the list begins. + NextKeyMarker *string `xml:"NextKeyMarker"` + + // The NextUploadMarker value that is used for the UploadMarker value in + // the next request if the response does not contain all required results. + NextUploadIdMarker *string `xml:"NextUploadIdMarker"` + + // The character that is used to group objects by name. + Delimiter *string `xml:"Delimiter"` + + // The prefix contained in the returned object names. + Prefix *string `xml:"Prefix"` + + // The maximum number of multipart upload tasks returned by OSS. + MaxUploads int32 `xml:"MaxUploads"` + + // Indicates whether the list of multipart upload tasks returned in the response is truncated. + // true: Only part of the results are returned this time. + // false: All results are returned. + IsTruncated bool `xml:"IsTruncated"` + + Uploads []Upload `xml:"Upload"` + + ResultCommon +} + +type Upload struct { + // The name of the object for which a multipart upload task was initiated. + Key *string `xml:"Key"` + + // The ID of the multipart upload task + UploadId *string `xml:"UploadId"` + + // The time when the multipart upload task was initialized. + Initiated *time.Time `xml:"Initiated"` +} + +// ListMultipartUploads Lists all multipart upload tasks in progress. The tasks are not completed or canceled. 
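+//
+// Editor's note: a hypothetical pagination sketch, not part of the vendored
+// file. It assumes a configured *oss.Client; imports and error handling are
+// elided. The loop follows NextKeyMarker and NextUploadIdMarker until
+// IsTruncated reports that all results were returned:
+//
+//	req := &oss.ListMultipartUploadsRequest{Bucket: oss.Ptr("examplebucket")}
+//	for {
+//		res, err := client.ListMultipartUploads(context.TODO(), req)
+//		if err != nil {
+//			break // handle error
+//		}
+//		for _, u := range res.Uploads {
+//			fmt.Println(oss.ToString(u.Key), oss.ToString(u.UploadId))
+//		}
+//		if !res.IsTruncated {
+//			break
+//		}
+//		req.KeyMarker = res.NextKeyMarker
+//		req.UploadIdMarker = res.NextUploadIdMarker
+//	}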
+func (c *Client) ListMultipartUploads(ctx context.Context, request *ListMultipartUploadsRequest, optFns ...func(*Options)) (*ListMultipartUploadsResult, error) {
+ var err error
+ if request == nil {
+ request = &ListMultipartUploadsRequest{}
+ }
+ input := &OperationInput{
+ OpName: "ListMultipartUploads",
+ Method: "GET",
+ Bucket: request.Bucket,
+ Parameters: map[string]string{
+ "encoding-type": "url",
+ "uploads": "",
+ },
+ }
+ if err = c.marshalInput(request, input, updateContentMd5, enableNonStream); err != nil {
+ return nil, err
+ }
+ output, err := c.invokeOperation(ctx, input, optFns)
+ if err != nil {
+ return nil, err
+ }
+
+ result := &ListMultipartUploadsResult{}
+ if err = c.unmarshalOutput(result, output, unmarshalBodyXml, unmarshalEncodeType); err != nil {
+ return nil, c.toClientError(err, "UnmarshalOutputFail", output)
+ }
+
+ return result, err
+}
+
+type ListPartsRequest struct {
+ // The name of the bucket.
+ Bucket *string `input:"host,bucket,required"`
+
+ // The name of the object.
+ Key *string `input:"path,key,required"`
+
+ // The ID of the multipart upload task.
+ UploadId *string `input:"query,uploadId,required"`
+
+ // The encoding type of the content in the response. Valid value: url
+ EncodingType *string `input:"query,encoding-type"`
+
+ // The maximum number of parts that can be returned by OSS.
+ // Default value: 1000. Maximum value: 1000.
+ MaxParts int32 `input:"query,max-parts"`
+
+ // The position from which the list starts.
+ // All parts whose part numbers are greater than the value of this parameter are listed.
+ PartNumberMarker int32 `input:"query,part-number-marker"`
+
+ // To indicate that the requester is aware that the request and data download will incur costs
+ RequestPayer *string `input:"header,x-oss-request-payer"`
+
+ RequestCommon
+}
+
+type ListPartsResult struct {
+ // The method used to encode the object name in the response.
+ // If encoding-type is specified in the request, values of those elements including
+ // Delimiter, KeyMarker, Prefix, NextKeyMarker, and Key are encoded in the returned result.
+ EncodingType *string `xml:"EncodingType"`
+
+ // The name of the bucket.
+ Bucket *string `xml:"Bucket"`
+
+ // The name of the object that corresponds to the multipart upload task after which the list begins.
+ Key *string `xml:"Key"`
+
+ // The ID of the upload task.
+ UploadId *string `xml:"UploadId"`
+
+ // The position from which the list starts.
+ // All parts whose part numbers are greater than the value of this parameter are listed.
+ PartNumberMarker int32 `xml:"PartNumberMarker"`
+
+ // The NextPartNumberMarker value that is used for the PartNumberMarker value in a subsequent
+ // request when the response does not contain all required results.
+ NextPartNumberMarker int32 `xml:"NextPartNumberMarker"`
+
+ // The maximum number of parts in the response.
+ MaxParts int32 `xml:"MaxParts"`
+
+ // Indicates whether the list of parts returned in the response has been truncated.
+ // true: Only part of the results are returned this time.
+ // false: All results are returned.
+ IsTruncated bool `xml:"IsTruncated"`
+
+ // The storage class of the object.
+ StorageClass *string `xml:"StorageClass"`
+
+ // The encrypted data key.
+ // The encrypted data key is a string encrypted by a customer master key and encoded in Base64.
+ // Only available in client-side encryption
+ ClientEncryptionKey *string `xml:"ClientEncryptionKey"`
+
+ // The initial value that is randomly generated for data encryption.
+ // The initial value is a string encrypted by a customer master key and encoded in Base64.
+ // Only available in client-side encryption
+ ClientEncryptionStart *string `xml:"ClientEncryptionStart"`
+
+ // The algorithm used to encrypt data.
+ // Only available in client-side encryption
+ ClientEncryptionCekAlg *string `xml:"ClientEncryptionCekAlg"`
+
+ // The algorithm used to encrypt the data key.
+ // Only available in client-side encryption
+ ClientEncryptionWrapAlg *string `xml:"ClientEncryptionWrapAlg"`
+
+ // The total size of the data to encrypt for multipart upload when init_multipart is called.
+ // Only available in client-side encryption
+ ClientEncryptionDataSize *int64 `xml:"ClientEncryptionDataSize"`
+
+ // The size of each part to encrypt for multipart upload when init_multipart is called.
+ // Only available in client-side encryption
+ ClientEncryptionPartSize *int64 `xml:"ClientEncryptionPartSize"`
+
+ Parts []Part `xml:"Part"`
+
+ ResultCommon
+}
+
+type Part struct {
+ // The number that identifies a part.
+ PartNumber int32 `xml:"PartNumber"`
+
+ // The ETag value of the content of the uploaded part.
+ ETag *string `xml:"ETag"`
+
+ // The time when the part was uploaded.
+ LastModified *time.Time `xml:"LastModified"`
+
+ // The size of the uploaded part.
+ Size int64 `xml:"Size"`
+
+ // The 64-bit CRC value of the object.
+ // This value is calculated based on the ECMA-182 standard.
+ HashCRC64 *string `xml:"HashCrc64ecma"`
+}
+
+// ListParts Lists all parts that are uploaded by using a specified upload ID.
+func (c *Client) ListParts(ctx context.Context, request *ListPartsRequest, optFns ...func(*Options)) (*ListPartsResult, error) {
+ var err error
+ if request == nil {
+ request = &ListPartsRequest{}
+ }
+ input := &OperationInput{
+ OpName: "ListParts",
+ Method: "GET",
+ Bucket: request.Bucket,
+ Key: request.Key,
+ Parameters: map[string]string{
+ "encoding-type": "url",
+ },
+ }
+ if err = c.marshalInput(request, input, updateContentMd5, enableNonStream); err != nil {
+ return nil, err
+ }
+ output, err := c.invokeOperation(ctx, input, optFns)
+ if err != nil {
+ return nil, err
+ }
+
+ result := &ListPartsResult{}
+ if err = c.unmarshalOutput(result, output, unmarshalBodyXml, unmarshalEncodeType); err != nil {
+ return nil, c.toClientError(err, "UnmarshalOutputFail", output)
+ }
+
+ return result, err
+}
+
+type PutSymlinkRequest struct {
+ // The name of the bucket.
+ Bucket *string `input:"host,bucket,required"`
+
+ // The name of the object.
+ Key *string `input:"path,key,required"`
+
+ // The destination object to which the symbolic link points.
+ Target *string `input:"header,x-oss-symlink-target,required"`
+
+ // Specifies whether the PutSymlink operation overwrites the object that has the same name.
+ // If you do not specify the x-oss-forbid-overwrite header or if you set the x-oss-forbid-overwrite header to false, the object that has the same name is overwritten.
+ // If you set the x-oss-forbid-overwrite header to true, the object that has the same name cannot be overwritten.
+ ForbidOverwrite *string `input:"header,x-oss-forbid-overwrite"`
+
+ // The ACL of the object. Default value: default.
+ Acl ObjectACLType `input:"header,x-oss-object-acl"`
+
+ // The storage class of the object.
+ StorageClass StorageClassType `input:"header,x-oss-storage-class"`
+
+ // The metadata of the object that you want to symlink.
+ Metadata map[string]string `input:"header,x-oss-meta-,usermeta"` + + // To indicate that the requester is aware that the request and data download will incur costs + RequestPayer *string `input:"header,x-oss-request-payer"` + + RequestCommon +} + +type PutSymlinkResult struct { + // Version of the object. + VersionId *string `output:"header,x-oss-version-id"` + + ResultCommon +} + +// PutSymlink Creates a symbolic link that points to a destination object. You can use the symbolic link to access the destination object. +func (c *Client) PutSymlink(ctx context.Context, request *PutSymlinkRequest, optFns ...func(*Options)) (*PutSymlinkResult, error) { + var err error + if request == nil { + request = &PutSymlinkRequest{} + } + input := &OperationInput{ + OpName: "PutSymlink", + Method: "PUT", + Bucket: request.Bucket, + Key: request.Key, + Parameters: map[string]string{ + "symlink": "", + }, + } + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &PutSymlinkResult{} + if err = c.unmarshalOutput(result, output, discardBody, unmarshalHeader); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type GetSymlinkRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + // The name of the object. + Key *string `input:"path,key,required"` + + // Version of the object. + VersionId *string `input:"query,versionId"` + + // To indicate that the requester is aware that the request and data download will incur costs + RequestPayer *string `input:"header,x-oss-request-payer"` + + RequestCommon +} + +type GetSymlinkResult struct { + // Version of the object. + VersionId *string `output:"header,x-oss-version-id"` + + // Indicates the target object that the symbol link directs to. + Target *string `output:"header,x-oss-symlink-target"` + + // Entity tag for the uploaded object. + ETag *string `output:"header,ETag"` + + // The metadata of the object that you want to symlink. + Metadata map[string]string `output:"header,x-oss-meta-,usermeta"` + + ResultCommon +} + +// GetSymlink Obtains a symbol link. To perform GetSymlink operations, you must have the read permission on the symbol link. +func (c *Client) GetSymlink(ctx context.Context, request *GetSymlinkRequest, optFns ...func(*Options)) (*GetSymlinkResult, error) { + var err error + if request == nil { + request = &GetSymlinkRequest{} + } + input := &OperationInput{ + OpName: "GetSymlink", + Method: "GET", + Bucket: request.Bucket, + Key: request.Key, + Parameters: map[string]string{ + "symlink": "", + }, + } + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &GetSymlinkResult{} + if err = c.unmarshalOutput(result, output, discardBody, unmarshalHeader); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type PutObjectTaggingRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + // The name of the object. + Key *string `input:"path,key,required"` + + // Version of the object. 
+ VersionId *string `input:"query,versionId"` + + Tagging *Tagging `input:"body,Tagging,xml,required"` + + // To indicate that the requester is aware that the request and data download will incur costs + RequestPayer *string `input:"header,x-oss-request-payer"` + + RequestCommon +} + +type Tagging struct { + // The container used to store a set of Tags. + TagSet *TagSet `xml:"TagSet"` +} + +type TagSet struct { + // The tags. + Tags []Tag `xml:"Tag"` +} + +type Tag struct { + // The key of a tag. + // * A tag key can be up to 64 bytes in length. + // * A tag key cannot start with `http://`, `https://`, or `Aliyun`. + // * A tag key must be UTF-8 encoded. + // * A tag key cannot be left empty. + Key *string `xml:"Key"` + + // The value of the tag that you want to add or modify. + // * A tag value can be up to 128 bytes in length. + // * A tag value must be UTF-8 encoded. + // * The tag value can be left empty. + Value *string `xml:"Value"` +} + +type PutObjectTaggingResult struct { + // Version of the object. + VersionId *string `output:"header,x-oss-version-id"` + + ResultCommon +} + +// PutObjectTagging Adds tags to an object or updates the tags added to the object. Each tag added to an object is a key-value pair. +func (c *Client) PutObjectTagging(ctx context.Context, request *PutObjectTaggingRequest, optFns ...func(*Options)) (*PutObjectTaggingResult, error) { + var err error + if request == nil { + request = &PutObjectTaggingRequest{} + } + input := &OperationInput{ + OpName: "PutObjectTagging", + Method: "PUT", + Bucket: request.Bucket, + Key: request.Key, + Parameters: map[string]string{ + "tagging": "", + }, + } + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &PutObjectTaggingResult{} + if err = c.unmarshalOutput(result, output, discardBody, unmarshalHeader); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type GetObjectTaggingRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + // The name of the object. + Key *string `input:"path,key,required"` + + // Version of the object. + VersionId *string `input:"query,versionId"` + + // To indicate that the requester is aware that the request and data download will incur costs + RequestPayer *string `input:"header,x-oss-request-payer"` + + RequestCommon +} + +type GetObjectTaggingResult struct { + // Version of the object. + VersionId *string `output:"header,x-oss-version-id"` + + // The container used to store the collection of tags. + Tags []Tag `xml:"TagSet>Tag"` + + ResultCommon +} + +// GetObjectTagging You can call this operation to query the tags of an object. 
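+//
+// Editor's note: a minimal sketch, not part of the vendored file, of reading
+// an object's tags back; it assumes a configured *oss.Client, and bucket and
+// object names are placeholders:
+//
+//	res, err := client.GetObjectTagging(context.TODO(), &oss.GetObjectTaggingRequest{
+//		Bucket: oss.Ptr("examplebucket"),
+//		Key:    oss.Ptr("exampleobject"),
+//	})
+//	if err == nil {
+//		for _, t := range res.Tags {
+//			fmt.Printf("%s=%s\n", oss.ToString(t.Key), oss.ToString(t.Value))
+//		}
+//	}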
+func (c *Client) GetObjectTagging(ctx context.Context, request *GetObjectTaggingRequest, optFns ...func(*Options)) (*GetObjectTaggingResult, error) { + var err error + if request == nil { + request = &GetObjectTaggingRequest{} + } + input := &OperationInput{ + OpName: "GetObjectTagging", + Method: "GET", + Bucket: request.Bucket, + Key: request.Key, + Parameters: map[string]string{ + "tagging": "", + }, + } + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &GetObjectTaggingResult{} + if err = c.unmarshalOutput(result, output, unmarshalBodyXml, unmarshalHeader); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type DeleteObjectTaggingRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + // The name of the object. + Key *string `input:"path,key,required"` + + // Version of the object. + VersionId *string `input:"query,versionId"` + + // To indicate that the requester is aware that the request and data download will incur costs + RequestPayer *string `input:"header,x-oss-request-payer"` + + RequestCommon +} + +type DeleteObjectTaggingResult struct { + // Version of the object. + VersionId *string `output:"header,x-oss-version-id"` + + ResultCommon +} + +// DeleteObjectTagging You can call this operation to delete the tags of a specified object. +func (c *Client) DeleteObjectTagging(ctx context.Context, request *DeleteObjectTaggingRequest, optFns ...func(*Options)) (*DeleteObjectTaggingResult, error) { + var err error + if request == nil { + request = &DeleteObjectTaggingRequest{} + } + input := &OperationInput{ + OpName: "DeleteObjectTagging", + Method: "DELETE", + Bucket: request.Bucket, + Key: request.Key, + Parameters: map[string]string{ + "tagging": "", + }, + } + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &DeleteObjectTaggingResult{} + if err = c.unmarshalOutput(result, output, discardBody, unmarshalHeader); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type ProcessObjectRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + // The name of the object. + Key *string `input:"path,key,required"` + + // Image processing parameters + Process *string `input:"x-oss-process,nop,required"` + + // To indicate that the requester is aware that the request and data download will incur costs + RequestPayer *string `input:"header,x-oss-request-payer"` + + RequestCommon +} + +type ProcessObjectResult struct { + Bucket string `json:"bucket"` + FileSize int `json:"fileSize"` + Object string `json:"object"` + ProcessStatus string `json:"status"` + ResultCommon +} + +// ProcessObject apply process on the specified image file. 
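+//
+// Editor's note: a hypothetical usage sketch, not part of the vendored file.
+// The Process string follows the OSS image-processing syntax; the style shown
+// here (resize to 100 px wide) and the bucket/object names are placeholders:
+//
+//	res, err := client.ProcessObject(context.TODO(), &oss.ProcessObjectRequest{
+//		Bucket:  oss.Ptr("examplebucket"),
+//		Key:     oss.Ptr("example.jpg"),
+//		Process: oss.Ptr("image/resize,w_100"),
+//	})
+//	if err == nil {
+//		fmt.Println(res.ProcessStatus)
+//	}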
+func (c *Client) ProcessObject(ctx context.Context, request *ProcessObjectRequest, optFns ...func(*Options)) (*ProcessObjectResult, error) { + var err error + if request == nil { + request = &ProcessObjectRequest{} + } + input := &OperationInput{ + OpName: "ProcessObject", + Method: "POST", + Bucket: request.Bucket, + Key: request.Key, + Parameters: map[string]string{ + "x-oss-process": "", + }, + } + if err = c.marshalInput(request, input, addProcess, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &ProcessObjectResult{} + if err = c.unmarshalOutput(result, output, unmarshalBodyDefault, unmarshalHeader); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type AsyncProcessObjectRequest struct { + // The name of the bucket. + Bucket *string `input:"host,bucket,required"` + + // The name of the object. + Key *string `input:"path,key,required"` + + // Image async processing parameters + AsyncProcess *string `input:"x-async-oss-process,nop,required"` + + // To indicate that the requester is aware that the request and data download will incur costs + RequestPayer *string `input:"header,x-oss-request-payer"` + + RequestCommon +} + +type AsyncProcessObjectResult struct { + EventId string `json:"EventId"` + RequestId string `json:"RequestId"` + TaskId string `json:"TaskId"` + ResultCommon +} + +// AsyncProcessObject apply async process on the specified image file. +func (c *Client) AsyncProcessObject(ctx context.Context, request *AsyncProcessObjectRequest, optFns ...func(*Options)) (*AsyncProcessObjectResult, error) { + var err error + if request == nil { + request = &AsyncProcessObjectRequest{} + } + input := &OperationInput{ + OpName: "AsyncProcessObject", + Method: "POST", + Bucket: request.Bucket, + Key: request.Key, + Parameters: map[string]string{ + "x-oss-async-process": "", + }, + } + if err = c.marshalInput(request, input, addProcess, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &AsyncProcessObjectResult{} + if err = c.unmarshalOutput(result, output, unmarshalBodyDefault, unmarshalHeader); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type CleanRestoredObjectRequest struct { + // The name of the bucket + Bucket *string `input:"host,bucket,required"` + + // The name of the object. + Key *string `input:"path,key,required"` + + // Version of the object. + VersionId *string `input:"query,versionId"` + + // To indicate that the requester is aware that the request and data download will incur costs + RequestPayer *string `input:"header,x-oss-request-payer"` + + RequestCommon +} + +type CleanRestoredObjectResult struct { + ResultCommon +} + +// CleanRestoredObject You can call this operation to clean an object restored from Archive or Cold Archive state. After that, the restored object returns to the frozen state. 
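+//
+// Editor's note: a minimal sketch, not part of the vendored file; it assumes a
+// configured *oss.Client and an object that is currently in the restored
+// state, with placeholder bucket and object names:
+//
+//	_, err := client.CleanRestoredObject(context.TODO(), &oss.CleanRestoredObjectRequest{
+//		Bucket: oss.Ptr("examplebucket"),
+//		Key:    oss.Ptr("exampleobject"),
+//	})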
+func (c *Client) CleanRestoredObject(ctx context.Context, request *CleanRestoredObjectRequest, optFns ...func(*Options)) (*CleanRestoredObjectResult, error) { + var err error + if request == nil { + request = &CleanRestoredObjectRequest{} + } + input := &OperationInput{ + OpName: "CleanRestoredObject", + Method: "POST", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "cleanRestoredObject": "", + }, + Bucket: request.Bucket, + Key: request.Key, + } + + input.OpMetadata.Set(signer.SubResource, []string{"cleanRestoredObject"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &CleanRestoredObjectResult{} + + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_publicaccessblock.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_publicaccessblock.go new file mode 100644 index 000000000..40710bc5c --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_publicaccessblock.go @@ -0,0 +1,147 @@ +package oss + +import ( + "context" + + "github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/signer" +) + +type PublicAccessBlockConfiguration struct { + // Specifies whether to enable Block Public Access.true: enables Block Public Access.false (default): disables Block Public Access. + BlockPublicAccess *bool `xml:"BlockPublicAccess"` +} + +type GetPublicAccessBlockRequest struct { + RequestCommon +} + +type GetPublicAccessBlockResult struct { + // The container in which the Block Public Access configurations are stored. + PublicAccessBlockConfiguration *PublicAccessBlockConfiguration `output:"body,PublicAccessBlockConfiguration,xml"` + + ResultCommon +} + +// GetPublicAccessBlock Queries the Block Public Access configurations of OSS resources. +func (c *Client) GetPublicAccessBlock(ctx context.Context, request *GetPublicAccessBlockRequest, optFns ...func(*Options)) (*GetPublicAccessBlockResult, error) { + var err error + if request == nil { + request = &GetPublicAccessBlockRequest{} + } + input := &OperationInput{ + OpName: "GetPublicAccessBlock", + Method: "GET", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "publicAccessBlock": "", + }, + } + input.OpMetadata.Set(signer.SubResource, []string{"publicAccessBlock"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &GetPublicAccessBlockResult{} + + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type PutPublicAccessBlockRequest struct { + // Request body. + PublicAccessBlockConfiguration *PublicAccessBlockConfiguration `input:"body,PublicAccessBlockConfiguration,xml,required"` + + RequestCommon +} + +type PutPublicAccessBlockResult struct { + ResultCommon +} + +// PutPublicAccessBlock Enables or disables Block Public Access for Object Storage Service (OSS) resources. 
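+//
+// Editor's note: a minimal sketch, not part of the vendored file, that turns
+// Block Public Access on for the account's OSS resources; it assumes a
+// configured *oss.Client and that Ptr accepts a bool here:
+//
+//	_, err := client.PutPublicAccessBlock(context.TODO(), &oss.PutPublicAccessBlockRequest{
+//		PublicAccessBlockConfiguration: &oss.PublicAccessBlockConfiguration{
+//			BlockPublicAccess: oss.Ptr(true),
+//		},
+//	})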
+func (c *Client) PutPublicAccessBlock(ctx context.Context, request *PutPublicAccessBlockRequest, optFns ...func(*Options)) (*PutPublicAccessBlockResult, error) { + var err error + if request == nil { + request = &PutPublicAccessBlockRequest{} + } + input := &OperationInput{ + OpName: "PutPublicAccessBlock", + Method: "PUT", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "publicAccessBlock": "", + }, + } + input.OpMetadata.Set(signer.SubResource, []string{"publicAccessBlock"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &PutPublicAccessBlockResult{} + + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} + +type DeletePublicAccessBlockRequest struct { + RequestCommon +} + +type DeletePublicAccessBlockResult struct { + ResultCommon +} + +// DeletePublicAccessBlock Deletes the Block Public Access configurations of OSS resources. +func (c *Client) DeletePublicAccessBlock(ctx context.Context, request *DeletePublicAccessBlockRequest, optFns ...func(*Options)) (*DeletePublicAccessBlockResult, error) { + var err error + if request == nil { + request = &DeletePublicAccessBlockRequest{} + } + input := &OperationInput{ + OpName: "DeletePublicAccessBlock", + Method: "DELETE", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + Parameters: map[string]string{ + "publicAccessBlock": "", + }, + } + input.OpMetadata.Set(signer.SubResource, []string{"publicAccessBlock"}) + + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &DeletePublicAccessBlockResult{} + + if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + + return result, err +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_region.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_region.go new file mode 100644 index 000000000..aefb1f1b2 --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_region.go @@ -0,0 +1,72 @@ +package oss + +import ( + "context" +) + +type RegionInfo struct { + // The region ID. + Region *string `xml:"Region"` + + // The public endpoint of the region. + InternetEndpoint *string `xml:"InternetEndpoint"` + + // The internal endpoint of the region. + InternalEndpoint *string `xml:"InternalEndpoint"` + + // The acceleration endpoint of the region. The value is always oss-accelerate.aliyuncs.com. + AccelerateEndpoint *string `xml:"AccelerateEndpoint"` +} + +type RegionInfoList struct { + // The information about the regions. + RegionInfos []RegionInfo `xml:"RegionInfo"` +} + +type DescribeRegionsRequest struct { + // The region ID of the request. + Regions *string `input:"query,regions"` + + RequestCommon +} + +type DescribeRegionsResult struct { + // The information about the regions. + RegionInfoList *RegionInfoList `output:"body,RegionInfoList,xml"` + + ResultCommon +} + +// DescribeRegions Queries the endpoints of all supported regions or the endpoints of a specific region. 
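+//
+// Editor's note: a minimal sketch, not part of the vendored file. Leaving
+// Regions unset lists every region; the region ID below is a placeholder:
+//
+//	res, err := client.DescribeRegions(context.TODO(), &oss.DescribeRegionsRequest{
+//		Regions: oss.Ptr("oss-cn-hangzhou"),
+//	})
+//	if err == nil {
+//		for _, r := range res.RegionInfoList.RegionInfos {
+//			fmt.Println(oss.ToString(r.Region), oss.ToString(r.InternetEndpoint))
+//		}
+//	}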
+func (c *Client) DescribeRegions(ctx context.Context, request *DescribeRegionsRequest, optFns ...func(*Options)) (*DescribeRegionsResult, error) {
+ var err error
+ if request == nil {
+ request = &DescribeRegionsRequest{}
+ }
+ input := &OperationInput{
+ OpName: "DescribeRegions",
+ Method: "GET",
+ Headers: map[string]string{
+ HTTPHeaderContentType: contentTypeXML,
+ },
+ Parameters: map[string]string{
+ "regions": "",
+ },
+ }
+
+ if err = c.marshalInput(request, input, updateContentMd5); err != nil {
+ return nil, err
+ }
+ output, err := c.invokeOperation(ctx, input, optFns)
+ if err != nil {
+ return nil, err
+ }
+
+ result := &DescribeRegionsResult{}
+
+ if err = c.unmarshalOutput(result, output, unmarshalBodyXmlMix); err != nil {
+ return nil, c.toClientError(err, "UnmarshalOutputFail", output)
+ }
+
+ return result, err
+}
diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_select_object.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_select_object.go
new file mode 100644
index 000000000..24056e662
--- /dev/null
+++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_select_object.go
@@ -0,0 +1,740 @@
+package oss
+
+import (
+ "bytes"
+ "context"
+ "encoding/base64"
+ "encoding/binary"
+ "encoding/xml"
+ "fmt"
+ "hash"
+ "hash/crc32"
+ "io"
+ "strconv"
+ "strings"
+)
+
+// FrameType
+const (
+ DataFrameType = 8388609
+ ContinuousFrameType = 8388612
+ EndFrameType = 8388613
+ MetaEndFrameCSVType = 8388614
+ MetaEndFrameJSONType = 8388615
+)
+
+type CreateSelectObjectMetaRequest struct {
+ // The name of the bucket.
+ Bucket *string `input:"host,bucket,required"`
+
+ // The name of the object.
+ Key *string `input:"path,key,required"`
+
+ MetaRequest any `input:"nop,meta-request,required"`
+
+ RequestCommon
+}
+
+type JsonMetaRequest struct {
+ InputSerialization *InputSerialization `xml:"InputSerialization"`
+ OverwriteIfExists *bool `xml:"OverwriteIfExists"`
+}
+
+type CsvMetaRequest struct {
+ InputSerialization *InputSerialization `xml:"InputSerialization"`
+ OverwriteIfExists *bool `xml:"OverwriteIfExists"`
+}
+
+type InputSerialization struct {
+ CSV *InputSerializationCSV `xml:"CSV"`
+ JSON *InputSerializationJSON `xml:"JSON"`
+ CompressionType *string `xml:"CompressionType"`
+}
+
+type InputSerializationCSV struct {
+ RecordDelimiter *string `xml:"RecordDelimiter"`
+ FieldDelimiter *string `xml:"FieldDelimiter"`
+ QuoteCharacter *string `xml:"QuoteCharacter"`
+}
+
+type InputSerializationJSON struct {
+ JSONType *string `xml:"Type"`
+}
+
+type CreateSelectObjectMetaResult struct {
+ TotalScanned int64
+ MetaStatus int
+ SplitsCount int32
+ RowsCount int64
+ ColumnsCount int32
+ ErrorMsg string
+ ResultCommon
+}
+
+type ReadFlagInfo struct {
+ OpenLine bool
+ ConsumedBytesLength int32
+ EnablePayloadCrc bool
+ OutputRawData bool
+}
+
+// CreateSelectObjectMeta You can call the CreateSelectObjectMeta operation to obtain information about an object, such as the total number of rows and the number of splits.
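+//
+// Editor's note: a hypothetical sketch, not part of the vendored file, that
+// scans a CSV object so that later SelectObject calls can use split ranges;
+// it assumes a configured *oss.Client, and bucket/object names are
+// placeholders:
+//
+//	res, err := client.CreateSelectObjectMeta(context.TODO(), &oss.CreateSelectObjectMetaRequest{
+//		Bucket: oss.Ptr("examplebucket"),
+//		Key:    oss.Ptr("example.csv"),
+//		MetaRequest: &oss.CsvMetaRequest{
+//			InputSerialization: &oss.InputSerialization{
+//				CSV: &oss.InputSerializationCSV{RecordDelimiter: oss.Ptr("\n")},
+//			},
+//		},
+//	})
+//	if err == nil {
+//		fmt.Println(res.RowsCount, res.SplitsCount)
+//	}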
+func (c *Client) CreateSelectObjectMeta(ctx context.Context, request *CreateSelectObjectMetaRequest, optFns ...func(*Options)) (*CreateSelectObjectMetaResult, error) {
+ var err error
+ if request == nil {
+ request = &CreateSelectObjectMetaRequest{}
+ }
+ input := &OperationInput{
+ OpName: "CreateSelectObjectMeta",
+ Method: "POST",
+ Bucket: request.Bucket,
+ Key: request.Key,
+ Headers: map[string]string{
+ HTTPHeaderContentType: contentTypeXML,
+ },
+ }
+ if err = c.marshalInput(request, input, marshalMetaRequest, updateContentMd5); err != nil {
+ return nil, err
+ }
+ output, err := c.invokeOperation(ctx, input, optFns)
+ if err != nil {
+ return nil, err
+ }
+ result := &CreateSelectObjectMetaResult{}
+ if err = c.unmarshalOutput(result, output, unmarshalBodyCreateSelectObjectMeta); err != nil {
+ return nil, c.toClientError(err, "UnmarshalOutputFail", output)
+ }
+ return result, err
+}
+
+func marshalMetaRequest(request any, input *OperationInput) error {
+ var builder strings.Builder
+ var process string
+ switch r := request.(*CreateSelectObjectMetaRequest).MetaRequest.(type) {
+ case *JsonMetaRequest:
+ process = "json/meta"
+ builder.WriteString("<JsonMetaRequest>")
+ if r.InputSerialization != nil {
+ bs, err := xml.Marshal(r.InputSerialization)
+ if err != nil {
+ return err
+ }
+ builder.WriteString(string(bs))
+ }
+ if r.OverwriteIfExists != nil {
+ builder.WriteString("<OverwriteIfExists>")
+ builder.WriteString(strconv.FormatBool(*r.OverwriteIfExists))
+ builder.WriteString("</OverwriteIfExists>")
+ }
+ builder.WriteString("</JsonMetaRequest>")
+ case *CsvMetaRequest:
+ r.encodeBase64()
+ process = "csv/meta"
+ builder.WriteString("<CsvMetaRequest>")
+ if r.InputSerialization != nil {
+ bs, err := xml.Marshal(r.InputSerialization)
+ if err != nil {
+ return err
+ }
+ builder.WriteString(string(bs))
+ }
+ if r.OverwriteIfExists != nil {
+ builder.WriteString("<OverwriteIfExists>")
+ builder.WriteString(strconv.FormatBool(*r.OverwriteIfExists))
+ builder.WriteString("</OverwriteIfExists>")
+ }
+ builder.WriteString("</CsvMetaRequest>")
+ default:
+ return NewErrParamInvalid("MetaRequest")
+ }
+ input.Body = strings.NewReader(builder.String())
+ if input.Parameters == nil {
+ input.Parameters = map[string]string{}
+ }
+ input.Parameters["x-oss-process"] = process
+ return nil
+}
+
+func unmarshalBodyCreateSelectObjectMeta(result any, output *OperationOutput) error {
+ var err error
+ if output.Body != nil {
+ defer output.Body.Close()
+ readerWrapper := &ReaderWrapper{
+ Body: output.Body,
+ WriterForCheckCrc32: crc32.NewIEEE(),
+ }
+ if _, err = io.ReadAll(readerWrapper); err != nil {
+ return err
+ }
+ result.(*CreateSelectObjectMetaResult).TotalScanned = readerWrapper.TotalScanned
+ result.(*CreateSelectObjectMetaResult).MetaStatus = int(readerWrapper.Status)
+ result.(*CreateSelectObjectMetaResult).SplitsCount = readerWrapper.SplitsCount
+ result.(*CreateSelectObjectMetaResult).RowsCount = readerWrapper.RowsCount
+ result.(*CreateSelectObjectMetaResult).ColumnsCount = readerWrapper.ColumnsCount
+ result.(*CreateSelectObjectMetaResult).ErrorMsg = readerWrapper.ErrorMsg
+ }
+ return err
+}
+
+type SelectObjectRequest struct {
+ // The name of the bucket.
+ Bucket *string `input:"host,bucket,required"`
+
+ // The name of the object.
+ Key *string `input:"path,key,required"` + + SelectRequest *SelectRequest `input:"nop,SelectRequest,required"` + + RequestCommon +} + +type SelectObjectResult struct { + Body io.ReadCloser + ResultCommon +} + +type SelectRequest struct { + Expression *string `xml:"Expression"` + InputSerializationSelect InputSerializationSelect `xml:"InputSerialization"` + OutputSerializationSelect OutputSerializationSelect `xml:"OutputSerialization"` + SelectOptions *SelectOptions `xml:"Options"` +} + +type OutputSerializationSelect struct { + CsvBodyOutput *CSVSelectOutput `xml:"CSV"` + JsonBodyOutput *JSONSelectOutput `xml:"JSON"` + OutputRawData *bool `xml:"OutputRawData"` + KeepAllColumns *bool `xml:"KeepAllColumns"` + EnablePayloadCrc *bool `xml:"EnablePayloadCrc"` + OutputHeader *bool `xml:"OutputHeader"` +} +type CSVSelectOutput struct { + RecordDelimiter *string `xml:"RecordDelimiter"` + FieldDelimiter *string `xml:"FieldDelimiter"` +} +type JSONSelectOutput struct { + RecordDelimiter *string `xml:"RecordDelimiter"` +} + +type SelectOptions struct { + SkipPartialDataRecord *bool `xml:"SkipPartialDataRecord"` + MaxSkippedRecordsAllowed *int `xml:"MaxSkippedRecordsAllowed"` +} + +type InputSerializationSelect struct { + CsvBodyInput *CSVSelectInput `xml:"CSV"` + JsonBodyInput *JSONSelectInput `xml:"JSON"` + CompressionType *string `xml:"CompressionType"` +} + +type CSVSelectInput struct { + FileHeaderInfo *string `xml:"FileHeaderInfo"` + RecordDelimiter *string `xml:"RecordDelimiter"` + FieldDelimiter *string `xml:"FieldDelimiter"` + QuoteCharacter *string `xml:"QuoteCharacter"` + CommentCharacter *string `xml:"CommentCharacter"` + Range *string `xml:"Range"` + SplitRange *string + AllowQuotedRecordDelimiter *bool `xml:"AllowQuotedRecordDelimiter"` +} + +type JSONSelectInput struct { + JSONType *string `xml:"Type"` + Range *string `xml:"Range"` + ParseJSONNumberAsString *bool `xml:"ParseJsonNumberAsString"` + SplitRange *string +} + +// SelectObject Executes SQL statements to perform operations on an object and obtains the execution results. 
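+//
+// Editor's note: a hypothetical sketch, not part of the vendored file. The SQL
+// expression is passed in plain text; encodeBase64 (below) encodes it before
+// the request is sent. It assumes a configured *oss.Client; imports and error
+// handling are elided, and bucket/object names are placeholders:
+//
+//	res, err := client.SelectObject(context.TODO(), &oss.SelectObjectRequest{
+//		Bucket: oss.Ptr("examplebucket"),
+//		Key:    oss.Ptr("example.csv"),
+//		SelectRequest: &oss.SelectRequest{
+//			Expression: oss.Ptr("select * from ossobject"),
+//			InputSerializationSelect: oss.InputSerializationSelect{
+//				CsvBodyInput: &oss.CSVSelectInput{FileHeaderInfo: oss.Ptr("Use")},
+//			},
+//		},
+//	})
+//	if err == nil {
+//		defer res.Body.Close()
+//		data, _ := io.ReadAll(res.Body) // frames are decoded by ReaderWrapper
+//		_ = data
+//	}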
+func (c *Client) SelectObject(ctx context.Context, request *SelectObjectRequest, optFns ...func(*Options)) (*SelectObjectResult, error) { + var err error + if request == nil { + request = &SelectObjectRequest{} + } + input := &OperationInput{ + OpName: "SelectObject", + Method: "POST", + Bucket: request.Bucket, + Key: request.Key, + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeXML, + }, + } + if err = c.marshalInput(request, input, marshalSelectObjectRequest, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + result := &SelectObjectResult{} + err = unmarshalResultSelectObject(request, result, output) + if err != nil { + return nil, err + } + if err = c.unmarshalOutput(result, output); err != nil { + return nil, err + } + return result, err +} + +func marshalSelectObjectRequest(request any, input *OperationInput) error { + var process string + if request.(*SelectObjectRequest).SelectRequest != nil { + if request.(*SelectObjectRequest).SelectRequest.InputSerializationSelect.JsonBodyInput == nil { + process = "csv/select" + } else { + process = "json/select" + } + request.(*SelectObjectRequest).SelectRequest.encodeBase64() + } + if input.Parameters == nil { + input.Parameters = map[string]string{} + } + input.Parameters["x-oss-process"] = process + bs, err := xml.Marshal(request.(*SelectObjectRequest).SelectRequest) + if err != nil { + return err + } + input.Body = strings.NewReader(string(bs)) + return err +} + +func unmarshalResultSelectObject(request *SelectObjectRequest, result *SelectObjectResult, output *OperationOutput) error { + var err error + if output.Body != nil { + readerWrapper := &ReaderWrapper{ + Body: output.Body, + WriterForCheckCrc32: crc32.NewIEEE(), + } + if request.SelectRequest.OutputSerializationSelect.EnablePayloadCrc != nil && *request.SelectRequest.OutputSerializationSelect.EnablePayloadCrc == true { + readerWrapper.EnablePayloadCrc = true + } + readerWrapper.OutputRawData = strings.ToUpper(output.Headers.Get("x-oss-select-output-raw")) == "TRUE" + result.Body = readerWrapper + } + return err +} + +// The adapter class for Select object's response. +// The response consists of frames. Each frame has the following format: + +// Type | Payload Length | Header Checksum | Payload | Payload Checksum + +// |<4-->| <--4 bytes------><---4 bytes-------><-n/a-----><--4 bytes---------> +// And we have three kind of frames. 
+// Data Frame: +// Type:8388609 +// Payload: Offset | Data +// <-8 bytes> + +// Continuous Frame +// Type:8388612 +// Payload: Offset (8-bytes) + +// End Frame +// Type:8388613 +// Payload: Offset | total scanned bytes | http status code | error message +// <-- 8bytes--><-----8 bytes--------><---4 bytes-------><---variabe---> + +// SelectObjectResponse defines HTTP response from OSS SelectObject +//type SelectObjectResponse struct { +// Body io.ReadCloser +// Frame SelectObjectResult +// ReadTimeOut uint +// Finish bool +// ResultCommon +//} + +// ReaderWrapper defines HTTP response from OSS SelectObject +type ReaderWrapper struct { + Body io.ReadCloser + Version byte + FrameType int32 + PayloadLength int32 + HeaderCheckSum uint32 + Offset uint64 + Data string + ClientCRC32 uint32 + ServerCRC32 uint32 + WriterForCheckCrc32 hash.Hash32 + HTTPStatusCode int32 + TotalScanned int64 + Status int32 + SplitsCount int32 + RowsCount int64 + ColumnsCount int32 + ErrorMsg string + PayloadChecksum uint32 + ReadFlagInfo + Finish bool +} + +func (rw *ReaderWrapper) Read(p []byte) (n int, err error) { + n, err = rw.readFrames(p) + return +} + +// Close http response body +func (rw *ReaderWrapper) Close() error { + return rw.Body.Close() +} + +// readFrames is read Frame +func (rw *ReaderWrapper) readFrames(p []byte) (int, error) { + var nn int + var err error + var checkValid bool + if rw.OutputRawData == true { + nn, err = rw.Body.Read(p) + return nn, err + } + + if rw.Finish { + return 0, io.EOF + } + + for { + // if this Frame is Read, then not reading Header + if rw.OpenLine != true { + err = rw.analysisHeader() + if err != nil { + return nn, err + } + } + + if rw.FrameType == DataFrameType { + n, err := rw.analysisData(p[nn:]) + if err != nil { + return nn, err + } + nn += n + + // if this Frame is read all data, then empty the Frame to read it with next frame + if rw.ConsumedBytesLength == rw.PayloadLength-8 { + checkValid, err = rw.checkPayloadSum() + if err != nil || !checkValid { + return nn, fmt.Errorf("%s", err.Error()) + } + rw.emptyFrame() + } + + if nn == len(p) { + return nn, nil + } + } else if rw.FrameType == ContinuousFrameType { + checkValid, err = rw.checkPayloadSum() + if err != nil || !checkValid { + return nn, fmt.Errorf("%s", err.Error()) + } + rw.OpenLine = false + } else if rw.FrameType == EndFrameType { + err = rw.analysisEndFrame() + if err != nil { + return nn, err + } + checkValid, err = rw.checkPayloadSum() + if checkValid { + rw.Finish = true + } + return nn, err + } else if rw.FrameType == MetaEndFrameCSVType { + err = rw.analysisMetaEndFrameCSV() + if err != nil { + return nn, err + } + checkValid, err = rw.checkPayloadSum() + if checkValid { + rw.Finish = true + } + return nn, err + } else if rw.FrameType == MetaEndFrameJSONType { + err = rw.analysisMetaEndFrameJSON() + if err != nil { + return nn, err + } + checkValid, err = rw.checkPayloadSum() + if checkValid { + rw.Finish = true + } + return nn, err + } + } +} + +type chanReadIO struct { + readLen int + err error +} + +func (rw *ReaderWrapper) readLen(p []byte) (int, error) { + r := rw.Body + ch := make(chan chanReadIO, 1) + defer close(ch) + go func(p []byte) { + var needReadLength int + readChan := chanReadIO{} + needReadLength = len(p) + for { + n, err := r.Read(p[readChan.readLen:needReadLength]) + readChan.readLen += n + if err != nil { + readChan.err = err + ch <- readChan + return + } + + if readChan.readLen == needReadLength { + break + } + } + ch <- readChan + }(p) + + select { + case result := <-ch: + 
return result.readLen, result.err + } +} + +// analysisHeader is reading selectObject response body's header +func (rw *ReaderWrapper) analysisHeader() error { + headFrameByte := make([]byte, 20) + _, err := rw.readLen(headFrameByte) + if err != nil { + return fmt.Errorf("read response frame header failure,err:%s", err.Error()) + } + + frameTypeByte := headFrameByte[0:4] + rw.Version = frameTypeByte[0] + frameTypeByte[0] = 0 + bytesToInt(frameTypeByte, &rw.FrameType) + + if rw.FrameType != DataFrameType && rw.FrameType != ContinuousFrameType && + rw.FrameType != EndFrameType && rw.FrameType != MetaEndFrameCSVType && rw.FrameType != MetaEndFrameJSONType { + return fmt.Errorf("unexpected frame type: %d", rw.FrameType) + } + + payloadLengthByte := headFrameByte[4:8] + bytesToInt(payloadLengthByte, &rw.PayloadLength) + headCheckSumByte := headFrameByte[8:12] + bytesToInt(headCheckSumByte, &rw.HeaderCheckSum) + byteOffset := headFrameByte[12:20] + bytesToInt(byteOffset, &rw.Offset) + rw.OpenLine = true + err = rw.writerCheckCrc32(byteOffset) + return err +} + +// analysisData is reading the DataFrameType data of selectObject response body +func (rw *ReaderWrapper) analysisData(p []byte) (int, error) { + var needReadLength int32 + lenP := int32(len(p)) + restByteLength := rw.PayloadLength - 8 - rw.ConsumedBytesLength + if lenP <= restByteLength { + needReadLength = lenP + } else { + needReadLength = restByteLength + } + n, err := rw.readLen(p[:needReadLength]) + if err != nil { + return n, fmt.Errorf("read frame data error,%s", err.Error()) + } + rw.ConsumedBytesLength += int32(n) + err = rw.writerCheckCrc32(p[:n]) + return n, err +} + +// analysisEndFrame is reading the EndFrameType data of selectObject response body +func (rw *ReaderWrapper) analysisEndFrame() error { + payLoadBytes := make([]byte, rw.PayloadLength-8) + _, err := rw.readLen(payLoadBytes) + if err != nil { + return fmt.Errorf("read end frame error:%s", err.Error()) + } + bytesToInt(payLoadBytes[0:8], &rw.TotalScanned) + bytesToInt(payLoadBytes[8:12], &rw.HTTPStatusCode) + errMsgLength := rw.PayloadLength - 20 + rw.ErrorMsg = string(payLoadBytes[12 : errMsgLength+12]) + err = rw.writerCheckCrc32(payLoadBytes) + return err +} + +// analysisMetaEndFrameCSV is reading the MetaEndFrameCSVType data of selectObject response body +func (rw *ReaderWrapper) analysisMetaEndFrameCSV() error { + payLoadBytes := make([]byte, rw.PayloadLength-8) + _, err := rw.readLen(payLoadBytes) + if err != nil { + return fmt.Errorf("read meta end csv frame error:%s", err.Error()) + } + + bytesToInt(payLoadBytes[0:8], &rw.TotalScanned) + bytesToInt(payLoadBytes[8:12], &rw.Status) + bytesToInt(payLoadBytes[12:16], &rw.SplitsCount) + bytesToInt(payLoadBytes[16:24], &rw.RowsCount) + bytesToInt(payLoadBytes[24:28], &rw.ColumnsCount) + errMsgLength := rw.PayloadLength - 36 + rw.ErrorMsg = string(payLoadBytes[28 : errMsgLength+28]) + err = rw.writerCheckCrc32(payLoadBytes) + return err +} + +// analysisMetaEndFrameJSON is reading the MetaEndFrameJSONType data of selectObject response body +func (rw *ReaderWrapper) analysisMetaEndFrameJSON() error { + payLoadBytes := make([]byte, rw.PayloadLength-8) + _, err := rw.readLen(payLoadBytes) + if err != nil { + return fmt.Errorf("read meta end json frame error:%s", err.Error()) + } + + bytesToInt(payLoadBytes[0:8], &rw.TotalScanned) + bytesToInt(payLoadBytes[8:12], &rw.Status) + bytesToInt(payLoadBytes[12:16], &rw.SplitsCount) + bytesToInt(payLoadBytes[16:24], &rw.RowsCount) + errMsgLength := rw.PayloadLength - 32 + 
rw.ErrorMsg = string(payLoadBytes[24 : errMsgLength+24]) + err = rw.writerCheckCrc32(payLoadBytes) + return err +} + +func (rw *ReaderWrapper) checkPayloadSum() (bool, error) { + payLoadChecksumByte := make([]byte, 4) + n, err := rw.readLen(payLoadChecksumByte) + if n == 4 { + bytesToInt(payLoadChecksumByte, &rw.PayloadChecksum) + rw.ServerCRC32 = rw.PayloadChecksum + rw.ClientCRC32 = rw.WriterForCheckCrc32.Sum32() + if rw.EnablePayloadCrc == true && rw.ServerCRC32 != 0 && rw.ServerCRC32 != rw.ClientCRC32 { + return false, fmt.Errorf("unexpected frame type: %d, client %d but server %d", rw.FrameType, rw.ClientCRC32, rw.ServerCRC32) + } + return true, err + } + return false, fmt.Errorf("read checksum error:%s", err.Error()) +} + +func (rw *ReaderWrapper) writerCheckCrc32(p []byte) (err error) { + err = nil + if rw.EnablePayloadCrc == true { + _, err = rw.WriterForCheckCrc32.Write(p) + } + return err +} + +// emptyFrame is emptying SelectObjectResponse Frame information +func (rw *ReaderWrapper) emptyFrame() { + rw.WriterForCheckCrc32 = crc32.NewIEEE() + + rw.Finish = false + rw.ConsumedBytesLength = 0 + rw.OpenLine = false + rw.Version = byte(0) + rw.FrameType = 0 + rw.PayloadLength = 0 + rw.HeaderCheckSum = 0 + rw.Offset = 0 + rw.Data = "" + + rw.TotalScanned = 0 + rw.Status = 0 + rw.SplitsCount = 0 + rw.RowsCount = 0 + rw.ColumnsCount = 0 + + rw.ErrorMsg = "" + + rw.PayloadChecksum = 0 +} + +// bytesToInt byte's array trans to int +func bytesToInt(b []byte, ret interface{}) { + binBuf := bytes.NewBuffer(b) + binary.Read(binBuf, binary.BigEndian, ret) +} + +// jsonEncodeBase64 encode base64 of the SelectObject api request params +func (selectReq *SelectRequest) jsonEncodeBase64() { + if selectReq == nil { + return + } + if selectReq.Expression != nil { + *selectReq.Expression = base64.StdEncoding.EncodeToString([]byte(*selectReq.Expression)) + } + if selectReq.OutputSerializationSelect.JsonBodyOutput == nil { + return + } + if selectReq.OutputSerializationSelect.JsonBodyOutput.RecordDelimiter != nil { + *selectReq.OutputSerializationSelect.JsonBodyOutput.RecordDelimiter = + base64.StdEncoding.EncodeToString([]byte(*selectReq.OutputSerializationSelect.JsonBodyOutput.RecordDelimiter)) + } + if selectReq.InputSerializationSelect.JsonBodyInput.Range != nil { + *selectReq.InputSerializationSelect.JsonBodyInput.Range = "line-range=" + *selectReq.InputSerializationSelect.JsonBodyInput.Range + } + if selectReq.InputSerializationSelect.JsonBodyInput.SplitRange != nil && *selectReq.InputSerializationSelect.JsonBodyInput.SplitRange != "" { + selectReq.InputSerializationSelect.JsonBodyInput.Range = Ptr("split-range=" + *selectReq.InputSerializationSelect.JsonBodyInput.SplitRange) + selectReq.InputSerializationSelect.JsonBodyInput.SplitRange = nil + } +} + +// encodeBase64 encode base64 of the CreateSelectObjectMeta api request params +func (meta *CsvMetaRequest) encodeBase64() { + if meta == nil || meta.InputSerialization == nil { + return + } + if meta.InputSerialization.CSV.RecordDelimiter != nil { + *meta.InputSerialization.CSV.RecordDelimiter = + base64.StdEncoding.EncodeToString([]byte(*meta.InputSerialization.CSV.RecordDelimiter)) + } + if meta.InputSerialization.CSV.FieldDelimiter != nil { + *meta.InputSerialization.CSV.FieldDelimiter = + base64.StdEncoding.EncodeToString([]byte(*meta.InputSerialization.CSV.FieldDelimiter)) + } + + if meta.InputSerialization.CSV.QuoteCharacter != nil { + *meta.InputSerialization.CSV.QuoteCharacter = + 
base64.StdEncoding.EncodeToString([]byte(*meta.InputSerialization.CSV.QuoteCharacter)) + } +} + +func (selectReq *SelectRequest) encodeBase64() { + if selectReq.InputSerializationSelect.JsonBodyInput == nil { + selectReq.csvEncodeBase64() + } else { + selectReq.jsonEncodeBase64() + } +} + +// csvEncodeBase64 encode base64 of the SelectObject api request params +func (selectReq *SelectRequest) csvEncodeBase64() { + if selectReq == nil { + return + } + if selectReq.Expression != nil { + *selectReq.Expression = base64.StdEncoding.EncodeToString([]byte(*selectReq.Expression)) + } + if selectReq.InputSerializationSelect.CsvBodyInput == nil { + return + } + if selectReq.InputSerializationSelect.CsvBodyInput.RecordDelimiter != nil { + *selectReq.InputSerializationSelect.CsvBodyInput.RecordDelimiter = + base64.StdEncoding.EncodeToString([]byte(*selectReq.InputSerializationSelect.CsvBodyInput.RecordDelimiter)) + } + if selectReq.InputSerializationSelect.CsvBodyInput.FieldDelimiter != nil { + *selectReq.InputSerializationSelect.CsvBodyInput.FieldDelimiter = + base64.StdEncoding.EncodeToString([]byte(*selectReq.InputSerializationSelect.CsvBodyInput.FieldDelimiter)) + } + if selectReq.InputSerializationSelect.CsvBodyInput.QuoteCharacter != nil { + *selectReq.InputSerializationSelect.CsvBodyInput.QuoteCharacter = + base64.StdEncoding.EncodeToString([]byte(*selectReq.InputSerializationSelect.CsvBodyInput.QuoteCharacter)) + } + if selectReq.InputSerializationSelect.CsvBodyInput.CommentCharacter != nil { + *selectReq.InputSerializationSelect.CsvBodyInput.CommentCharacter = + base64.StdEncoding.EncodeToString([]byte(*selectReq.InputSerializationSelect.CsvBodyInput.CommentCharacter)) + } + if selectReq.InputSerializationSelect.CsvBodyInput.Range != nil && *selectReq.InputSerializationSelect.CsvBodyInput.Range != "" { + *selectReq.InputSerializationSelect.CsvBodyInput.Range = "line-range=" + *selectReq.InputSerializationSelect.CsvBodyInput.Range + } + if selectReq.InputSerializationSelect.CsvBodyInput.SplitRange != nil && *selectReq.InputSerializationSelect.CsvBodyInput.SplitRange != "" { + selectReq.InputSerializationSelect.CsvBodyInput.Range = Ptr("split-range=" + *selectReq.InputSerializationSelect.CsvBodyInput.SplitRange) + selectReq.InputSerializationSelect.CsvBodyInput.SplitRange = nil + } +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_service.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_service.go new file mode 100644 index 000000000..c9af79ac3 --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/api_op_service.go @@ -0,0 +1,106 @@ +package oss + +import ( + "context" + "time" +) + +type ListBucketsRequest struct { + // The name of the bucket from which the list operation begins. + Marker *string `input:"query,marker"` + + // The maximum number of buckets that can be returned in the single query. + // Valid values: 1 to 1000. + MaxKeys int32 `input:"query,max-keys"` + + // The prefix that the names of returned buckets must contain. + Prefix *string `input:"query,prefix"` // Limits the response to keys that begin with the specified prefix + + // The ID of the resource group. + ResourceGroupId *string `input:"header,x-oss-resource-group-id"` + + RequestCommon +} + +type ListBucketsResult struct { + // The prefix contained in the names of the returned bucket. + Prefix *string `xml:"Prefix"` + + // The name of the bucket after which the ListBuckets operation starts. + Marker *string `xml:"Marker"` // The marker filter. 
+ + // The maximum number of buckets that can be returned for the request. + MaxKeys int32 `xml:"MaxKeys"` + + // Indicates whether all results are returned. + // true: Only part of the results are returned for the request. + // false: All results are returned for the request. + IsTruncated bool `xml:"IsTruncated"` + + // The marker for the next ListBuckets request, which can be used to return the remaining results. + NextMarker *string `xml:"NextMarker"` + + // The container that stores information about the bucket owner. + Owner *Owner `xml:"Owner"` + + // The container that stores information about buckets. + Buckets []BucketProperties `xml:"Buckets>Bucket"` + + ResultCommon +} + +type BucketProperties struct { + // The name of the bucket. + Name *string `xml:"Name"` + + // The data center in which the bucket is located. + Location *string `xml:"Location"` + + // The time when the bucket was created. Format: yyyy-mm-ddThh:mm:ss.timezone. + CreationDate *time.Time `xml:"CreationDate"` + + // The storage class of the bucket. Valid values: + // Standard, IA, Archive, ColdArchive and DeepColdArchive. + StorageClass *string `xml:"StorageClass"` + + // The public endpoint used to access the bucket over the Internet. + ExtranetEndpoint *string `xml:"ExtranetEndpoint"` + + // The internal endpoint that is used to access the bucket from ECS instances + // that reside in the same region as the bucket. + IntranetEndpoint *string `xml:"IntranetEndpoint"` + + // The region in which the bucket is located. + Region *string `xml:"Region"` + + // The ID of the resource group to which the bucket belongs. + ResourceGroupId *string `xml:"ResourceGroupId"` +} + +// ListBuckets Lists buckets that belong to the current account. +func (c *Client) ListBuckets(ctx context.Context, request *ListBucketsRequest, optFns ...func(*Options)) (*ListBucketsResult, error) { + var err error + if request == nil { + request = &ListBucketsRequest{} + } + input := &OperationInput{ + OpName: "ListBuckets", + Method: "GET", + Headers: map[string]string{ + HTTPHeaderContentType: contentTypeDefault, + }, + } + if err = c.marshalInput(request, input, updateContentMd5); err != nil { + return nil, err + } + output, err := c.invokeOperation(ctx, input, optFns) + if err != nil { + return nil, err + } + + result := &ListBucketsResult{} + if err = c.unmarshalOutput(result, output, unmarshalBodyXml); err != nil { + return nil, c.toClientError(err, "UnmarshalOutputFail", output) + } + return result, err +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/checkpoint.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/checkpoint.go new file mode 100644 index 000000000..4e4fb2bbd --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/checkpoint.go @@ -0,0 +1,369 @@ +package oss + +import ( + "crypto/md5" + "encoding/hex" + "encoding/json" + "fmt" + "io" + "net/http" + "os" + "path/filepath" + "reflect" + "strconv" + "strings" +) + +// ----- download checkpoint ----- +type downloadCheckpoint struct { + CpDirPath string // checkpoint dir full path + CpFilePath string // checkpoint file full path + VerifyData bool // verify downloaded data in FilePath + Loaded bool // If Info.Data.DownloadInfo is loaded from checkpoint + + Info struct { //checkpoint data + Magic string // Magic + MD5 string // The Data's MD5 + Data struct { + // source + ObjectInfo struct { + Name string // oss://bucket/key + VersionId string + Range string + } + ObjectMeta struct { + Size int64 + LastModified string + ETag 
string + } + + // destination + FilePath string // Local file + + // download info + PartSize int64 + + DownloadInfo struct { + Offset int64 + CRC64 uint64 + } + } + } +} + +func newDownloadCheckpoint(request *GetObjectRequest, filePath string, baseDir string, header http.Header, partSize int64) *downloadCheckpoint { + var buf strings.Builder + name := fmt.Sprintf("%v/%v", ToString(request.Bucket), ToString(request.Key)) + buf.WriteString("oss://" + escapePath(name, false)) + buf.WriteString("\n") + buf.WriteString(ToString(request.VersionId)) + buf.WriteString("\n") + buf.WriteString(ToString(request.Range)) + + hashmd5 := md5.New() + hashmd5.Write([]byte(buf.String())) + srcHash := hex.EncodeToString(hashmd5.Sum(nil)) + + absPath, _ := filepath.Abs(filePath) + hashmd5.Reset() + hashmd5.Write([]byte(absPath)) + destHash := hex.EncodeToString(hashmd5.Sum(nil)) + + var dir string + if baseDir == "" { + dir = os.TempDir() + } else { + dir = filepath.Dir(baseDir) + } + + cpFilePath := filepath.Join(dir, fmt.Sprintf("%v-%v%v", srcHash, destHash, CheckpointFileSuffixDownloader)) + + cp := &downloadCheckpoint{ + CpFilePath: cpFilePath, + CpDirPath: dir, + } + + objectSize, _ := strconv.ParseInt(header.Get("Content-Length"), 10, 64) + + cp.Info.Magic = CheckpointMagic + cp.Info.Data.ObjectInfo.Name = "oss://" + name + cp.Info.Data.ObjectInfo.VersionId = ToString(request.VersionId) + cp.Info.Data.ObjectInfo.Range = ToString(request.Range) + cp.Info.Data.ObjectMeta.Size = objectSize + cp.Info.Data.ObjectMeta.LastModified = header.Get("Last-Modified") + cp.Info.Data.ObjectMeta.ETag = header.Get("ETag") + cp.Info.Data.FilePath = filePath + cp.Info.Data.PartSize = partSize + + return cp +} + +// load checkpoint from local file +func (cp *downloadCheckpoint) load() error { + if !DirExists(cp.CpDirPath) { + return fmt.Errorf("Invaid checkpoint dir, %v", cp.CpDirPath) + } + + if !FileExists(cp.CpFilePath) { + return nil + } + + if !cp.valid() { + cp.remove() + return nil + } + + cp.Loaded = true + + return nil +} + +func (cp *downloadCheckpoint) valid() bool { + // Compare the CP's Magic and the MD5 + contents, err := os.ReadFile(cp.CpFilePath) + if err != nil { + return false + } + + dcp := downloadCheckpoint{} + + if err = json.Unmarshal(contents, &dcp.Info); err != nil { + return false + } + + js, _ := json.Marshal(dcp.Info.Data) + sum := md5.Sum(js) + md5sum := hex.EncodeToString(sum[:]) + + if CheckpointMagic != dcp.Info.Magic || + md5sum != dcp.Info.MD5 { + return false + } + + // compare + if !reflect.DeepEqual(cp.Info.Data.ObjectInfo, dcp.Info.Data.ObjectInfo) || + !reflect.DeepEqual(cp.Info.Data.ObjectMeta, dcp.Info.Data.ObjectMeta) || + cp.Info.Data.FilePath != dcp.Info.Data.FilePath || + cp.Info.Data.PartSize != dcp.Info.Data.PartSize { + return false + } + + // download info + if dcp.Info.Data.DownloadInfo.Offset < 0 { + return false + } + + if dcp.Info.Data.DownloadInfo.Offset == 0 && + dcp.Info.Data.DownloadInfo.CRC64 != 0 { + return false + } + + rOffset := int64(0) + if len(cp.Info.Data.ObjectInfo.Range) > 0 { + if r, err := ParseRange(cp.Info.Data.ObjectInfo.Range); err != nil { + return false + } else { + rOffset = r.Offset + } + } + + if dcp.Info.Data.DownloadInfo.Offset < rOffset { + return false + } + + remains := (dcp.Info.Data.DownloadInfo.Offset - rOffset) % dcp.Info.Data.PartSize + if remains != 0 { + return false + } + + //valid data + if cp.VerifyData && dcp.Info.Data.DownloadInfo.CRC64 != 0 { + if file, err := os.Open(cp.Info.Data.FilePath); err == nil { + hash := NewCRC64(0) 
+ limitN := dcp.Info.Data.DownloadInfo.Offset - rOffset + io.Copy(hash, io.LimitReader(file, limitN)) + file.Close() + if hash.Sum64() != dcp.Info.Data.DownloadInfo.CRC64 { + return false + } + } + } + + // update + cp.Info.Data.DownloadInfo = dcp.Info.Data.DownloadInfo + + return true +} + +// dump dumps to file +func (cp *downloadCheckpoint) dump() error { + // Calculate MD5 + js, _ := json.Marshal(cp.Info.Data) + sum := md5.Sum(js) + md5sum := hex.EncodeToString(sum[:]) + cp.Info.MD5 = md5sum + + // Serialize + js, err := json.Marshal(cp.Info) + if err != nil { + return err + } + + // Dump + return os.WriteFile(cp.CpFilePath, js, FilePermMode) +} + +func (cp *downloadCheckpoint) remove() error { + return os.Remove(cp.CpFilePath) +} + +// ----- upload chcekpoint ----- +type uploadCheckpoint struct { + CpDirPath string // checkpoint dir full path + CpFilePath string // checkpoint file full path + Loaded bool // If Info.Data.UploadInfo is loaded from checkpoint + + Info struct { //checkpoint data + Magic string // Magic + MD5 string // The Data's MD5 + Data struct { + // source + FilePath string // Local file + + FileMeta struct { + Size int64 + LastModified string + } + + // destination + ObjectInfo struct { + Name string // oss://bucket/key + } + + // upload info + PartSize int64 + + UploadInfo struct { + UploadId string + } + } + } +} + +func newUploadCheckpoint(request *PutObjectRequest, filePath string, baseDir string, fileInfo os.FileInfo, partSize int64) *uploadCheckpoint { + name := fmt.Sprintf("%v/%v", ToString(request.Bucket), ToString(request.Key)) + hashmd5 := md5.New() + hashmd5.Write([]byte("oss://" + escapePath(name, false))) + destHash := hex.EncodeToString(hashmd5.Sum(nil)) + + absPath, _ := filepath.Abs(filePath) + hashmd5.Reset() + hashmd5.Write([]byte(absPath)) + srcHash := hex.EncodeToString(hashmd5.Sum(nil)) + + var dir string + if baseDir == "" { + dir = os.TempDir() + } else { + dir = filepath.Dir(baseDir) + } + + cpFilePath := filepath.Join(dir, fmt.Sprintf("%v-%v%v", srcHash, destHash, CheckpointFileSuffixUploader)) + + cp := &uploadCheckpoint{ + CpFilePath: cpFilePath, + CpDirPath: dir, + } + + cp.Info.Magic = CheckpointMagic + cp.Info.Data.FilePath = filePath + cp.Info.Data.FileMeta.Size = fileInfo.Size() + cp.Info.Data.FileMeta.LastModified = fileInfo.ModTime().String() + cp.Info.Data.ObjectInfo.Name = "oss://" + name + cp.Info.Data.PartSize = partSize + + return cp +} + +// load checkpoint from local file +func (cp *uploadCheckpoint) load() error { + if !DirExists(cp.CpDirPath) { + return fmt.Errorf("Invaid checkpoint dir, %v", cp.CpDirPath) + } + + if !FileExists(cp.CpFilePath) { + return nil + } + + if !cp.valid() { + cp.remove() + return nil + } + + cp.Loaded = true + + return nil +} + +func (cp *uploadCheckpoint) valid() bool { + // Compare the CP's Magic and the MD5 + contents, err := os.ReadFile(cp.CpFilePath) + if err != nil { + return false + } + + dcp := uploadCheckpoint{} + + if err = json.Unmarshal(contents, &dcp.Info); err != nil { + return false + } + + js, _ := json.Marshal(dcp.Info.Data) + sum := md5.Sum(js) + md5sum := hex.EncodeToString(sum[:]) + + if CheckpointMagic != dcp.Info.Magic || + md5sum != dcp.Info.MD5 { + return false + } + + // compare + if !reflect.DeepEqual(cp.Info.Data.ObjectInfo, dcp.Info.Data.ObjectInfo) || + !reflect.DeepEqual(cp.Info.Data.FileMeta, dcp.Info.Data.FileMeta) || + cp.Info.Data.FilePath != dcp.Info.Data.FilePath || + cp.Info.Data.PartSize != dcp.Info.Data.PartSize { + return false + } + + // download info + if 
len(dcp.Info.Data.UploadInfo.UploadId) == 0 { + return false + } + + // update + cp.Info.Data.UploadInfo = dcp.Info.Data.UploadInfo + + return true +} + +// dump dumps to file +func (cp *uploadCheckpoint) dump() error { + // Calculate MD5 + js, _ := json.Marshal(cp.Info.Data) + sum := md5.Sum(js) + md5sum := hex.EncodeToString(sum[:]) + cp.Info.MD5 = md5sum + + // Serialize + js, err := json.Marshal(cp.Info) + if err != nil { + return err + } + + // Dump + return os.WriteFile(cp.CpFilePath, js, FilePermMode) +} + +func (cp *uploadCheckpoint) remove() error { + return os.Remove(cp.CpFilePath) +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/client.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/client.go new file mode 100644 index 000000000..ee86ec043 --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/client.go @@ -0,0 +1,1499 @@ +package oss + +import ( + "bytes" + "context" + "crypto/md5" + "encoding/base64" + "encoding/json" + "encoding/xml" + "fmt" + "hash" + "io" + "net" + "net/http" + "net/url" + "reflect" + "strconv" + "strings" + "time" + + "github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/credentials" + "github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/retry" + "github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/signer" + "github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/transport" +) + +type Options struct { + Product string + + Region string + + Endpoint *url.URL + + RetryMaxAttempts *int + + Retryer retry.Retryer + + Signer signer.Signer + + CredentialsProvider credentials.CredentialsProvider + + HttpClient HTTPClient + + ResponseHandlers []func(*http.Response) error + + UrlStyle UrlStyleType + + FeatureFlags FeatureFlagsType + + OpReadWriteTimeout *time.Duration + + AuthMethod *AuthMethodType + + AdditionalHeaders []string +} + +func (c Options) Copy() Options { + to := c + to.ResponseHandlers = make([]func(*http.Response) error, len(c.ResponseHandlers)) + copy(to.ResponseHandlers, c.ResponseHandlers) + return to +} + +func OpReadWriteTimeout(value time.Duration) func(*Options) { + return func(o *Options) { + o.OpReadWriteTimeout = Ptr(value) + } +} + +type innerOptions struct { + BwTokenBuckets BwTokenBuckets + + // A clock offset that how much client time is different from server time + ClockOffset time.Duration + + // Logger + Log Logger + + // UserAgent + UserAgent string +} + +type Client struct { + options Options + inner innerOptions +} + +func NewClient(cfg *Config, optFns ...func(*Options)) *Client { + options := Options{ + Product: DefaultProduct, + Region: ToString(cfg.Region), + RetryMaxAttempts: cfg.RetryMaxAttempts, + Retryer: cfg.Retryer, + CredentialsProvider: cfg.CredentialsProvider, + HttpClient: cfg.HttpClient, + FeatureFlags: FeatureFlagsDefault, + AdditionalHeaders: cfg.AdditionalHeaders, + } + inner := innerOptions{ + Log: NewLogger(ToInt(cfg.LogLevel), cfg.LogPrinter), + UserAgent: buildUserAgent(cfg), + } + + resolveEndpoint(cfg, &options) + resolveRetryer(cfg, &options) + resolveHTTPClient(cfg, &options, &inner) + resolveSigner(cfg, &options) + resolveUrlStyle(cfg, &options) + resolveFeatureFlags(cfg, &options) + resolveCloudBox(cfg, &options) + + for _, fn := range optFns { + fn(&options) + } + + client := &Client{ + options: options, + inner: inner, + } + + return client +} + +func resolveEndpoint(cfg *Config, o *Options) { + disableSSL := ToBool(cfg.DisableSSL) + endpoint := ToString(cfg.Endpoint) + region := ToString(cfg.Region) + if len(endpoint) > 0 { + endpoint = 
addEndpointScheme(endpoint, disableSSL) + } else if isValidRegion(region) { + endpoint = endpointFromRegion( + region, + disableSSL, + func() EndpointType { + if ToBool(cfg.UseInternalEndpoint) { + return EndpointInternal + } else if ToBool(cfg.UseDualStackEndpoint) { + return EndpointDualStack + } else if ToBool(cfg.UseAccelerateEndpoint) { + return EndpointAccelerate + } + return EndpointPublic + }(), + ) + } + + if endpoint == "" { + return + } + + o.Endpoint, _ = url.Parse(endpoint) +} + +func resolveRetryer(_ *Config, o *Options) { + if o.Retryer != nil { + return + } + + o.Retryer = retry.NewStandard() +} + +func resolveHTTPClient(cfg *Config, o *Options, inner *innerOptions) { + if o.HttpClient != nil { + return + } + + //config in http.Transport + custom := []func(*http.Transport){} + if cfg.InsecureSkipVerify != nil { + custom = append(custom, transport.InsecureSkipVerify(*cfg.InsecureSkipVerify)) + } + if cfg.ProxyFromEnvironment != nil && *cfg.ProxyFromEnvironment { + custom = append(custom, transport.ProxyFromEnvironment()) + } + if cfg.ProxyHost != nil { + if url, err := url.Parse(*cfg.ProxyHost); err == nil { + custom = append(custom, transport.HttpProxy(url)) + } + } + + //config in transport package + tcfg := &transport.Config{} + if cfg.ConnectTimeout != nil { + tcfg.ConnectTimeout = cfg.ConnectTimeout + } + if cfg.ReadWriteTimeout != nil { + tcfg.ReadWriteTimeout = cfg.ReadWriteTimeout + } + if cfg.EnabledRedirect != nil { + tcfg.EnabledRedirect = cfg.EnabledRedirect + } + if cfg.UploadBandwidthlimit != nil { + value := *cfg.UploadBandwidthlimit * 1024 + tb := newBwTokenBucket(value) + tcfg.PostWrite = append(tcfg.PostWrite, func(n int, _ error) { + tb.LimitBandwidth(n) + }) + inner.BwTokenBuckets[BwTokenBucketSlotTx] = tb + } + if cfg.DownloadBandwidthlimit != nil { + value := *cfg.DownloadBandwidthlimit * 1024 + tb := newBwTokenBucket(value) + tcfg.PostRead = append(tcfg.PostRead, func(n int, _ error) { + tb.LimitBandwidth(n) + }) + inner.BwTokenBuckets[BwTokenBucketSlotRx] = tb + } + + o.HttpClient = transport.NewHttpClient(tcfg, custom...) 
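
The transport knobs consumed here all originate from the public Config fields rather than from Options. A minimal sketch of client construction that exercises them (region, credential values, and limits are placeholders; UploadBandwidthlimit is assumed to be an *int64 carrying KiB/s, matching the *1024 conversion above; the static provider is the one from the vendored credentials package):

	package main

	import (
		"time"

		"github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss"
		"github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/credentials"
	)

	func main() {
		cfg := &oss.Config{
			Region:               oss.Ptr("cn-hangzhou"),
			ConnectTimeout:       oss.Ptr(10 * time.Second),
			ReadWriteTimeout:     oss.Ptr(60 * time.Second),
			UploadBandwidthlimit: oss.Ptr(int64(4 * 1024)), // ~4 MiB/s token bucket on writes
			CredentialsProvider:  credentials.NewStaticCredentialsProvider("<access-key-id>", "<access-key-secret>"),
		}
		_ = oss.NewClient(cfg)
	}
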
+} + +func resolveSigner(cfg *Config, o *Options) { + if o.Signer != nil { + return + } + + ver := DefaultSignatureVersion + if cfg.SignatureVersion != nil { + ver = *cfg.SignatureVersion + } + + switch ver { + case SignatureVersionV1: + o.Signer = &signer.SignerV1{} + default: + o.Signer = &signer.SignerV4{} + } +} + +func resolveUrlStyle(cfg *Config, o *Options) { + if cfg.UseCName != nil && *cfg.UseCName { + o.UrlStyle = UrlStyleCName + } else if cfg.UsePathStyle != nil && *cfg.UsePathStyle { + o.UrlStyle = UrlStylePath + } else { + o.UrlStyle = UrlStyleVirtualHosted + } + + // if the endpoint is ip, set to path-style + if o.Endpoint != nil { + if ip := net.ParseIP(o.Endpoint.Hostname()); ip != nil { + o.UrlStyle = UrlStylePath + } + } +} + +func resolveFeatureFlags(cfg *Config, o *Options) { + if ToBool(cfg.DisableDownloadCRC64Check) { + o.FeatureFlags = o.FeatureFlags & ^FeatureEnableCRC64CheckDownload + } + + if ToBool(cfg.DisableUploadCRC64Check) { + o.FeatureFlags = o.FeatureFlags & ^FeatureEnableCRC64CheckUpload + } +} + +func resolveCloudBox(cfg *Config, o *Options) { + if cfg.CloudBoxId != nil { + o.Region = ToString(cfg.CloudBoxId) + o.Product = CloudBoxProduct + return + } + + if !ToBool(cfg.EnableAutoDetectCloudBoxId) { + return + } + + if o.Endpoint == nil { + return + } + + //cb-***.{region}.oss-cloudbox-control.aliyuncs.com + //cb-***.{region}.oss-cloudbox.aliyuncs.com + host := o.Endpoint.Host + if !(strings.HasSuffix(host, ".oss-cloudbox.aliyuncs.com") || + strings.HasSuffix(host, ".oss-cloudbox-control.aliyuncs.com")) { + return + } + + keys := strings.Split(host, ".") + if keys == nil || + len(keys) != 5 || + !strings.HasPrefix(keys[0], "cb-") { + return + } + o.Region = keys[0] + o.Product = CloudBoxProduct +} + +func buildUserAgent(cfg *Config) string { + if cfg.UserAgent == nil { + return defaultUserAgent + } + + return fmt.Sprintf("%s/%s", defaultUserAgent, ToString(cfg.UserAgent)) +} + +func (c *Client) invokeOperation(ctx context.Context, input *OperationInput, optFns []func(*Options)) (output *OperationOutput, err error) { + if c.getLogLevel() >= LogInfo { + c.inner.Log.Infof("InvokeOperation Start: input[%p], OpName:%s, Bucket:%s, Key:%s", + input, input.OpName, + ToString(input.Bucket), ToString(input.Key)) + defer func() { + c.inner.Log.Infof("InvokeOperation End: input[%p], OpName:%s, output:'%v', err:'%v'", + input, input.OpName, + c.dumpOperationOutput(output), err) + }() + } + + options := c.options.Copy() + opOpt := Options{} + + for _, fn := range optFns { + fn(&opOpt) + } + + applyOperationOpt(&options, &opOpt) + + applyOperationMetadata(input, &options) + + ctx = applyOperationContext(ctx, &options) + + output, err = c.sendRequest(ctx, input, &options) + + if err != nil { + return output, &OperationError{ + name: input.OpName, + err: err} + } + + return output, err +} + +func (c *Client) sendRequest(ctx context.Context, input *OperationInput, opts *Options) (output *OperationOutput, err error) { + var request *http.Request + var response *http.Response + if c.getLogLevel() >= LogInfo { + c.inner.Log.Infof("sendRequest Start: input[%p]", input) + defer func() { + c.inner.Log.Infof("sendRequest End: input[%p], http.Request[%p], http.Response[%p]", input, request, response) + }() + } + + // covert input into httpRequest + if !isValidEndpoint(opts.Endpoint) { + return output, NewErrParamInvalid("Endpoint") + } + + var writers []io.Writer + // tracker in OperationMetaData + for _, w := range input.OpMetadata.Values(OpMetaKeyRequestBodyTracker) { + if ww, 
ok := w.(io.Writer); ok { + writers = append(writers, ww) + } + } + // host & path + host, path := buildURL(input, opts) + strUrl := fmt.Sprintf("%s://%s%s", opts.Endpoint.Scheme, host, path) + + // querys + if len(input.Parameters) > 0 { + var buf bytes.Buffer + for k, v := range input.Parameters { + if buf.Len() > 0 { + buf.WriteByte('&') + } + buf.WriteString(url.QueryEscape(k)) + if len(v) > 0 { + buf.WriteString("=" + strings.Replace(url.QueryEscape(v), "+", "%20", -1)) + } + } + strUrl += "?" + buf.String() + } + + request, err = http.NewRequestWithContext(ctx, input.Method, strUrl, nil) + if err != nil { + return output, err + } + + // headers + for k, v := range input.Headers { + if len(k) > 0 && len(v) > 0 { + request.Header.Add(k, v) + } + } + request.Header.Set("User-Agent", c.inner.UserAgent) + + // body + var body io.Reader + if input.Body == nil { + body = strings.NewReader("") + } else { + body = input.Body + } + var length int64 + if clen := request.Header.Get("Content-Length"); clen != "" { + length, _ = strconv.ParseInt(clen, 10, 64) + } else { + length = GetReaderLen(body) + } + if length >= 0 { + request.ContentLength = length + } + request.Body = TeeReadNopCloser(body, writers...) + + //signing context + subResource, _ := input.OpMetadata.Get(signer.SubResource).([]string) + clockOffset := c.inner.ClockOffset + signingCtx := &signer.SigningContext{ + Product: Ptr(opts.Product), + Region: Ptr(opts.Region), + Bucket: input.Bucket, + Key: input.Key, + Request: request, + SubResource: subResource, + AuthMethodQuery: opts.AuthMethod != nil && *opts.AuthMethod == AuthMethodQuery, + ClockOffset: clockOffset, + AdditionalHeaders: opts.AdditionalHeaders, + } + + if date := request.Header.Get(HeaderOssDate); date != "" { + signingCtx.Time, _ = http.ParseTime(date) + } else if signTime, ok := input.OpMetadata.Get(signer.SignTime).(time.Time); ok { + signingCtx.Time = signTime + } + + // send http request + response, err = c.sendHttpRequest(ctx, signingCtx, opts) + + if err != nil { + return output, err + } + + // covert http response into output context + output = &OperationOutput{ + Input: input, + Status: response.Status, + StatusCode: response.StatusCode, + Body: response.Body, + Headers: response.Header, + httpRequest: request, + } + + // save other info by Metadata filed, ex. retry detail info + //output.OpMetadata.Set(...) 
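
Per-call functional options flow through invokeOperation above, so a single request can override client-wide settings. A small sketch (imports elided), assuming an existing client and using the OpReadWriteTimeout helper defined earlier:

	func listWithTimeout(ctx context.Context, client *oss.Client) (*oss.ListBucketsResult, error) {
		// The functional option only affects this call's copy of Options.
		return client.ListBuckets(ctx, &oss.ListBucketsRequest{},
			oss.OpReadWriteTimeout(30*time.Second))
	}
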
+ if signingCtx.AuthMethodQuery { + output.OpMetadata.Set(signer.SignTime, signingCtx.Time) + } + + if signingCtx.ClockOffset != clockOffset { + c.inner.ClockOffset = signingCtx.ClockOffset + } + + return output, err +} + +func (c *Client) sendHttpRequest(ctx context.Context, signingCtx *signer.SigningContext, opts *Options) (response *http.Response, err error) { + request := signingCtx.Request + retryer := opts.Retryer + maxAttempts := c.retryMaxAttempts(opts) + body, _ := request.Body.(*teeReadNopCloser) + resetTime := signingCtx.Time.IsZero() + body.Mark() + for tries := 1; tries <= maxAttempts; tries++ { + if tries > 1 { + delay, err := retryer.RetryDelay(tries, err) + if err != nil { + break + } + + if err = sleepWithContext(ctx, delay); err != nil { + err = &CanceledError{Err: err} + break + } + + if err = body.Reset(); err != nil { + break + } + + if resetTime { + signingCtx.Time = time.Time{} + } + + c.inner.Log.Infof("Attempt retry, request[%p], tries:%v, retry delay:%v", request, tries, delay) + } + + if response, err = c.sendHttpRequestOnce(ctx, signingCtx, opts); err == nil { + break + } + + c.postSendHttpRequestOnce(signingCtx, response, err) + + if isContextError(ctx, &err) { + err = &CanceledError{Err: err} + break + } + + if !body.IsSeekable() { + break + } + + if !retryer.IsErrorRetryable(err) { + break + } + } + return response, err +} + +func (c *Client) sendHttpRequestOnce(ctx context.Context, signingCtx *signer.SigningContext, opts *Options) ( + response *http.Response, err error, +) { + if c.getLogLevel() > LogInfo { + c.inner.Log.Infof("sendHttpRequestOnce Start, http.Request[%p]", signingCtx.Request) + defer func() { + c.inner.Log.Infof("sendHttpRequestOnce End, http.Request[%p], response[%p], err:%v", signingCtx.Request, response, err) + }() + } + + if _, anonymous := opts.CredentialsProvider.(*credentials.AnonymousCredentialsProvider); !anonymous { + cred, err := opts.CredentialsProvider.GetCredentials(ctx) + if err != nil { + return response, err + } + + signingCtx.Credentials = &cred + if err = c.options.Signer.Sign(ctx, signingCtx); err != nil { + return response, err + } + c.inner.Log.Debugf("sendHttpRequestOnce::Sign request[%p], StringToSign:%s", signingCtx.Request, signingCtx.StringToSign) + } + + c.logHttpPRequet(signingCtx.Request) + + if response, err = opts.HttpClient.Do(signingCtx.Request); err != nil { + return response, err + } + + c.logHttpResponse(signingCtx.Request, response) + + for _, fn := range opts.ResponseHandlers { + if err = fn(response); err != nil { + return response, err + } + } + + return response, err +} + +func (c *Client) postSendHttpRequestOnce(signingCtx *signer.SigningContext, _ *http.Response, err error) { + if err != nil { + switch e := err.(type) { + case *ServiceError: + if c.hasFeature(FeatureCorrectClockSkew) && + e.Code == "RequestTimeTooSkewed" && + !e.Timestamp.IsZero() { + signingCtx.ClockOffset = e.Timestamp.Sub(signingCtx.Time) + c.inner.Log.Warnf("Got RequestTimeTooSkewed error, correct clock request[%p], ClockOffset:%v, Server Time:%v, Client time:%v", + signingCtx.Request, signingCtx.ClockOffset, e.Timestamp, signingCtx.Time) + } + } + } +} + +func buildURL(input *OperationInput, opts *Options) (host string, path string) { + if input == nil || opts == nil || opts.Endpoint == nil { + return host, path + } + + var paths []string + if input.Bucket == nil { + host = opts.Endpoint.Host + } else { + switch opts.UrlStyle { + default: // UrlStyleVirtualHosted + host = fmt.Sprintf("%s.%s", *input.Bucket, opts.Endpoint.Host) 
+ case UrlStylePath: + host = opts.Endpoint.Host + paths = append(paths, *input.Bucket) + if input.Key == nil { + paths = append(paths, "") + } + case UrlStyleCName: + host = opts.Endpoint.Host + } + } + + if input.Key != nil { + paths = append(paths, escapePath(*input.Key, false)) + } + + return host, ("/" + strings.Join(paths, "/")) +} + +func serviceErrorResponseHandler(response *http.Response) error { + if response.StatusCode/100 == 2 { + return nil + } + return tryConvertServiceError(response) +} + +func callbackErrorResponseHandler(response *http.Response) error { + if response.StatusCode == 203 && + response.Request.Header.Get(HeaderOssCallback) != "" { + return tryConvertServiceError(response) + } + return nil +} + +func tryConvertServiceError(response *http.Response) (err error) { + var respBody []byte + var body []byte + timestamp, err := time.Parse(http.TimeFormat, response.Header.Get("Date")) + if err != nil { + timestamp = time.Now() + } + + defer response.Body.Close() + respBody, err = io.ReadAll(response.Body) + body = respBody + if len(respBody) == 0 && len(response.Header.Get(HeaderOssERR)) > 0 { + body, err = base64.StdEncoding.DecodeString(response.Header.Get(HeaderOssERR)) + if err != nil { + body = respBody + } + } + se := &ServiceError{ + StatusCode: response.StatusCode, + Code: "BadErrorResponse", + RequestID: response.Header.Get(HeaderOssRequestID), + Timestamp: timestamp, + RequestTarget: fmt.Sprintf("%s %s", response.Request.Method, response.Request.URL), + Snapshot: body, + Headers: response.Header, + } + + if err != nil { + se.Message = fmt.Sprintf("The body of the response was not readable, due to :%s", err.Error()) + return se + } + err = xml.Unmarshal(body, &se) + if err != nil { + len := len(body) + if len > 256 { + len = 256 + } + se.Message = fmt.Sprintf("Failed to parse xml from response body due to: %s. With part response body %s.", err.Error(), string(body[:len])) + return se + } + return se +} + +func nonStreamResponseHandler(response *http.Response) error { + body := response.Body + if body == nil { + return nil + } + + defer body.Close() + val, err := io.ReadAll(body) + + if err == nil { + response.Body = io.NopCloser(bytes.NewReader(val)) + } + + return err +} + +func checkResponseHeaderCRC64(ccrc string, header http.Header) (err error) { + if scrc := header.Get(HeaderOssCRC64); scrc != "" { + if scrc != ccrc { + return fmt.Errorf("crc is inconsistent, client %s, server %s", ccrc, scrc) + } + } + return nil +} + +func applyOperationOpt(c *Options, op *Options) { + if c == nil || op == nil { + return + } + + if op.Endpoint != nil { + c.Endpoint = op.Endpoint + } + + if ToInt(op.RetryMaxAttempts) > 0 { + c.RetryMaxAttempts = op.RetryMaxAttempts + } + + if op.Retryer != nil { + c.Retryer = op.Retryer + } + + if c.Retryer == nil { + c.Retryer = retry.NopRetryer{} + } + + if op.OpReadWriteTimeout != nil { + c.OpReadWriteTimeout = op.OpReadWriteTimeout + } + + if op.HttpClient != nil { + c.HttpClient = op.HttpClient + } + + if op.AuthMethod != nil { + c.AuthMethod = op.AuthMethod + } + + //response handler + handlers := []func(*http.Response) error{ + serviceErrorResponseHandler, + } + handlers = append(handlers, c.ResponseHandlers...) + handlers = append(handlers, op.ResponseHandlers...) 
+ c.ResponseHandlers = handlers +} + +func applyOperationContext(ctx context.Context, c *Options) context.Context { + if ctx == nil || c.OpReadWriteTimeout == nil { + return ctx + } + return context.WithValue(ctx, "OpReadWriteTimeout", c.OpReadWriteTimeout) +} + +func applyOperationMetadata(input *OperationInput, c *Options) { + for _, h := range input.OpMetadata.Values(OpMetaKeyResponsHandler) { + if hh, ok := h.(func(*http.Response) error); ok { + c.ResponseHandlers = append(c.ResponseHandlers, hh) + } + } +} + +// fieldInfo holds details for the input/output of a single field. +type fieldInfo struct { + idx int + flags int +} + +const ( + fRequire int = 1 << iota + + fTypeUsermeta + fTypeXml + fTypeTime +) + +func parseFiledFlags(tokens []string) int { + var flags int = 0 + for _, token := range tokens { + switch token { + case "required": + flags |= fRequire + case "time": + flags |= fTypeTime + case "xml": + flags |= fTypeXml + case "usermeta": + flags |= fTypeUsermeta + } + } + return flags +} + +func validateInput(input *OperationInput) error { + if input == nil { + return NewErrParamNull("OperationInput") + } + + if input.Bucket != nil && !isValidBucketName(input.Bucket) { + return NewErrParamInvalid("OperationInput.Bucket") + } + + if input.Key != nil && !isValidObjectName(input.Key) { + return NewErrParamInvalid("OperationInput.Key") + } + + if !isValidMethod(input.Method) { + return NewErrParamInvalid("OperationInput.Method") + } + + return nil +} + +func (c *Client) marshalInput(request any, input *OperationInput, handlers ...func(any, *OperationInput) error) error { + // merge common fields + if cm, ok := request.(RequestCommonInterface); ok { + h, p, b := cm.GetCommonFileds() + // headers + if len(h) > 0 { + if input.Headers == nil { + input.Headers = map[string]string{} + } + for k, v := range h { + input.Headers[k] = v + } + } + + // parameters + if len(p) > 0 { + if input.Parameters == nil { + input.Parameters = map[string]string{} + } + for k, v := range p { + input.Parameters[k] = v + } + } + + // body + input.Body = b + } + + val := reflect.ValueOf(request) + switch val.Kind() { + case reflect.Pointer, reflect.Interface: + if val.IsNil() { + return nil + } + val = val.Elem() + } + if val.Kind() != reflect.Struct || input == nil { + return nil + } + + t := val.Type() + for k := 0; k < t.NumField(); k++ { + if tag, ok := t.Field(k).Tag.Lookup("input"); ok { + // header|query|body,filed_name,[required,time,usermeta...] 
+			v := val.Field(k)
+			var flags int = 0
+			tokens := strings.Split(tag, ",")
+			if len(tokens) < 2 {
+				continue
+			}
+
+			// parse field flags
+			if len(tokens) > 2 {
+				flags = parseFiledFlags(tokens[2:])
+			}
+			// check required flag
+			if isEmptyValue(v) {
+				if flags&fRequire != 0 {
+					return NewErrParamRequired(t.Field(k).Name)
+				}
+				continue
+			}
+
+			switch tokens[0] {
+			case "query":
+				if input.Parameters == nil {
+					input.Parameters = map[string]string{}
+				}
+				if v.Kind() == reflect.Pointer {
+					v = v.Elem()
+				}
+				input.Parameters[tokens[1]] = fmt.Sprintf("%v", v.Interface())
+			case "header":
+				if input.Headers == nil {
+					input.Headers = map[string]string{}
+				}
+				if v.Kind() == reflect.Pointer {
+					v = v.Elem()
+				}
+				if flags&fTypeUsermeta != 0 {
+					if m, ok := v.Interface().(map[string]string); ok {
+						for k, v := range m {
+							input.Headers[tokens[1]+k] = v
+						}
+					}
+				} else {
+					input.Headers[tokens[1]] = fmt.Sprintf("%v", v.Interface())
+				}
+			case "body":
+				if flags&fTypeXml != 0 {
+					var b bytes.Buffer
+					if err := xml.NewEncoder(&b).EncodeElement(
+						v.Interface(),
+						xml.StartElement{Name: xml.Name{Local: tokens[1]}}); err != nil {
+						return &SerializationError{
+							Err: err,
+						}
+					}
+					input.Body = bytes.NewReader(b.Bytes())
+				} else {
+					if r, ok := v.Interface().(io.Reader); ok {
+						input.Body = r
+					} else {
+						return NewErrParamTypeNotSupport(t.Field(k).Name)
+					}
+				}
+			}
+		}
+	}
+
+	if err := validateInput(input); err != nil {
+		return err
+	}
+
+	for _, h := range handlers {
+		if err := h(request, input); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func marshalDeleteObjects(request any, input *OperationInput) error {
+	var builder strings.Builder
+	delRequest := request.(*DeleteMultipleObjectsRequest)
+	builder.WriteString("<Delete>")
+	builder.WriteString("<Quiet>")
+	builder.WriteString(strconv.FormatBool(delRequest.Quiet))
+	builder.WriteString("</Quiet>")
+	if len(delRequest.Objects) > 0 {
+		for _, object := range delRequest.Objects {
+			builder.WriteString("<Object>")
+			if object.Key != nil {
+				builder.WriteString("<Key>")
+				builder.WriteString(escapeXml(*object.Key))
+				builder.WriteString("</Key>")
+			}
+			if object.VersionId != nil {
+				builder.WriteString("<VersionId>")
+				builder.WriteString(*object.VersionId)
+				builder.WriteString("</VersionId>")
+			}
+			builder.WriteString("</Object>")
+		}
+	} else {
+		return NewErrParamInvalid("Objects")
+	}
+	builder.WriteString("</Delete>")
+	input.Body = strings.NewReader(builder.String())
+	return nil
+}
+
+func discardBody(result any, output *OperationOutput) error {
+	var err error
+	if output.Body != nil {
+		defer output.Body.Close()
+		_, err = io.Copy(io.Discard, output.Body)
+	}
+	return err
+}
+
+func unmarshalBodyXml(result any, output *OperationOutput) error {
+	var err error
+	var body []byte
+	if output.Body != nil {
+		defer output.Body.Close()
+		if body, err = io.ReadAll(output.Body); err != nil {
+			return err
+		}
+	}
+
+	if len(body) > 0 {
+		if err = xml.Unmarshal(body, result); err != nil {
+			err = &DeserializationError{
+				Err:      err,
+				Snapshot: body,
+			}
+		}
+	}
+	return err
+}
+
+func unmarshalBodyXmlMix(result any, output *OperationOutput) error {
+	var err error
+	var body []byte
+	if output.Body != nil {
+		defer output.Body.Close()
+		if body, err = io.ReadAll(output.Body); err != nil {
+			return err
+		}
+	}
+
+	if len(body) == 0 {
+		return nil
+	}
+
+	val := reflect.ValueOf(result)
+	switch val.Kind() {
+	case reflect.Pointer, reflect.Interface:
+		if val.IsNil() {
+			return nil
+		}
+		val = val.Elem()
+	}
+	if val.Kind() != reflect.Struct || output == nil {
+		return nil
+	}
+
+	t := val.Type()
+	idx := -1
+	for k := 0; k < t.NumField(); k++ {
+		if tag, ok := t.Field(k).Tag.Lookup("output"); ok {
+			tokens := strings.Split(tag, ",")
+			if len(tokens) < 2 {
+				continue
+			}
+			// header|query|body,filed_name,[required,time,usermeta...]
+			switch tokens[0] {
+			case "body":
+				idx = k
+				break
+			}
+		}
+	}
+
+	if idx >= 0 {
+		dst := val.Field(idx)
+		if dst.IsNil() {
+			dst.Set(reflect.New(dst.Type().Elem()))
+		}
+		err = xml.Unmarshal(body, dst.Interface())
+	} else {
+		err = xml.Unmarshal(body, result)
+	}
+
+	if err != nil {
+		err = &DeserializationError{
+			Err:      err,
+			Snapshot: body,
+		}
+	}
+
+	return err
+}
+
+func unmarshalBodyXmlVersions(result any, output *OperationOutput) error {
+	var err error
+	var body []byte
+	if output.Body != nil {
+		defer output.Body.Close()
+		if body, err = io.ReadAll(output.Body); err != nil {
+			return err
+		}
+	}
+
+	if len(body) > 0 {
+		oldStrings := []string{"<Version>", "</Version>", "<DeleteMarker>", "</DeleteMarker>"}
+		newStrings := []string{"<ObjectVersion>", "</ObjectVersion>", "<ObjectDeleteMarker>", "</ObjectDeleteMarker>"}
+
+		replacedData := string(body)
+		for i := range oldStrings {
+			replacedData = strings.Replace(replacedData, oldStrings[i], newStrings[i], -1)
+		}
+		if err = xml.Unmarshal([]byte(replacedData), result); err != nil {
+			err = &DeserializationError{
+				Err:      err,
+				Snapshot: body,
+			}
+		}
+	}
+	return err
+}
+
+func unmarshalBodyDefault(result any, output *OperationOutput) error {
+	var err error
+	var body []byte
+	if output.Body != nil {
+		defer output.Body.Close()
+		if body, err = io.ReadAll(output.Body); err != nil {
+			return err
+		}
+	}
+
+	// extract body
+	if len(body) > 0 {
+		contentType := output.Headers.Get("Content-Type")
+		switch contentType {
+		case "application/xml":
+			err = xml.Unmarshal(body, result)
+		case "application/json":
+			err = json.Unmarshal(body, result)
+		case "application/json;charset=utf-8":
+			err = json.Unmarshal(body, result)
+		default:
+			err = fmt.Errorf("unsupported content type: %s", contentType)
+		}
+
+		if err != nil {
+			err = &DeserializationError{
+				Err:      err,
+				Snapshot: body,
+			}
+		}
+	}
+	return err
+}
+
+func unmarshalCallbackBody(result any, output *OperationOutput) error {
+	var err error
+	var body []byte
+	if output.Body != nil {
+		defer output.Body.Close()
+		if body, err = io.ReadAll(output.Body); err != nil {
+			return err
+		}
+	}
+	if len(body) > 0 {
+		switch r := result.(type) {
+		case *PutObjectResult:
+			if err = json.Unmarshal(body, &r.CallbackResult); err != nil {
+				return err
+			}
+		case *CompleteMultipartUploadResult:
+			if err = json.Unmarshal(body, &r.CallbackResult); err != nil {
+				return err
+			}
+		}
+	}
+	return err
+}
+
+func unmarshalHeader(result any, output *OperationOutput) error {
+	val := reflect.ValueOf(result)
+	switch val.Kind() {
+	case reflect.Pointer, reflect.Interface:
+		if val.IsNil() {
+			return nil
+		}
+		val = val.Elem()
+	}
+	if val.Kind() != reflect.Struct || output == nil {
+		return nil
+	}
+
+	filedInfos := map[string]fieldInfo{}
+
+	t := val.Type()
+	var usermetaKeys []string
+	for k := 0; k < t.NumField(); k++ {
+		if tag, ok := t.Field(k).Tag.Lookup("output"); ok {
+			tokens := strings.Split(tag, ",")
+			if len(tokens) < 2 {
+				continue
+			}
+			// header|query|body,filed_name,[required,time,usermeta...]
+ switch tokens[0] { + case "header": + lowkey := strings.ToLower(tokens[1]) + + var flags int = 0 + if len(tokens) >= 3 { + flags = parseFiledFlags(tokens[2:]) + } + filedInfos[lowkey] = fieldInfo{idx: k, flags: flags} + if flags&fTypeUsermeta != 0 { + usermetaKeys = append(usermetaKeys, lowkey) + } + } + } + } + var err error + for key, vv := range output.Headers { + lkey := strings.ToLower(key) + for _, prefix := range usermetaKeys { + if strings.HasPrefix(lkey, prefix) { + if field, ok := filedInfos[prefix]; ok { + if field.flags&fTypeUsermeta != 0 { + mapKey := strings.TrimPrefix(lkey, prefix) + err = setMapStringReflectValue(val.Field(field.idx), mapKey, vv[0]) + } + } + } + } + if field, ok := filedInfos[lkey]; ok { + if field.flags&fTypeTime != 0 { + if t, err := http.ParseTime(vv[0]); err == nil { + err = setTimeReflectValue(val.Field(field.idx), t) + } + } else { + err = setReflectValue(val.Field(field.idx), vv[0]) + } + if err != nil { + return err + } + } + } + + return nil +} + +func unmarshalHeaderLite(result any, output *OperationOutput) error { + val := reflect.ValueOf(result) + switch val.Kind() { + case reflect.Pointer, reflect.Interface: + if val.IsNil() { + return nil + } + val = val.Elem() + } + if val.Kind() != reflect.Struct || output == nil { + return nil + } + + t := val.Type() + for k := 0; k < t.NumField(); k++ { + if tag := t.Field(k).Tag.Get("output"); tag != "" { + tokens := strings.Split(tag, ",") + if len(tokens) != 2 { + continue + } + switch tokens[0] { + case "header": + if src := output.Headers.Get(tokens[1]); src != "" { + if err := setReflectValue(val.Field(k), src); err != nil { + return err + } + } + } + } + } + return nil +} + +func (c *Client) unmarshalOutput(result any, output *OperationOutput, handlers ...func(any, *OperationOutput) error) error { + // Common + if cm, ok := result.(ResultCommonInterface); ok { + cm.CopyIn(output.Status, output.StatusCode, output.Headers, output.OpMetadata) + } + + var err error + for _, h := range handlers { + if err = h(result, output); err != nil { + break + } + } + return err +} + +func updateContentMd5(_ any, input *OperationInput) error { + var err error + var contentMd5 string + if input.Body != nil { + var r io.ReadSeeker + var ok bool + if r, ok = input.Body.(io.ReadSeeker); !ok { + buf, _ := io.ReadAll(input.Body) + r = bytes.NewReader(buf) + input.Body = r + } + h := md5.New() + if _, err = copySeekableBody(h, r); err != nil { + // error + } else { + contentMd5 = base64.StdEncoding.EncodeToString(h.Sum(nil)) + } + } else { + contentMd5 = "1B2M2Y8AsgTpgAmY7PhCfg==" + } + + // set content-md5 and content-type + if err == nil { + if input.Headers == nil { + input.Headers = map[string]string{} + } + input.Headers["Content-MD5"] = contentMd5 + } + + return err +} + +func updateContentType(_ any, input *OperationInput) error { + if input.Headers == nil { + input.Headers = map[string]string{} + } + if _, ok := input.Headers[HTTPHeaderContentType]; !ok { + value := TypeByExtension(ToString(input.Key)) + if value == "" { + value = contentTypeDefault + } + input.Headers[HTTPHeaderContentType] = value + } + return nil +} + +func addProgress(request any, input *OperationInput) error { + var w io.Writer + switch req := request.(type) { + case *PutObjectRequest: + if req.ProgressFn == nil { + return nil + } + w = NewProgress(req.ProgressFn, GetReaderLen(input.Body)) + case *AppendObjectRequest: + if req.ProgressFn == nil { + return nil + } + w = NewProgress(req.ProgressFn, GetReaderLen(input.Body)) + case 
*UploadPartRequest: + if req.ProgressFn == nil { + return nil + } + w = NewProgress(req.ProgressFn, GetReaderLen(input.Body)) + default: + return nil + } + input.OpMetadata.Add(OpMetaKeyRequestBodyTracker, w) + return nil +} + +func addProcess(request any, input *OperationInput) error { + switch req := request.(type) { + case *ProcessObjectRequest: + if req.Process == nil { + return nil + } + processData := fmt.Sprintf("%v=%v", "x-oss-process", ToString(req.Process)) + input.Body = strings.NewReader(processData) + case *AsyncProcessObjectRequest: + if req.AsyncProcess == nil { + return nil + } + processData := fmt.Sprintf("%v=%v", "x-oss-async-process", ToString(req.AsyncProcess)) + input.Body = strings.NewReader(processData) + default: + return nil + } + + return nil +} + +func addCrcCheck(_ any, input *OperationInput) error { + var w io.Writer = NewCRC64(0) + input.OpMetadata.Add(OpMetaKeyRequestBodyTracker, w) + input.OpMetadata.Add(OpMetaKeyResponsHandler, func(response *http.Response) error { + return checkResponseHeaderCRC64(fmt.Sprint(w.(hash.Hash64).Sum64()), response.Header) + }) + return nil +} + +func addCallback(_ any, input *OperationInput) error { + input.OpMetadata.Add(OpMetaKeyResponsHandler, callbackErrorResponseHandler) + return nil +} + +func enableNonStream(_ any, input *OperationInput) error { + input.OpMetadata.Add(OpMetaKeyResponsHandler, func(response *http.Response) error { + return nonStreamResponseHandler(response) + }) + return nil +} + +func (c *Client) updateContentType(request any, input *OperationInput) error { + if !c.hasFeature(FeatureAutoDetectMimeType) { + return nil + } + return updateContentType(request, input) +} + +func (c *Client) addCrcCheck(request any, input *OperationInput) error { + if !c.hasFeature(FeatureEnableCRC64CheckUpload) { + return nil + } + return addCrcCheck(request, input) +} + +func encodeSourceObject(request any) string { + var bucket, key, versionId string + switch req := request.(type) { + case *CopyObjectRequest: + key = ToString(req.SourceKey) + if req.SourceBucket != nil { + bucket = *req.SourceBucket + } else { + bucket = ToString(req.Bucket) + } + versionId = ToString(req.SourceVersionId) + case *UploadPartCopyRequest: + key = ToString(req.SourceKey) + if req.SourceBucket != nil { + bucket = *req.SourceBucket + } else { + bucket = ToString(req.Bucket) + } + versionId = ToString(req.SourceVersionId) + } + + source := fmt.Sprintf("/%s/%s", bucket, escapePath(key, false)) + if versionId != "" { + source += "?versionId=" + versionId + } + + return source +} + +func (c *Client) toClientError(err error, code string, output *OperationOutput) error { + if err == nil { + return nil + } + + return &ClientError{ + Code: code, + Message: fmt.Sprintf("execute %s fail, error code is %s, request id:%s", + output.Input.OpName, + code, + output.Headers.Get(HeaderOssRequestID), + ), + Err: err} +} + +func (c *Client) hasFeature(flag FeatureFlagsType) bool { + return (c.options.FeatureFlags & flag) > 0 +} + +func (c *Client) retryMaxAttempts(opts *Options) int { + if opts == nil { + opts = &c.options + } + + if opts.RetryMaxAttempts != nil { + return ToInt(opts.RetryMaxAttempts) + } + + if opts.Retryer != nil { + return opts.Retryer.MaxAttempts() + } + + return retry.DefaultMaxAttempts +} + +func (c *Client) dumpOperationOutput(output *OperationOutput) string { + if output == nil { + return "" + } + return fmt.Sprintf("http.Request[%p] Status:%v, StatusCode%v, RequestId:%v", + output.httpRequest, output.Status, output.StatusCode, + 
output.Headers.Get(HeaderOssRequestID), + ) +} + +// LoggerHTTPReq Print the header information of the http request +func (c *Client) logHttpPRequet(request *http.Request) { + if c.getLogLevel() < LogDebug { + return + } + var logBuffer bytes.Buffer + logBuffer.WriteString(fmt.Sprintf("http.request[%p]", request)) + if request != nil { + logBuffer.WriteString(fmt.Sprintf("Method:%s\t", request.Method)) + logBuffer.WriteString(fmt.Sprintf("Host:%s\t", request.URL.Host)) + logBuffer.WriteString(fmt.Sprintf("Path:%s\t", request.URL.Path)) + logBuffer.WriteString(fmt.Sprintf("Query:%s\t", request.URL.RawQuery)) + logBuffer.WriteString(fmt.Sprintf("Header info:")) + + for k, v := range request.Header { + var valueBuffer bytes.Buffer + for j := 0; j < len(v); j++ { + if j > 0 { + valueBuffer.WriteString(" ") + } + valueBuffer.WriteString(v[j]) + } + logBuffer.WriteString(fmt.Sprintf("\t%s:%s", k, valueBuffer.String())) + } + } + + c.inner.Log.Debugf("%s", logBuffer.String()) +} + +// LoggerHTTPResp Print Response to http request +func (c *Client) logHttpResponse(request *http.Request, response *http.Response) { + if c.getLogLevel() < LogDebug { + return + } + var logBuffer bytes.Buffer + logBuffer.WriteString(fmt.Sprintf("http.request[%p]|http.response[%p]", request, response)) + if response != nil { + logBuffer.WriteString(fmt.Sprintf("StatusCode:%d\t", response.StatusCode)) + logBuffer.WriteString(fmt.Sprintf("Header info:")) + for k, v := range response.Header { + var valueBuffer bytes.Buffer + for j := 0; j < len(v); j++ { + if j > 0 { + valueBuffer.WriteString(" ") + } + valueBuffer.WriteString(v[j]) + } + logBuffer.WriteString(fmt.Sprintf("\t%s:%s", k, valueBuffer.String())) + } + } + c.inner.Log.Debugf("%s", logBuffer.String()) +} + +func (c *Client) getLogLevel() int { + if c.inner.Log != nil { + return c.inner.Log.Level() + } + return LogOff +} + +// Content-Type +const ( + contentTypeDefault string = "application/octet-stream" + contentTypeXML = "application/xml" +) diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/client_extension.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/client_extension.go new file mode 100644 index 000000000..b7531d088 --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/client_extension.go @@ -0,0 +1,164 @@ +package oss + +import ( + "context" + "errors" + "fmt" + "hash" + "io" + "os" +) + +// NewDownloader creates a new Downloader instance to download objects. +func (c *Client) NewDownloader(optFns ...func(*DownloaderOptions)) *Downloader { + return NewDownloader(c, optFns...) +} + +// NewUploader creates a new Uploader instance to upload objects. +func (c *Client) NewUploader(optFns ...func(*UploaderOptions)) *Uploader { + return NewUploader(c, optFns...) +} + +// NewCopier creates a new Copier instance to copy objects. +func (c *Client) NewCopier(optFns ...func(*CopierOptions)) *Copier { + return NewCopier(c, optFns...) +} + +// OpenFile opens the named file for reading. +func (c *Client) OpenFile(ctx context.Context, bucket string, key string, optFns ...func(*OpenOptions)) (*ReadOnlyFile, error) { + return NewReadOnlyFile(ctx, c, bucket, key, optFns...) +} + +// AppendFile opens or creates the named file for appending. +func (c *Client) AppendFile(ctx context.Context, bucket string, key string, optFns ...func(*AppendOptions)) (*AppendOnlyFile, error) { + return NewAppendFile(ctx, c, bucket, key, optFns...) 
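
These helpers expose OSS objects through io-style interfaces. A minimal sketch (imports elided) of streaming one object to stdout via OpenFile, with placeholder bucket and key, assuming the returned ReadOnlyFile satisfies io.ReadCloser as its name suggests:

	func catObject(ctx context.Context, client *oss.Client) error {
		f, err := client.OpenFile(ctx, "example-bucket", "backups/meta.json")
		if err != nil {
			return err
		}
		defer f.Close() // release the underlying stream
		_, err = io.Copy(os.Stdout, f)
		return err
	}
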
+}
+
+type IsObjectExistOptions struct {
+	VersionId    *string
+	RequestPayer *string
+}
+
+// IsObjectExist checks if the object exists.
+func (c *Client) IsObjectExist(ctx context.Context, bucket string, key string, optFns ...func(*IsObjectExistOptions)) (bool, error) {
+	options := IsObjectExistOptions{}
+	for _, fn := range optFns {
+		fn(&options)
+	}
+	_, err := c.GetObjectMeta(ctx, &GetObjectMetaRequest{Bucket: Ptr(bucket), Key: Ptr(key), VersionId: options.VersionId, RequestPayer: options.RequestPayer})
+	if err == nil {
+		return true, nil
+	}
+	var serr *ServiceError
+	if errors.As(err, &serr) {
+		if serr.Code == "NoSuchKey" ||
+			// error code not in response header
+			(serr.StatusCode == 404 && serr.Code == "BadErrorResponse") {
+			return false, nil
+		}
+	}
+	return false, err
+}
+
+// IsBucketExist checks if the bucket exists.
+func (c *Client) IsBucketExist(ctx context.Context, bucket string, optFns ...func(*Options)) (bool, error) {
+	_, err := c.GetBucketAcl(ctx, &GetBucketAclRequest{Bucket: Ptr(bucket)}, optFns...)
+	if err == nil {
+		return true, nil
+	}
+	var serr *ServiceError
+	if errors.As(err, &serr) {
+		if serr.Code == "NoSuchBucket" {
+			return false, nil
+		}
+		return true, nil
+	}
+	return false, err
+}
+
+// PutObjectFromFile creates a new object from the local file.
+func (c *Client) PutObjectFromFile(ctx context.Context, request *PutObjectRequest, filePath string, optFns ...func(*Options)) (*PutObjectResult, error) {
+	if request == nil {
+		return nil, NewErrParamNull("request")
+	}
+	file, err := os.Open(filePath)
+	if err != nil {
+		return nil, err
+	}
+	defer file.Close()
+	pRequest := *request
+	pRequest.Body = file
+	return c.PutObject(ctx, &pRequest, optFns...)
+}
+
+// GetObjectToFile downloads the object into a local file.
+func (c *Client) GetObjectToFile(ctx context.Context, request *GetObjectRequest, filePath string, optFns ...func(*Options)) (*GetObjectResult, error) {
+	if request == nil {
+		return nil, NewErrParamNull("request")
+	}
+	var (
+		hash   hash.Hash64
+		prog   *progressTracker
+		result *GetObjectResult
+		err    error
+		retry  bool
+	)
+	if request.ProgressFn != nil {
+		prog = &progressTracker{
+			pr: request.ProgressFn,
+		}
+	}
+	if c.hasFeature(FeatureEnableCRC64CheckDownload) {
+		hash = NewCRC64(0)
+	}
+	i := 0
+	maxRetrys := c.retryMaxAttempts(nil)
+	for {
+		i++
+		result, retry, err = c.getObjectToFileNoRerty(ctx, request, filePath, hash, prog, optFns...)
+		if err == nil || !retry {
+			break
+		}
+		if i > maxRetrys {
+			break
+		}
+	}
+	return result, err
+}
+
+func (c *Client) getObjectToFileNoRerty(ctx context.Context, request *GetObjectRequest, filePath string,
+	hash hash.Hash64, prog *progressTracker, optFns ...func(*Options)) (*GetObjectResult, bool, error) {
+	result, err := c.GetObject(ctx, request, optFns...)
+ if err != nil { + return nil, false, err + } + defer result.Body.Close() + + file, err := os.Create(filePath) + if err != nil { + return nil, false, err + } + defer file.Close() + + var writers []io.Writer + if hash != nil { + hash.Reset() + writers = append(writers, hash) + } + if prog != nil { + prog.total = result.ContentLength + prog.Reset() + writers = append(writers, prog) + } + var r io.Reader = result.Body + if len(writers) > 0 { + r = io.TeeReader(result.Body, io.MultiWriter(writers...)) + } + _, err = io.Copy(file, r) + + if err == nil && hash != nil { + err = checkResponseHeaderCRC64(fmt.Sprint(hash.Sum64()), result.Headers) + } + return result, true, err +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/client_paginators.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/client_paginators.go new file mode 100644 index 000000000..109f6499d --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/client_paginators.go @@ -0,0 +1,407 @@ +package oss + +import ( + "context" + "fmt" +) + +type PaginatorOptions struct { + // The maximum number of items in the response. + Limit int32 +} + +// ListObjectsPaginator is a paginator for ListObjects +type ListObjectsPaginator struct { + options PaginatorOptions + client *Client + request *ListObjectsRequest + marker *string + firstPage bool + isTruncated bool +} + +func (c *Client) NewListObjectsPaginator(request *ListObjectsRequest, optFns ...func(*PaginatorOptions)) *ListObjectsPaginator { + if request == nil { + request = &ListObjectsRequest{} + } + + options := PaginatorOptions{} + options.Limit = request.MaxKeys + + for _, fn := range optFns { + fn(&options) + } + + return &ListObjectsPaginator{ + options: options, + client: c, + request: request, + marker: request.Marker, + firstPage: true, + isTruncated: false, + } +} + +// HasNext Returns true if there’s a next page. +func (p *ListObjectsPaginator) HasNext() bool { + return p.firstPage || p.isTruncated +} + +// NextPage retrieves the next ListObjects page. +func (p *ListObjectsPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListObjectsResult, error) { + if !p.HasNext() { + return nil, fmt.Errorf("no more pages available") + } + + request := *p.request + request.Marker = p.marker + + var limit int32 + if p.options.Limit > 0 { + limit = p.options.Limit + } + request.MaxKeys = limit + request.EncodingType = Ptr("url") + + result, err := p.client.ListObjects(ctx, &request, optFns...) + if err != nil { + return nil, err + } + + p.firstPage = false + p.isTruncated = result.IsTruncated + p.marker = result.NextMarker + + return result, nil +} + +// ListObjectsV2Paginator is a paginator for ListObjectsV2 +type ListObjectsV2Paginator struct { + options PaginatorOptions + client *Client + request *ListObjectsV2Request + continueToken *string + firstPage bool + isTruncated bool +} + +func (c *Client) NewListObjectsV2Paginator(request *ListObjectsV2Request, optFns ...func(*PaginatorOptions)) *ListObjectsV2Paginator { + if request == nil { + request = &ListObjectsV2Request{} + } + + options := PaginatorOptions{} + options.Limit = request.MaxKeys + + for _, fn := range optFns { + fn(&options) + } + + return &ListObjectsV2Paginator{ + options: options, + client: c, + request: request, + continueToken: request.ContinuationToken, + firstPage: true, + isTruncated: false, + } +} + +// HasNext Returns true if there’s a next page. 
+func (p *ListObjectsV2Paginator) HasNext() bool { + return p.firstPage || p.isTruncated +} + +// NextPage retrieves the next ListObjectsV2 page. +func (p *ListObjectsV2Paginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListObjectsV2Result, error) { + if !p.HasNext() { + return nil, fmt.Errorf("no more pages available") + } + + request := *p.request + request.ContinuationToken = p.continueToken + + var limit int32 + if p.options.Limit > 0 { + limit = p.options.Limit + } + request.MaxKeys = limit + request.EncodingType = Ptr("url") + + result, err := p.client.ListObjectsV2(ctx, &request, optFns...) + if err != nil { + return nil, err + } + + p.firstPage = false + p.isTruncated = result.IsTruncated + p.continueToken = result.NextContinuationToken + + return result, nil +} + +// ListObjectVersionsPaginator is a paginator for ListObjectVersions +type ListObjectVersionsPaginator struct { + options PaginatorOptions + client *Client + request *ListObjectVersionsRequest + keyMarker *string + versionIdMarker *string + firstPage bool + isTruncated bool +} + +func (c *Client) NewListObjectVersionsPaginator(request *ListObjectVersionsRequest, optFns ...func(*PaginatorOptions)) *ListObjectVersionsPaginator { + if request == nil { + request = &ListObjectVersionsRequest{} + } + + options := PaginatorOptions{} + options.Limit = request.MaxKeys + + for _, fn := range optFns { + fn(&options) + } + + return &ListObjectVersionsPaginator{ + options: options, + client: c, + request: request, + keyMarker: request.KeyMarker, + versionIdMarker: request.VersionIdMarker, + firstPage: true, + isTruncated: false, + } +} + +// HasNext Returns true if there’s a next page. +func (p *ListObjectVersionsPaginator) HasNext() bool { + return p.firstPage || p.isTruncated +} + +// NextPage retrieves the next ListObjectVersions page. +func (p *ListObjectVersionsPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListObjectVersionsResult, error) { + if !p.HasNext() { + return nil, fmt.Errorf("no more pages available") + } + + request := *p.request + request.KeyMarker = p.keyMarker + request.VersionIdMarker = p.versionIdMarker + + var limit int32 + if p.options.Limit > 0 { + limit = p.options.Limit + } + request.MaxKeys = limit + request.EncodingType = Ptr("url") + + result, err := p.client.ListObjectVersions(ctx, &request, optFns...) + if err != nil { + return nil, err + } + + p.firstPage = false + p.isTruncated = result.IsTruncated + p.keyMarker = result.NextKeyMarker + p.versionIdMarker = result.NextVersionIdMarker + + return result, nil +} + +// ListBucketsPaginator is a paginator for ListBuckets +type ListBucketsPaginator struct { + options PaginatorOptions + client *Client + request *ListBucketsRequest + marker *string + firstPage bool + isTruncated bool +} + +func (c *Client) NewListBucketsPaginator(request *ListBucketsRequest, optFns ...func(*PaginatorOptions)) *ListBucketsPaginator { + if request == nil { + request = &ListBucketsRequest{} + } + + options := PaginatorOptions{} + options.Limit = request.MaxKeys + + for _, fn := range optFns { + fn(&options) + } + + return &ListBucketsPaginator{ + options: options, + client: c, + request: request, + marker: request.Marker, + firstPage: true, + isTruncated: false, + } +} + +// HasNext Returns true if there’s a next page. +func (p *ListBucketsPaginator) HasNext() bool { + return p.firstPage || p.isTruncated +} + +// NextPage retrieves the next ListBuckets page. 
+func (p *ListBucketsPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListBucketsResult, error) { + if !p.HasNext() { + return nil, fmt.Errorf("no more pages available") + } + + request := *p.request + request.Marker = p.marker + + var limit int32 + if p.options.Limit > 0 { + limit = p.options.Limit + } + request.MaxKeys = limit + + result, err := p.client.ListBuckets(ctx, &request, optFns...) + if err != nil { + return nil, err + } + + p.firstPage = false + p.isTruncated = result.IsTruncated + p.marker = result.NextMarker + + return result, nil +} + +type ListPartsAPIClient interface { + ListParts(ctx context.Context, request *ListPartsRequest, optFns ...func(*Options)) (*ListPartsResult, error) +} + +// ListPartsPaginator is a paginator for ListParts +type ListPartsPaginator struct { + options PaginatorOptions + client ListPartsAPIClient + request *ListPartsRequest + marker int32 + firstPage bool + isTruncated bool +} + +func NewListPartsPaginator(c ListPartsAPIClient, request *ListPartsRequest, optFns ...func(*PaginatorOptions)) *ListPartsPaginator { + if request == nil { + request = &ListPartsRequest{} + } + + options := PaginatorOptions{} + options.Limit = request.MaxParts + + for _, fn := range optFns { + fn(&options) + } + + return &ListPartsPaginator{ + options: options, + client: c, + request: request, + marker: request.PartNumberMarker, + firstPage: true, + isTruncated: false, + } +} + +func (c *Client) NewListPartsPaginator(request *ListPartsRequest, optFns ...func(*PaginatorOptions)) *ListPartsPaginator { + return NewListPartsPaginator(c, request, optFns...) +} + +// HasNext Returns true if there’s a next page. +func (p *ListPartsPaginator) HasNext() bool { + return p.firstPage || p.isTruncated +} + +// NextPage retrieves the next ListParts page. +func (p *ListPartsPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListPartsResult, error) { + if !p.HasNext() { + return nil, fmt.Errorf("no more pages available") + } + + request := *p.request + request.PartNumberMarker = p.marker + var limit int32 + if p.options.Limit > 0 { + limit = p.options.Limit + } + request.MaxParts = limit + request.EncodingType = Ptr("url") + result, err := p.client.ListParts(ctx, &request, optFns...) + if err != nil { + return nil, err + } + + p.firstPage = false + p.isTruncated = result.IsTruncated + p.marker = result.NextPartNumberMarker + + return result, nil +} + +// ListMultipartUploadsPaginator is a paginator for ListMultipartUploads +type ListMultipartUploadsPaginator struct { + options PaginatorOptions + client *Client + request *ListMultipartUploadsRequest + keyMarker *string + uploadIdMarker *string + firstPage bool + isTruncated bool +} + +func (c *Client) NewListMultipartUploadsPaginator(request *ListMultipartUploadsRequest, optFns ...func(*PaginatorOptions)) *ListMultipartUploadsPaginator { + if request == nil { + request = &ListMultipartUploadsRequest{} + } + options := PaginatorOptions{} + options.Limit = request.MaxUploads + for _, fn := range optFns { + fn(&options) + } + return &ListMultipartUploadsPaginator{ + options: options, + client: c, + request: request, + keyMarker: request.KeyMarker, + uploadIdMarker: request.UploadIdMarker, + firstPage: true, + isTruncated: false, + } +} + +// HasNext Returns true if there’s a next page. +func (p *ListMultipartUploadsPaginator) HasNext() bool { + return p.firstPage || p.isTruncated +} + +// NextPage retrieves the next ListMultipartUploads page. 
+func (p *ListMultipartUploadsPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListMultipartUploadsResult, error) { + if !p.HasNext() { + return nil, fmt.Errorf("no more pages available") + } + request := *p.request + request.KeyMarker = p.keyMarker + request.UploadIdMarker = p.uploadIdMarker + var limit int32 + if p.options.Limit > 0 { + limit = p.options.Limit + } + request.MaxUploads = limit + request.EncodingType = Ptr("url") + result, err := p.client.ListMultipartUploads(ctx, &request, optFns...) + if err != nil { + return nil, err + } + + p.firstPage = false + p.isTruncated = result.IsTruncated + p.keyMarker = result.NextKeyMarker + p.uploadIdMarker = result.NextUploadIdMarker + return result, nil +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/client_presign.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/client_presign.go new file mode 100644 index 000000000..c5015955a --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/client_presign.go @@ -0,0 +1,164 @@ +package oss + +import ( + "context" + "fmt" + "net/http" + "reflect" + "time" + + "github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/signer" +) + +type PresignOptions struct { + // Expires sets the expiration duration for the generated presign url. + Expires time.Duration + + // Expiration sets the expiration time for the generated presign url. + Expiration time.Time +} + +type PresignResult struct { + Method string + URL string + Expiration time.Time + SignedHeaders map[string]string +} + +type nopHttpClient struct { +} + +func (c *nopHttpClient) Do(*http.Request) (*http.Response, error) { + return &http.Response{ + StatusCode: 200, + Header: http.Header{}, + Body: http.NoBody, + }, nil +} + +var ( + defaultNopHttpClient = &nopHttpClient{} + defaultPresignOptions = []func(*Options){ + func(o *Options) { + o.HttpClient = defaultNopHttpClient + o.AuthMethod = Ptr(AuthMethodQuery) + }, + } +) + +func (c *Client) Presign(ctx context.Context, request any, optFns ...func(*PresignOptions)) (*PresignResult, error) { + options := PresignOptions{} + + if request == nil { + return nil, NewErrParamNull("request") + } + + for _, fn := range optFns { + fn(&options) + } + + input := OperationInput{} + if err := c.marshalPresignInput(request, &input); err != nil { + return nil, err + } + + // expiration + if !options.Expiration.IsZero() { + input.OpMetadata.Set(signer.SignTime, options.Expiration) + } else if options.Expires > 0 { + input.OpMetadata.Set(signer.SignTime, time.Now().Add(options.Expires)) + } + output, err := c.invokeOperation(ctx, &input, defaultPresignOptions) + if err != nil { + return nil, err + } + + result := &PresignResult{} + err = c.unmarshalPresignOutput(result, output) + return result, err +} + +func PresignExpires(value time.Duration) func(*PresignOptions) { + return func(o *PresignOptions) { + o.Expires = value + } +} + +func PresignExpiration(value time.Time) func(*PresignOptions) { + return func(o *PresignOptions) { + o.Expiration = value + } +} + +func (c *Client) marshalPresignInput(request any, input *OperationInput) error { + switch t := request.(type) { + case *GetObjectRequest: + input.OpName = "GetObject" + input.Method = "GET" + input.Bucket = t.Bucket + input.Key = t.Key + case *PutObjectRequest: + input.OpName = "PutObject" + input.Method = "PUT" + input.Bucket = t.Bucket + input.Key = t.Key + case *HeadObjectRequest: + input.OpName = "HeadObject" + input.Method = "HEAD" + input.Bucket = t.Bucket + input.Key = t.Key + case 
*InitiateMultipartUploadRequest:
+		input.OpName = "InitiateMultipartUpload"
+		input.Method = "POST"
+		input.Bucket = t.Bucket
+		input.Key = t.Key
+		input.Parameters = map[string]string{
+			"uploads": "",
+		}
+	case *UploadPartRequest:
+		input.OpName = "UploadPart"
+		input.Method = "PUT"
+		input.Bucket = t.Bucket
+		input.Key = t.Key
+	case *CompleteMultipartUploadRequest:
+		input.OpName = "CompleteMultipartUpload"
+		input.Method = "POST"
+		input.Bucket = t.Bucket
+		input.Key = t.Key
+	case *AbortMultipartUploadRequest:
+		input.OpName = "AbortMultipartUpload"
+		input.Method = "DELETE"
+		input.Bucket = t.Bucket
+		input.Key = t.Key
+	default:
+		return NewErrParamInvalid(fmt.Sprintf("request %v", reflect.ValueOf(request).Type().String()))
+	}
+
+	return c.marshalInput(request, input)
+}
+
+func (c *Client) unmarshalPresignOutput(result *PresignResult, output *OperationOutput) error {
+	if chk, ok := c.options.Signer.(interface{ IsSignedHeader([]string, string) bool }); ok {
+		header := map[string]string{}
+		for k, v := range output.httpRequest.Header {
+			if chk.IsSignedHeader(c.options.AdditionalHeaders, k) {
+				header[k] = v[0]
+			}
+		}
+		if len(header) > 0 {
+			result.SignedHeaders = header
+		}
+	}
+	result.Method = output.httpRequest.Method
+	result.URL = output.httpRequest.URL.String()
+	if signTime, ok := output.OpMetadata.Get(signer.SignTime).(time.Time); ok {
+		result.Expiration = signTime
+	}
+	_, ok := c.options.Signer.(*signer.SignerV4)
+	if ok {
+		if !result.Expiration.IsZero() && (result.Expiration.After(time.Now().Add(7 * 24 * time.Hour))) {
+			return fmt.Errorf("expires should not be greater than 604800 seconds (seven days)")
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/config.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/config.go
new file mode 100644
index 000000000..9b86a46e1
--- /dev/null
+++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/config.go
@@ -0,0 +1,286 @@
+package oss
+
+import (
+	"net/http"
+	"os"
+	"time"
+
+	"github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/credentials"
+	"github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/retry"
+)
+
+type HTTPClient interface {
+	Do(*http.Request) (*http.Response, error)
+}
+
+type Config struct {
+	// The region in which the bucket is located.
+	Region *string
+
+	// The domain names that other services can use to access OSS.
+	Endpoint *string
+
+	// RetryMaxAttempts specifies the maximum number of attempts an API client makes
+	// on an operation that fails with a retryable error.
+	RetryMaxAttempts *int
+
+	// Retryer guides how HTTP requests should be retried in case of recoverable failures.
+	Retryer retry.Retryer
+
+	// The HTTP client to invoke API calls with. Defaults to client's default HTTP
+	// implementation if nil.
+	HttpClient HTTPClient
+
+	// The credentials provider to use when signing requests.
+	CredentialsProvider credentials.CredentialsProvider
+
+	// Allows you to enable the client to use path-style addressing, i.e., https://oss-cn-hangzhou.aliyuncs.com/bucket/key.
+	// By default, the oss client will use virtual hosted addressing, i.e., https://bucket.oss-cn-hangzhou.aliyuncs.com/key.
+	UsePathStyle *bool
+
+	// If the endpoint is a CName, set this flag to true.
+	UseCName *bool
+
+	// Connect timeout.
+	ConnectTimeout *time.Duration
+
+	// Read & write timeout.
+	ReadWriteTimeout *time.Duration
+
+	// Skip server certificate verification.
+	InsecureSkipVerify *bool
+
+	// Enable HTTP redirects or not. Disabled by default.
+	EnabledRedirect *bool
+
+	// Flag of using proxy host.
+	ProxyHost *string
+
+	// Read the proxy setting from the environment variables.
+	// HTTP_PROXY, HTTPS_PROXY and NO_PROXY (or the lowercase versions thereof).
+	// HTTPS_PROXY takes precedence over HTTP_PROXY for https requests.
+	ProxyFromEnvironment *bool
+
+	// Upload bandwidth limit in kBytes/s for all requests.
+	UploadBandwidthlimit *int64
+
+	// Download bandwidth limit in kBytes/s for all requests.
+	DownloadBandwidthlimit *int64
+
+	// Authentication with OSS Signature Version.
+	SignatureVersion *SignatureVersionType
+
+	// The level of the output log.
+	LogLevel *int
+
+	// An interface for the SDK to log messages to.
+	LogPrinter LogPrinter
+
+	// DisableSSL forces the endpoint to be resolved as HTTP.
+	DisableSSL *bool
+
+	// Dual-stack endpoints are provided in some regions.
+	// This allows an IPv4 client and an IPv6 client to access a bucket by using the same endpoint.
+	// Set this to `true` to use a dual-stack endpoint for the requests.
+	UseDualStackEndpoint *bool
+
+	// OSS provides the transfer acceleration feature to accelerate data transfers
+	// of uploads and downloads across countries and regions.
+	// Set this to `true` to use an accelerate endpoint for the requests.
+	UseAccelerateEndpoint *bool
+
+	// You can use an internal endpoint to communicate between Alibaba Cloud services located within the same
+	// region over the internal network. You are not charged for the traffic generated over the internal network.
+	// Set this to `true` to use an internal endpoint for the requests.
+	UseInternalEndpoint *bool
+
+	// Check data integrity of uploads via the crc64 by default.
+	// This feature takes effect for PutObject, AppendObject, UploadPart, Uploader.UploadFrom and Uploader.UploadFile.
+	// Set this to `true` to disable this feature.
+	DisableUploadCRC64Check *bool
+
+	// Check data integrity of downloads via the crc64 by default.
+	// This feature only takes effect for Downloader.DownloadFile, GetObjectToFile.
+	// Set this to `true` to disable this feature.
+	DisableDownloadCRC64Check *bool
+
+	// Additional signable headers.
+	AdditionalHeaders []string
+
+	// The optional user specific identifier appended to the User-Agent header.
+	UserAgent *string
+
+	// The cloud box id.
+	CloudBoxId *string
+
+	// Whether the cloud box id is automatically extracted from the endpoint.
+ EnableAutoDetectCloudBoxId *bool +} + +func NewConfig() *Config { + return &Config{} +} + +func (c Config) Copy() Config { + cp := c + return cp +} + +func LoadDefaultConfig() *Config { + config := &Config{} + + // load from env + str := os.Getenv("OSS_SDK_LOG_LEVEL") + if str != "" { + if level := ToLogLevel(str); level > LogOff { + config.LogLevel = Ptr(level) + } + } + + return config +} + +func (c *Config) WithRegion(region string) *Config { + c.Region = Ptr(region) + return c +} + +func (c *Config) WithEndpoint(endpoint string) *Config { + c.Endpoint = Ptr(endpoint) + return c +} + +func (c *Config) WithRetryMaxAttempts(value int) *Config { + c.RetryMaxAttempts = Ptr(value) + return c +} + +func (c *Config) WithRetryer(retryer retry.Retryer) *Config { + c.Retryer = retryer + return c +} + +func (c *Config) WithHttpClient(client *http.Client) *Config { + c.HttpClient = client + return c +} + +func (c *Config) WithCredentialsProvider(provider credentials.CredentialsProvider) *Config { + c.CredentialsProvider = provider + return c +} + +func (c *Config) WithUsePathStyle(enable bool) *Config { + c.UsePathStyle = Ptr(enable) + return c +} + +func (c *Config) WithUseCName(enable bool) *Config { + c.UseCName = Ptr(enable) + return c +} + +func (c *Config) WithConnectTimeout(value time.Duration) *Config { + c.ConnectTimeout = Ptr(value) + return c +} + +func (c *Config) WithReadWriteTimeout(value time.Duration) *Config { + c.ReadWriteTimeout = Ptr(value) + return c +} + +func (c *Config) WithInsecureSkipVerify(value bool) *Config { + c.InsecureSkipVerify = Ptr(value) + return c +} + +func (c *Config) WithEnabledRedirect(value bool) *Config { + c.EnabledRedirect = Ptr(value) + return c +} + +func (c *Config) WithProxyHost(value string) *Config { + c.ProxyHost = Ptr(value) + return c +} + +func (c *Config) WithProxyFromEnvironment(value bool) *Config { + c.ProxyFromEnvironment = Ptr(value) + return c +} + +func (c *Config) WithUploadBandwidthlimit(value int64) *Config { + c.UploadBandwidthlimit = Ptr(value) + return c +} + +func (c *Config) WithDownloadBandwidthlimit(value int64) *Config { + c.DownloadBandwidthlimit = Ptr(value) + return c +} + +func (c *Config) WithSignatureVersion(value SignatureVersionType) *Config { + c.SignatureVersion = Ptr(value) + return c +} + +func (c *Config) WithLogLevel(level int) *Config { + c.LogLevel = Ptr(level) + return c +} + +func (c *Config) WithLogPrinter(printer LogPrinter) *Config { + c.LogPrinter = printer + return c +} + +func (c *Config) WithDisableSSL(value bool) *Config { + c.DisableSSL = Ptr(value) + return c +} + +func (c *Config) WithUseDualStackEndpoint(value bool) *Config { + c.UseDualStackEndpoint = Ptr(value) + return c +} + +func (c *Config) WithUseAccelerateEndpoint(value bool) *Config { + c.UseAccelerateEndpoint = Ptr(value) + return c +} + +func (c *Config) WithUseInternalEndpoint(value bool) *Config { + c.UseInternalEndpoint = Ptr(value) + return c +} + +func (c *Config) WithDisableUploadCRC64Check(value bool) *Config { + c.DisableUploadCRC64Check = Ptr(value) + return c +} + +func (c *Config) WithDisableDownloadCRC64Check(value bool) *Config { + c.DisableDownloadCRC64Check = Ptr(value) + return c +} + +func (c *Config) WithAdditionalHeaders(value []string) *Config { + c.AdditionalHeaders = value + return c +} + +func (c *Config) WithUserAgent(value string) *Config { + c.UserAgent = Ptr(value) + return c +} + +func (c *Config) WithCloudBoxId(value string) *Config { + c.CloudBoxId = Ptr(value) + return c +} + +func (c *Config) 
WithEnableAutoDetectCloudBoxId(value bool) *Config {
+	c.EnableAutoDetectCloudBoxId = Ptr(value)
+	return c
+}
diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/copier.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/copier.go
new file mode 100644
index 000000000..768d95dfa
--- /dev/null
+++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/copier.go
@@ -0,0 +1,587 @@
+package oss
+
+import (
+	"context"
+	"fmt"
+	"sort"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/transport"
+)
+
+var metadataCopied = map[string]struct{}{
+	"content-type":        {},
+	"content-language":    {},
+	"content-encoding":    {},
+	"content-disposition": {},
+	"cache-control":       {},
+	"expires":             {},
+}
+
+type CopierOptions struct {
+	PartSize int64
+
+	ParallelNum int
+
+	MultipartCopyThreshold int64
+
+	LeavePartsOnError bool
+
+	DisableShallowCopy bool
+
+	ClientOptions []func(*Options)
+
+	// MetadataProperties and TagProperties take effect in Copier.Copy
+	MetadataProperties *HeadObjectResult
+
+	TagProperties *GetObjectTaggingResult
+}
+
+type Copier struct {
+	options      CopierOptions
+	client       CopyAPIClient
+	featureFlags FeatureFlagsType
+}
+
+// NewCopier creates a new Copier instance to copy objects.
+// Pass in additional functional options to customize the copier's behavior.
+func NewCopier(api CopyAPIClient, optFns ...func(*CopierOptions)) *Copier {
+	options := CopierOptions{
+		PartSize:               DefaultCopyPartSize,
+		ParallelNum:            DefaultCopyParallel,
+		MultipartCopyThreshold: DefaultCopyThreshold,
+		LeavePartsOnError:      false,
+		DisableShallowCopy:     false,
+	}
+
+	for _, fn := range optFns {
+		fn(&options)
+	}
+
+	options.TagProperties = nil
+	options.MetadataProperties = nil
+
+	c := &Copier{
+		client:  api,
+		options: options,
+	}
+
+	// Get the client's feature flags
+	switch t := api.(type) {
+	case *Client:
+		c.featureFlags = t.options.FeatureFlags
+	}
+
+	return c
+}
+
+type CopyResult struct {
+	UploadId *string
+
+	ETag *string
+
+	VersionId *string
+
+	HashCRC64 *string
+
+	ResultCommon
+}
+
+type CopyError struct {
+	Err      error
+	UploadId string
+	Path     string
+}
+
+func (m *CopyError) Error() string {
+	var extra string
+	if m.Err != nil {
+		extra = fmt.Sprintf(", cause: %s", m.Err.Error())
+	}
+	return fmt.Sprintf("copy failed, upload id: %s%s", m.UploadId, extra)
+}
+
+func (m *CopyError) Unwrap() error {
+	return m.Err
+}
+
+func (c *Copier) Copy(ctx context.Context, request *CopyObjectRequest, optFns ...func(*CopierOptions)) (*CopyResult, error) {
+	// Copier wrapper
+	delegate, err := c.newDelegate(ctx, request, optFns...)
+ if err != nil { + return nil, err + } + + if err = delegate.checkSource(); err != nil { + return nil, err + } + + if err = delegate.applySource(); err != nil { + return nil, err + } + + return delegate.copy() +} + +type copierDelegate struct { + base *Copier + options CopierOptions + context context.Context + + request *CopyObjectRequest + + // Source's Info + metaProp *HeadObjectResult + tagProp *GetObjectTaggingResult + + sizeInBytes int64 + transferred int64 +} + +func (c *Copier) newDelegate(ctx context.Context, request *CopyObjectRequest, optFns ...func(*CopierOptions)) (*copierDelegate, error) { + if request == nil { + return nil, NewErrParamNull("request") + } + + if request.Bucket == nil { + return nil, NewErrParamNull("request.Bucket") + } + + if request.Key == nil { + return nil, NewErrParamNull("request.Key") + } + + if request.SourceKey == nil { + return nil, NewErrParamNull("request.SourceKey") + } + + if request.MetadataDirective != nil && !isValidCopyDirective(*request.MetadataDirective) { + return nil, NewErrParamInvalid("request.MetadataDirective") + } + + if request.TaggingDirective != nil && !isValidCopyDirective(*request.TaggingDirective) { + return nil, NewErrParamInvalid("request.TaggingDirective") + } + + d := copierDelegate{ + base: c, + options: c.options, + context: ctx, + request: request, + } + + for _, opt := range optFns { + opt(&d.options) + } + + if d.options.ParallelNum <= 0 { + d.options.ParallelNum = DefaultCopyParallel + } + + if d.options.PartSize <= 0 { + d.options.PartSize = DefaultCopyPartSize + } + + if d.options.MultipartCopyThreshold < 0 { + d.options.MultipartCopyThreshold = DefaultCopyThreshold + } + + d.tagProp = d.options.TagProperties + d.metaProp = d.options.MetadataProperties + + return &d, nil +} + +func (d *copierDelegate) checkSource() error { + if d.metaProp != nil { + return nil + } + + var request HeadObjectRequest + copyRequest(&request, d.request) + if d.request.SourceBucket != nil { + request.Bucket = d.request.SourceBucket + } + request.Key = d.request.SourceKey + request.VersionId = d.request.SourceVersionId + + result, err := d.base.client.HeadObject(d.context, &request, d.options.ClientOptions...) 
+	if err != nil {
+		return err
+	}
+
+	d.metaProp = result
+
+	return nil
+}
+
+func (d *copierDelegate) applySource() error {
+
+	d.sizeInBytes = d.metaProp.ContentLength
+
+	// single copy mode
+	if d.sizeInBytes <= d.options.MultipartCopyThreshold {
+		return nil
+	}
+
+	// multipart copy mode
+	// Part size
+	partSize := d.options.PartSize
+	if d.sizeInBytes > 0 {
+		for d.sizeInBytes/partSize >= int64(MaxUploadParts) {
+			partSize += d.options.PartSize
+		}
+	}
+	d.options.PartSize = partSize
+
+	return nil
+}
+
+func (d *copierDelegate) canUseShallowCopy() bool {
+	if d.options.DisableShallowCopy {
+		return false
+	}
+
+	// Change StorageClass
+	if d.request.StorageClass != "" {
+		return false
+	}
+
+	// Cross bucket
+	if d.request.SourceBucket != nil &&
+		ToString(d.request.SourceBucket) != ToString(d.request.Bucket) {
+		return false
+	}
+
+	// Decryption
+	if d.metaProp.Headers.Get(HeaderOssServerSideEncryption) != "" {
+		return false
+	}
+
+	return true
+}
+
+func (d *copierDelegate) copy() (*CopyResult, error) {
+	if d.sizeInBytes <= d.options.MultipartCopyThreshold {
+		return d.singleCopy()
+	} else if d.canUseShallowCopy() {
+		return d.shallowCopy()
+	}
+	return d.multiCopy()
+}
+
+func (d *copierDelegate) singleCopy() (*CopyResult, error) {
+	result, err := d.base.client.CopyObject(d.context, d.request, d.options.ClientOptions...)
+
+	if err != nil {
+		return nil, d.wrapErr("", err)
+	}
+
+	// update
+	d.transferred = d.sizeInBytes
+	d.progressCallback(d.sizeInBytes)
+
+	return &CopyResult{
+		ETag:         result.ETag,
+		HashCRC64:    result.HashCRC64,
+		VersionId:    result.VersionId,
+		ResultCommon: result.ResultCommon,
+	}, nil
+}
+
+func (d *copierDelegate) shallowCopy() (*CopyResult, error) {
+	// try single copy first; if it times out, fall back to multiCopy
+	ctx, cancel := context.WithTimeout(d.context, 30*time.Second)
+	defer cancel()
+	result, err := d.base.client.CopyObject(ctx, d.request, d.options.ClientOptions...)
+
+	if err != nil {
+		if isContextError(ctx, &err) {
+			return d.multiCopy()
+		}
+		return nil, d.wrapErr("", err)
+	}
+
+	// update
+	d.transferred = d.sizeInBytes
+	d.progressCallback(d.sizeInBytes)
+
+	return &CopyResult{
+		ETag:         result.ETag,
+		HashCRC64:    result.HashCRC64,
+		VersionId:    result.VersionId,
+		ResultCommon: result.ResultCommon,
+	}, nil
+}
+
+type copyChunk struct {
+	partNum     int32
+	size        int64
+	sourceRange string
+}
+
+func (d *copierDelegate) multiCopy() (*CopyResult, error) {
+	var (
+		wg       sync.WaitGroup
+		mu       sync.Mutex
+		parts    UploadParts
+		errValue atomic.Value
+	)
+
+	// Init the multipart
+	imRequest, err := d.newInitiateMultipartUpload()
+	if err != nil {
+		return nil, d.wrapErr("", err)
+	}
+
+	initResult, err := d.base.client.InitiateMultipartUpload(d.context, imRequest, d.options.ClientOptions...)
+ if err != nil { + return nil, d.wrapErr("", err) + } + + saveErrFn := func(e error) { + errValue.Store(e) + } + + getErrFn := func() error { + v := errValue.Load() + if v == nil { + return nil + } + e, _ := v.(error) + return e + } + + // readChunk runs in worker goroutines to pull chunks off of the ch channel + // timeout for MultiPartCopy API + // 10s per 200M, max timeout is 50s + const PART_SIZE int64 = 200 * 1024 * 1024 + const STEP time.Duration = 10 * time.Second + mpcTimeout := transport.DefaultReadWriteTimeout + partSize := d.options.PartSize + for partSize > PART_SIZE { + mpcTimeout += STEP + partSize -= PART_SIZE + if mpcTimeout > 50*time.Second { + break + } + } + mpcClientOptions := append(d.options.ClientOptions, OpReadWriteTimeout(mpcTimeout)) + + readChunkFn := func(ch chan copyChunk) { + defer wg.Done() + for { + data, ok := <-ch + if !ok { + break + } + if getErrFn() == nil { + upResult, err := d.base.client.UploadPartCopy( + d.context, + &UploadPartCopyRequest{ + Bucket: d.request.Bucket, + Key: d.request.Key, + SourceBucket: d.request.SourceBucket, + SourceKey: d.request.SourceKey, + SourceVersionId: d.request.SourceVersionId, + UploadId: initResult.UploadId, + PartNumber: data.partNum, + Range: Ptr(data.sourceRange), + RequestPayer: d.request.RequestPayer, + }, mpcClientOptions...) + //fmt.Printf("UploadPart result: %#v, %#v\n", upResult, err) + if err == nil { + mu.Lock() + parts = append(parts, UploadPart{ETag: upResult.ETag, PartNumber: data.partNum}) + d.transferred += data.size + d.progressCallback(data.size) + mu.Unlock() + } else { + saveErrFn(err) + } + } + } + } + + ch := make(chan copyChunk, d.options.ParallelNum) + for i := 0; i < d.options.ParallelNum; i++ { + wg.Add(1) + go readChunkFn(ch) + } + + // Read and queue the parts + var ( + qnum int32 = 0 + totalSize int64 = d.sizeInBytes + readerPos int64 = 0 + ) + for getErrFn() == nil && readerPos < totalSize { + n := d.options.PartSize + bytesLeft := totalSize - readerPos + if bytesLeft <= d.options.PartSize { + n = bytesLeft + } + //fmt.Printf("send chunk: %d\n", qnum) + qnum++ + ch <- copyChunk{partNum: qnum, size: n, sourceRange: fmt.Sprintf("bytes=%v-%v", readerPos, (readerPos + n - 1))} + readerPos += n + } + + // Close the channel, wait for workers + close(ch) + wg.Wait() + + // Complete upload + var cmResult *CompleteMultipartUploadResult + if err = getErrFn(); err == nil { + sort.Sort(parts) + cmRequest := &CompleteMultipartUploadRequest{} + copyRequest(cmRequest, d.request) + cmRequest.UploadId = initResult.UploadId + cmRequest.CompleteMultipartUpload = &CompleteMultipartUpload{Parts: parts} + cmResult, err = d.base.client.CompleteMultipartUpload(d.context, cmRequest, d.options.ClientOptions...) + } + //fmt.Printf("CompleteMultipartUpload cmResult: %#v, %#v\n", cmResult, err) + + if err != nil { + //Abort + if !d.options.LeavePartsOnError { + amRequest := &AbortMultipartUploadRequest{} + copyRequest(amRequest, d.request) + amRequest.UploadId = initResult.UploadId + _, _ = d.base.client.AbortMultipartUpload(d.context, amRequest, d.options.ClientOptions...) 
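+			// The abort result is deliberately discarded; the original copy
+			// error is what gets returned below.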
+		}
+		return nil, d.wrapErr(*initResult.UploadId, err)
+	}
+
+	// check crc
+	if cmResult.HashCRC64 != nil {
+		srcCrc := d.metaProp.Headers.Get(HeaderOssCRC64)
+		if srcCrc != "" {
+			destCrc := ToString(cmResult.HashCRC64)
+			if destCrc != srcCrc {
+				return nil, d.wrapErr(*initResult.UploadId, fmt.Errorf("crc is inconsistent, source %s, destination %s", srcCrc, destCrc))
+			}
+		}
+	}

+	return &CopyResult{
+		UploadId:     initResult.UploadId,
+		ETag:         cmResult.ETag,
+		VersionId:    cmResult.VersionId,
+		HashCRC64:    cmResult.HashCRC64,
+		ResultCommon: cmResult.ResultCommon,
+	}, nil
+}
+
+func (d *copierDelegate) newInitiateMultipartUpload() (*InitiateMultipartUploadRequest, error) {
+	var err error
+	imRequest := &InitiateMultipartUploadRequest{}
+	copyRequest(imRequest, d.request)
+	imRequest.DisableAutoDetectMimeType = true
+
+	if err = d.overwirteMetadataProp(imRequest); err != nil {
+		return nil, err
+	}
+
+	if err = d.overwirteTagProp(imRequest); err != nil {
+		return nil, err
+	}
+
+	return imRequest, nil
+}
+
+func (d *copierDelegate) overwirteMetadataProp(imRequest *InitiateMultipartUploadRequest) error {
+	copyRequest := d.request
+	switch strings.ToLower(ToString(copyRequest.MetadataDirective)) {
+	case "", "copy":
+		if d.metaProp == nil {
+			return fmt.Errorf("request.MetadataDirective is COPY, but metaProp for the source is nil")
+		}
+		imRequest.CacheControl = nil
+		imRequest.ContentType = nil
+		imRequest.ContentDisposition = nil
+		imRequest.ContentEncoding = nil
+		imRequest.Expires = nil
+		imRequest.Metadata = nil
+		imRequest.Headers = map[string]string{}
+		// skip meta in Headers
+		for k, v := range d.request.Headers {
+			lowK := strings.ToLower(k)
+			if strings.HasPrefix(lowK, "x-oss-meta") {
+				// skip
+			} else if _, ok := metadataCopied[lowK]; ok {
+				// skip
+			} else {
+				imRequest.Headers[k] = v
+			}
+		}
+		// copy meta from source
+		for k, v := range d.metaProp.Headers {
+			lowK := strings.ToLower(k)
+			if strings.HasPrefix(lowK, "x-oss-meta") {
+				imRequest.Headers[lowK] = v[0]
+			} else if _, ok := metadataCopied[lowK]; ok {
+				imRequest.Headers[lowK] = v[0]
+			}
+		}
+	case "replace":
+		// the metadata has been copied via the copyRequest function before
+	default:
+		return fmt.Errorf("Unsupported MetadataDirective, %s", ToString(d.request.MetadataDirective))
+	}
+
+	return nil
+}
+
+func (d *copierDelegate) overwirteTagProp(imRequest *InitiateMultipartUploadRequest) error {
+	switch strings.ToLower(ToString(d.request.TaggingDirective)) {
+	case "", "copy":
+		imRequest.Tagging = nil
+		if d.metaProp.TaggingCount > 0 && d.tagProp == nil {
+			request := &GetObjectTaggingRequest{}
+			copyRequest(request, d.request)
+			if d.request.SourceBucket != nil {
+				request.Bucket = d.request.SourceBucket
+			}
+			request.Key = d.request.SourceKey
+			request.VersionId = d.request.SourceVersionId
+			result, err := d.base.client.GetObjectTagging(d.context, request, d.options.ClientOptions...)
+			if err != nil {
+				return err
+			}
+			d.tagProp = result
+		}
+		if d.tagProp != nil {
+			var tags []string
+			for _, t := range d.tagProp.Tags {
+				tags = append(tags, fmt.Sprintf("%v=%v", ToString(t.Key), ToString(t.Value)))
+			}
+			if len(tags) > 0 {
+				imRequest.Tagging = Ptr(strings.Join(tags, "&"))
+			}
+		}
+	case "replace":
+		// the tag has been copied via the copyRequest function before
+	default:
+		return fmt.Errorf("Unsupported TaggingDirective, %s", ToString(d.request.TaggingDirective))
+	}
+
+	return nil
+}
+
+func (d *copierDelegate) wrapErr(uploadId string, err error) error {
+	return &CopyError{
+		UploadId: uploadId,
+		Path:     fmt.Sprintf("oss://%s/%s", *d.request.Bucket, *d.request.Key),
+		Err:      err}
+}
+
+func (d *copierDelegate) progressCallback(increment int64) {
+	if d.request.ProgressFn != nil {
+		d.request.ProgressFn(increment, d.transferred, d.sizeInBytes)
+	}
+}
diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/credentials/credentials.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/credentials/credentials.go
new file mode 100644
index 000000000..e27438a3e
--- /dev/null
+++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/credentials/credentials.go
@@ -0,0 +1,47 @@
+package credentials
+
+import (
+	"context"
+	"time"
+)
+
+type Credentials struct {
+	AccessKeyID     string     // Access key ID
+	AccessKeySecret string     // Access Key Secret
+	SecurityToken   string     // Security Token
+	Expires         *time.Time // The time the credentials will expire at.
+}
+
+func (v Credentials) Expired() bool {
+	if v.Expires != nil {
+		return !v.Expires.After(time.Now().Round(0))
+	}
+	return false
+}
+
+func (v Credentials) HasKeys() bool {
+	return len(v.AccessKeyID) > 0 && len(v.AccessKeySecret) > 0
+}
+
+type CredentialsProvider interface {
+	GetCredentials(ctx context.Context) (Credentials, error)
+}
+
+// CredentialsProviderFunc provides a helper wrapping a function value to
+// satisfy the CredentialsProvider interface.
+type CredentialsProviderFunc func(context.Context) (Credentials, error)
+
+// GetCredentials delegates to the function value the CredentialsProviderFunc wraps.
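+//
+// A hedged, illustrative example of adapting a closure (the key values are
+// placeholders):
+//
+//	p := CredentialsProviderFunc(func(ctx context.Context) (Credentials, error) {
+//		return Credentials{AccessKeyID: "ak", AccessKeySecret: "sk"}, nil
+//	})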
+func (fn CredentialsProviderFunc) GetCredentials(ctx context.Context) (Credentials, error) { + return fn(ctx) +} + +type AnonymousCredentialsProvider struct{} + +func NewAnonymousCredentialsProvider() CredentialsProvider { + return &AnonymousCredentialsProvider{} +} + +func (*AnonymousCredentialsProvider) GetCredentials(_ context.Context) (Credentials, error) { + return Credentials{AccessKeyID: "", AccessKeySecret: ""}, nil +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/credentials/ecs_role_credentials_provider.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/credentials/ecs_role_credentials_provider.go new file mode 100644 index 000000000..a8cc127d0 --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/credentials/ecs_role_credentials_provider.go @@ -0,0 +1,168 @@ +package credentials + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "path" + "strings" + "time" +) + +const ecs_ram_cred_url = "http://100.100.100.200/latest/meta-data/ram/security-credentials/" + +type ecsRoleCredentialsProvider struct { + ramCredUrl string + ramRole string + timeout time.Duration + retries int +} + +type ecsRoleCredentials struct { + AccessKeyId string `json:"AccessKeyId,omitempty"` + AccessKeySecret string `json:"AccessKeySecret,omitempty"` + SecurityToken string `json:"SecurityToken,omitempty"` + Expiration time.Time `json:"Expiration,omitempty"` + LastUpDated time.Time `json:"LastUpDated,omitempty"` + Code string `json:"Code,omitempty"` +} + +func (p *ecsRoleCredentialsProvider) httpGet(ctx context.Context, url string) (*http.Response, error) { + c := &http.Client{ + Timeout: p.timeout, + } + var resp *http.Response + var err error + req, err := http.NewRequestWithContext(ctx, "GET", url, nil) + if err != nil { + return nil, err + } + for i := 0; i < p.retries; i++ { + resp, err = c.Do(req) + if err != nil { + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + } + time.Sleep(500 * time.Millisecond) + continue + } + return resp, nil + } + return nil, err +} + +func (p *ecsRoleCredentialsProvider) getRoleFromMetaData(ctx context.Context) (string, error) { + resp, err := p.httpGet(ctx, p.ramCredUrl) + if err != nil { + return "", err + } + if resp.StatusCode != http.StatusOK { + return "", fmt.Errorf("failed to fetch ecs role name, resp.StatusCode:%v", resp.StatusCode) + } + defer resp.Body.Close() + roleName, err := io.ReadAll(resp.Body) + if err != nil { + return "", err + } + if len(roleName) == 0 { + return "", errors.New("ecs role name is empty") + } + + return string(roleName), nil +} + +func (p *ecsRoleCredentialsProvider) getCredentialsFromMetaData(ctx context.Context) (ecsRoleCredentials, error) { + var ecsCred ecsRoleCredentials + u, err := url.Parse(p.ramCredUrl) + if err != nil { + return ecsCred, err + } + u.Path = path.Join(u.Path, p.ramRole) + resp, err := p.httpGet(ctx, u.String()) + if err != nil { + return ecsCred, err + } + defer resp.Body.Close() + body, err := io.ReadAll(resp.Body) + if err != nil { + return ecsCred, err + } + err = json.Unmarshal(body, &ecsCred) + if err != nil { + return ecsCred, err + } + + if ecsCred.Code != "" && strings.ToUpper(ecsCred.Code) != "SUCCESS" { + return ecsCred, fmt.Errorf("failed to fetch credentials, return code:%s", ecsCred.Code) + } + + if ecsCred.AccessKeyId == "" || ecsCred.AccessKeySecret == "" { + return ecsCred, fmt.Errorf("AccessKeyId or AccessKeySecret is empty, response body is '%s'", string(body)) + } + + return ecsCred, 
nil +} + +func (p *ecsRoleCredentialsProvider) GetCredentials(ctx context.Context) (cred Credentials, err error) { + if len(p.ramRole) == 0 { + name, err := p.getRoleFromMetaData(ctx) + if err != nil { + return cred, err + } + p.ramRole = name + } + ecsCred, err := p.getCredentialsFromMetaData(ctx) + if err != nil { + return cred, err + } + cred.AccessKeyID = ecsCred.AccessKeyId + cred.AccessKeySecret = ecsCred.AccessKeySecret + cred.SecurityToken = ecsCred.SecurityToken + if !ecsCred.Expiration.IsZero() { + cred.Expires = &ecsCred.Expiration + } + return cred, nil +} + +type EcsRoleCredentialsProviderOptions struct { + RamRole string + Timeout time.Duration + Retries int +} + +func NewEcsRoleCredentialsProviderWithoutRefresh(optFns ...func(*EcsRoleCredentialsProviderOptions)) CredentialsProvider { + options := EcsRoleCredentialsProviderOptions{ + RamRole: "", + Timeout: time.Second * 10, + Retries: 3, + } + for _, fn := range optFns { + fn(&options) + } + return &ecsRoleCredentialsProvider{ + ramCredUrl: ecs_ram_cred_url, + ramRole: options.RamRole, + timeout: options.Timeout, + retries: options.Retries, + } +} + +func EcsRamRole(ramRole string) func(*EcsRoleCredentialsProviderOptions) { + return func(options *EcsRoleCredentialsProviderOptions) { + options.RamRole = ramRole + } +} + +func NewEcsRoleCredentialsProvider(optFns ...func(*EcsRoleCredentialsProviderOptions)) CredentialsProvider { + p := NewEcsRoleCredentialsProviderWithoutRefresh(optFns...) + provider := NewCredentialsFetcherProvider(CredentialsFetcherFunc(func(ctx context.Context) (Credentials, error) { + return p.GetCredentials(ctx) + })) + return provider +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/credentials/environment_credentials_provider.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/credentials/environment_credentials_provider.go new file mode 100644 index 000000000..fbd9ca1fb --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/credentials/environment_credentials_provider.go @@ -0,0 +1,27 @@ +package credentials + +import ( + "context" + "fmt" + "os" +) + +type EnvironmentVariableCredentialsProvider struct { +} + +func (s *EnvironmentVariableCredentialsProvider) GetCredentials(ctx context.Context) (Credentials, error) { + id := os.Getenv("OSS_ACCESS_KEY_ID") + secret := os.Getenv("OSS_ACCESS_KEY_SECRET") + if id == "" || secret == "" { + return Credentials{}, fmt.Errorf("access key id or access key secret is empty!") + } + return Credentials{ + AccessKeyID: id, + AccessKeySecret: secret, + SecurityToken: os.Getenv("OSS_SESSION_TOKEN"), + }, nil +} + +func NewEnvironmentVariableCredentialsProvider() CredentialsProvider { + return &EnvironmentVariableCredentialsProvider{} +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/credentials/fetcher_credentials_provider.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/credentials/fetcher_credentials_provider.go new file mode 100644 index 000000000..07d0e5a11 --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/credentials/fetcher_credentials_provider.go @@ -0,0 +1,183 @@ +package credentials + +import ( + "context" + "fmt" + "sync" + "sync/atomic" + "time" +) + +var ( + // Default expiration time adjustment factor + defaultExpiredFactor = 0.8 + + // backoff of refresh time + defaultRefreshDuration = 120 * time.Second +) + +// CredentialsFetcherOptions are the options +type CredentialsFetcherOptions struct { + ExpiredFactor float64 + RefreshDuration 
time.Duration +} + +type CredentialsFetcher interface { + Fetch(ctx context.Context) (Credentials, error) +} + +// CredentialsFetcherFunc provides a helper wrapping a function value to +// satisfy the CredentialsFetcher interface. +type CredentialsFetcherFunc func(context.Context) (Credentials, error) + +// Fetch delegates to the function value the CredentialsFetcherFunc wraps. +func (fn CredentialsFetcherFunc) Fetch(ctx context.Context) (Credentials, error) { + return fn(ctx) +} + +type CredentialsFetcherProvider struct { + m sync.Mutex + + //credentials *fetcherCredentials + credentials atomic.Value + + fetcher CredentialsFetcher + + expiredFactor float64 + refreshDuration time.Duration +} + +type fetcherCredentials struct { + Creds Credentials + ExpiryWindow time.Duration +} + +func NewCredentialsFetcherProvider(fetcher CredentialsFetcher, optFns ...func(*CredentialsFetcherOptions)) CredentialsProvider { + options := CredentialsFetcherOptions{ + ExpiredFactor: defaultExpiredFactor, + RefreshDuration: defaultRefreshDuration, + } + + for _, fn := range optFns { + fn(&options) + } + + return &CredentialsFetcherProvider{ + fetcher: fetcher, + expiredFactor: options.ExpiredFactor, + refreshDuration: options.RefreshDuration, + } +} + +func (c *CredentialsFetcherProvider) GetCredentials(ctx context.Context) (Credentials, error) { + fcreds := c.getCreds() + if c.isExpired(fcreds) { + c.m.Lock() + defer c.m.Unlock() + creds, err := c.fetch(ctx) + if err == nil { + c.updateCreds(&creds) + } + return creds, err + } else { + if c.isSoonExpire(fcreds) && c.m.TryLock() { + defer c.m.Unlock() + fcreds1 := c.getCreds() + if fcreds1 == fcreds { + creds, err := c.fetch(ctx) + if err == nil { + c.updateCreds(&creds) + return creds, nil + } else { + c.updateExpiryWindow(fcreds1) + err = nil + } + } + fcreds = fcreds1 + } + return fcreds.Creds, nil + } +} + +type asyncFetchResult struct { + val Credentials + err error +} + +func (c *CredentialsFetcherProvider) asyncFetch(ctx context.Context) <-chan asyncFetchResult { + doChan := func() <-chan asyncFetchResult { + ch := make(chan asyncFetchResult, 1) + + go func() { + cred, err := c.fetcher.Fetch(ctx) + ch <- asyncFetchResult{cred, err} + }() + + return ch + } + + return doChan() +} + +func (c *CredentialsFetcherProvider) fetch(ctx context.Context) (Credentials, error) { + if c.fetcher == nil { + return Credentials{}, fmt.Errorf("fetcher is null.") + } + + select { + case result, _ := <-c.asyncFetch(ctx): + return result.val, result.err + case <-ctx.Done(): + return Credentials{}, fmt.Errorf("FetchCredentialsCanceled") + } +} + +func (p *CredentialsFetcherProvider) getCreds() *fetcherCredentials { + v := p.credentials.Load() + if v == nil { + return nil + } + creds, _ := v.(*fetcherCredentials) + return creds +} + +func (c *CredentialsFetcherProvider) updateCreds(cred *Credentials) { + fcred := fetcherCredentials{ + Creds: *cred, + } + if cred.Expires != nil { + curr := time.Now().Round(0) + durationS := c.expiredFactor * float64(cred.Expires.Sub(curr).Seconds()) + duration := time.Duration(durationS * float64(time.Second)) + if duration > c.refreshDuration { + fcred.ExpiryWindow = duration + } + } + c.credentials.Store(&fcred) +} + +func (c *CredentialsFetcherProvider) updateExpiryWindow(fcreds *fetcherCredentials) { + if fcreds.ExpiryWindow > 0 { + fcreds1 := *fcreds + fcreds1.ExpiryWindow -= c.refreshDuration + c.credentials.Store(&fcreds1) + } +} + +func (c *CredentialsFetcherProvider) isExpired(fcreds *fetcherCredentials) bool { + return fcreds == nil 
|| fcreds.Creds.Expired() +} + +func (c *CredentialsFetcherProvider) isSoonExpire(fcreds *fetcherCredentials) bool { + if fcreds == nil || fcreds.Creds.Expired() { + return true + } + + if fcreds.ExpiryWindow > 0 && fcreds.Creds.Expires != nil { + if !fcreds.Creds.Expires.After(time.Now().Round(0).Add(fcreds.ExpiryWindow)) { + return true + } + } + + return false +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/credentials/process_credentials_provider.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/credentials/process_credentials_provider.go new file mode 100644 index 000000000..bb87dff7b --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/credentials/process_credentials_provider.go @@ -0,0 +1,168 @@ +package credentials + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "os" + "os/exec" + "runtime" + "time" +) + +/* +temporary access credentials format +{ + "AccessKeyId" : "ak", + "AccessKeySecret" : "sk", + "Expiration" : "2023-12-29T07:45:02Z", + "SecurityToken" : "token", +} + +long-term access credentials +{ + "AccessKeyId" : "ak", + "AccessKeySecret" : "sk", +} +*/ + +type processCredentialsResult struct { + AccessKeyId string `json:"AccessKeyId"` + + AccessKeySecret string `json:"AccessKeySecret"` + + SecurityToken string `json:"SecurityToken"` + + Expiration *time.Time `json:"Expiration"` +} + +type ProcessCredentialsProviderOptions struct { + Timeout time.Duration +} + +type ProcessCredentialsProvider struct { + timeout time.Duration + args []string +} + +func NewProcessCredentialsProvider(command string, optFns ...func(*ProcessCredentialsProviderOptions)) CredentialsProvider { + options := ProcessCredentialsProviderOptions{ + Timeout: 15 * time.Second, + } + + for _, fn := range optFns { + fn(&options) + } + + var args []string + if len(command) > 0 { + args = []string{command} + } + + return &ProcessCredentialsProvider{ + timeout: options.Timeout, + args: args, + } +} + +func (p *ProcessCredentialsProvider) GetCredentials(ctx context.Context) (Credentials, error) { + return p.fetchCredentials(ctx) +} + +func (p *ProcessCredentialsProvider) buildCommand(ctx context.Context) (*exec.Cmd, error) { + if len(p.args) == 0 { + return nil, fmt.Errorf("command must not be empty") + } + + var cmdArgs []string + if runtime.GOOS == "windows" { + cmdArgs = []string{"cmd.exe", "/C"} + } else { + cmdArgs = []string{"sh", "-c"} + } + + cmdArgs = append(cmdArgs, p.args...) + cmd := exec.CommandContext(ctx, cmdArgs[0], cmdArgs[1:]...) 
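+	// Hand the parent environment to the credential command so it can read
+	// its own configuration (proxy settings, credential file paths, etc.).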
+ cmd.Env = os.Environ() + + return cmd, nil +} + +func (p *ProcessCredentialsProvider) fetchCredentials(ctx context.Context) (Credentials, error) { + data, err := p.executeProcess(ctx) + if err != nil { + return Credentials{}, err + } + + //json to Credentials + result := &processCredentialsResult{} + if err = json.Unmarshal(data, result); err != nil { + return Credentials{}, err + + } + + creds := Credentials{ + AccessKeyID: result.AccessKeyId, + AccessKeySecret: result.AccessKeySecret, + SecurityToken: result.SecurityToken, + Expires: result.Expiration, + } + + if !creds.HasKeys() { + return creds, fmt.Errorf("missing AccessKeyId or AccessKeySecret in process output") + } + + return creds, nil +} + +func (p *ProcessCredentialsProvider) executeProcess(ctx context.Context) ([]byte, error) { + if p.timeout >= 0 { + var cancelFunc func() + ctx, cancelFunc = context.WithTimeout(ctx, p.timeout) + defer cancelFunc() + } + + cmd, err := p.buildCommand(ctx) + if err != nil { + return nil, err + } + + // get creds from process's stdout + output := bytes.NewBuffer(make([]byte, 0, int(8*1024))) + cmd.Stdout = output + + // Start the command + executeFn := func(cmd *exec.Cmd, exec chan error) { + err := cmd.Start() + if err == nil { + err = cmd.Wait() + } + exec <- err + } + + execCh := make(chan error, 1) + go executeFn(cmd, execCh) + + // Wait commnd done + select { + case execError := <-execCh: + if execError == nil { + break + } + select { + case <-ctx.Done(): + return output.Bytes(), fmt.Errorf("credential process timed out: %w", execError) + default: + return output.Bytes(), fmt.Errorf("error in credential_process: %w", execError) + } + } + + out := output.Bytes() + if runtime.GOOS == "windows" { + // windows adds slashes to quotes + out = bytes.ReplaceAll(out, []byte(`\"`), []byte(`"`)) + } + + return out, nil +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/credentials/static_credentials_provider.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/credentials/static_credentials_provider.go new file mode 100644 index 000000000..a6e7a126b --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/credentials/static_credentials_provider.go @@ -0,0 +1,26 @@ +package credentials + +import ( + "context" +) + +type StaticCredentialsProvider struct { + credentials Credentials +} + +func NewStaticCredentialsProvider(id, secret string, tokens ...string) CredentialsProvider { + token := "" + if len(tokens) > 0 { + token = tokens[0] + } + return StaticCredentialsProvider{ + credentials: Credentials{ + AccessKeyID: id, + AccessKeySecret: secret, + SecurityToken: token, + }} +} + +func (s StaticCredentialsProvider) GetCredentials(_ context.Context) (Credentials, error) { + return s.credentials, nil +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/crypto/aes_ctr.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/crypto/aes_ctr.go new file mode 100644 index 000000000..d8784d262 --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/crypto/aes_ctr.go @@ -0,0 +1,65 @@ +package crypto + +import ( + "crypto/aes" + "crypto/cipher" + "io" +) + +type aesCtr struct { + block cipher.Block + cipherData CipherData +} + +func newAesCtr(cd CipherData) (Cipher, error) { + block, err := aes.NewCipher(cd.Key) + if err != nil { + return nil, err + } + return &aesCtr{block, cd}, nil +} + +func (c *aesCtr) Encrypt(src io.Reader) io.Reader { + reader := &ctrEncryptReader{ + encrypter: cipher.NewCTR(c.block, c.cipherData.IV), 
+ src: src, + } + return reader +} + +type ctrEncryptReader struct { + encrypter cipher.Stream + src io.Reader +} + +func (reader *ctrEncryptReader) Read(data []byte) (int, error) { + plainText := make([]byte, len(data), len(data)) + n, err := reader.src.Read(plainText) + if n > 0 { + plainText = plainText[0:n] + reader.encrypter.XORKeyStream(data, plainText) + } + return n, err +} + +func (c *aesCtr) Decrypt(src io.Reader) io.Reader { + return &ctrDecryptReader{ + decrypter: cipher.NewCTR(c.block, c.cipherData.IV), + src: src, + } +} + +type ctrDecryptReader struct { + decrypter cipher.Stream + src io.Reader +} + +func (reader *ctrDecryptReader) Read(data []byte) (int, error) { + cryptoText := make([]byte, len(data), len(data)) + n, err := reader.src.Read(cryptoText) + if n > 0 { + cryptoText = cryptoText[0:n] + reader.decrypter.XORKeyStream(data, cryptoText) + } + return n, err +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/crypto/aes_ctr_cipher.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/crypto/aes_ctr_cipher.go new file mode 100644 index 000000000..9d8aec8a3 --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/crypto/aes_ctr_cipher.go @@ -0,0 +1,208 @@ +package crypto + +import ( + "fmt" + "io" +) + +const ( + aesKeySize = 32 + ivSize = 16 +) + +// aesCtrCipherBuilder for building ContentCipher +type aesCtrCipherBuilder struct { + MasterCipher MasterCipher +} + +// aesCtrCipher will use aes ctr algorithm +type aesCtrCipher struct { + CipherData CipherData + Cipher Cipher +} + +// CreateAesCtrCipher creates ContentCipherBuilder +func CreateAesCtrCipher(cipher MasterCipher) ContentCipherBuilder { + return aesCtrCipherBuilder{MasterCipher: cipher} +} + +// createCipherData create CipherData for encrypt object data +func (builder aesCtrCipherBuilder) createCipherData() (CipherData, error) { + var cd CipherData + var err error + err = cd.RandomKeyIv(aesKeySize, ivSize) + if err != nil { + return cd, err + } + + cd.WrapAlgorithm = builder.MasterCipher.GetWrapAlgorithm() + cd.CEKAlgorithm = AesCtrAlgorithm + cd.MatDesc = builder.MasterCipher.GetMatDesc() + + // EncryptedKey + cd.EncryptedKey, err = builder.MasterCipher.Encrypt(cd.Key) + if err != nil { + return cd, err + } + + // EncryptedIV + cd.EncryptedIV, err = builder.MasterCipher.Encrypt(cd.IV) + if err != nil { + return cd, err + } + + return cd, nil +} + +// contentCipherCD is used to create ContentCipher with CipherData +func (builder aesCtrCipherBuilder) contentCipherCD(cd CipherData) (ContentCipher, error) { + cipher, err := newAesCtr(cd) + if err != nil { + return nil, err + } + + return &aesCtrCipher{ + CipherData: cd, + Cipher: cipher, + }, nil +} + +// ContentCipher is used to create ContentCipher interface +func (builder aesCtrCipherBuilder) ContentCipher() (ContentCipher, error) { + cd, err := builder.createCipherData() + if err != nil { + return nil, err + } + return builder.contentCipherCD(cd) +} + +// ContentCipherEnv is used to create a decrption ContentCipher from Envelope +func (builder aesCtrCipherBuilder) ContentCipherEnv(envelope Envelope) (ContentCipher, error) { + var cd CipherData + cd.EncryptedKey = make([]byte, len(envelope.CipherKey)) + copy(cd.EncryptedKey, []byte(envelope.CipherKey)) + + plainKey, err := builder.MasterCipher.Decrypt([]byte(envelope.CipherKey)) + if err != nil { + return nil, err + } + cd.Key = make([]byte, len(plainKey)) + copy(cd.Key, plainKey) + + cd.EncryptedIV = make([]byte, len(envelope.IV)) + copy(cd.EncryptedIV, 
[]byte(envelope.IV)) + + plainIV, err := builder.MasterCipher.Decrypt([]byte(envelope.IV)) + if err != nil { + return nil, err + } + + cd.IV = make([]byte, len(plainIV)) + copy(cd.IV, plainIV) + + cd.MatDesc = envelope.MatDesc + cd.WrapAlgorithm = envelope.WrapAlg + cd.CEKAlgorithm = envelope.CEKAlg + + return builder.contentCipherCD(cd) +} + +// GetMatDesc is used to get MasterCipher's MatDesc +func (builder aesCtrCipherBuilder) GetMatDesc() string { + return builder.MasterCipher.GetMatDesc() +} + +// EncryptContents will generate a random key and iv and encrypt the data using ctr +func (cc *aesCtrCipher) EncryptContent(src io.Reader) (io.ReadCloser, error) { + if sr, ok := src.(io.ReadSeeker); ok { + if curr, err := sr.Seek(0, io.SeekCurrent); err == nil { + return &aesSeekEncrypter{ + Body: sr, + Encrypter: nil, + Start: curr, + Offset: curr, + cc: cc, + }, nil + } + } + reader := cc.Cipher.Encrypt(src) + return &CryptoEncrypter{Body: src, Encrypter: reader}, nil +} + +// DecryptContent is used to decrypt object using ctr +func (cc *aesCtrCipher) DecryptContent(src io.Reader) (io.ReadCloser, error) { + reader := cc.Cipher.Decrypt(src) + return &CryptoDecrypter{Body: src, Decrypter: reader}, nil +} + +// GetCipherData is used to get cipher data information +func (cc *aesCtrCipher) GetCipherData() *CipherData { + return &(cc.CipherData) +} + +// GetCipherData returns cipher data +func (cc *aesCtrCipher) GetEncryptedLen(plainTextLen int64) int64 { + // AES CTR encryption mode does not change content length + return plainTextLen +} + +// GetAlignLen is used to get align length +func (cc *aesCtrCipher) GetAlignLen() int { + return len(cc.CipherData.IV) +} + +// Clone is used to create a new aesCtrCipher from itself +func (cc *aesCtrCipher) Clone(cd CipherData) (ContentCipher, error) { + cipher, err := newAesCtr(cd) + if err != nil { + return nil, err + } + + return &aesCtrCipher{ + CipherData: cd, + Cipher: cipher, + }, nil +} + +// CryptoSeekEncrypter provides close and seek method for Encrypter +type aesSeekEncrypter struct { + Body io.ReadSeeker + Encrypter io.Reader + isClosed bool + Start int64 + Offset int64 + cc *aesCtrCipher +} + +// Close lets the CryptoSeekEncrypter satisfy io.ReadCloser interface +func (rc *aesSeekEncrypter) Close() error { + rc.isClosed = true + if closer, ok := rc.Body.(io.ReadCloser); ok { + return closer.Close() + } + return nil +} + +// Read lets the CryptoSeekEncrypter satisfy io.ReadCloser interface +func (rc *aesSeekEncrypter) Read(b []byte) (int, error) { + if rc.isClosed { + return 0, io.EOF + } + if rc.Encrypter == nil { + if rc.Start != rc.Offset { + return 0, fmt.Errorf("Cant not encrypt from offset %v, must start from %v", rc.Offset, rc.Start) + } + rc.Encrypter = rc.cc.Cipher.Encrypt(rc.Body) + } + return rc.Encrypter.Read(b) +} + +// Seek lets the CryptoSeekEncrypter satisfy io.Seeker interface +func (rc *aesSeekEncrypter) Seek(offset int64, whence int) (int64, error) { + off, err := rc.Body.Seek(offset, whence) + //Reset Encrypter Reader + rc.Encrypter = nil + rc.Offset = off + + return off, err +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/crypto/cipher.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/crypto/cipher.go new file mode 100644 index 000000000..3a46fd9f6 --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/crypto/cipher.go @@ -0,0 +1,69 @@ +package crypto + +import ( + "io" +) + +// Cipher is interface for encryption or decryption of an object +type Cipher interface { + 
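+	// Encrypter and Decrypter are embedded so a single Cipher value can wrap
+	// readers for both directions of a stream.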
Encrypter + Decrypter +} + +// Encrypter is interface with only encrypt method +type Encrypter interface { + Encrypt(io.Reader) io.Reader +} + +// Decrypter is interface with only decrypt method +type Decrypter interface { + Decrypt(io.Reader) io.Reader +} + +// CryptoEncrypter provides close method for Encrypter +type CryptoEncrypter struct { + Body io.Reader + Encrypter io.Reader + isClosed bool +} + +// Close lets the CryptoEncrypter satisfy io.ReadCloser interface +func (rc *CryptoEncrypter) Close() error { + rc.isClosed = true + if closer, ok := rc.Body.(io.ReadCloser); ok { + return closer.Close() + } + return nil +} + +// Read lets the CryptoEncrypter satisfy io.ReadCloser interface +func (rc *CryptoEncrypter) Read(b []byte) (int, error) { + if rc.isClosed { + return 0, io.EOF + } + return rc.Encrypter.Read(b) +} + +// CryptoDecrypter provides close method for Decrypter +type CryptoDecrypter struct { + Body io.Reader + Decrypter io.Reader + isClosed bool +} + +// Close lets the CryptoDecrypter satisfy io.ReadCloser interface +func (rc *CryptoDecrypter) Close() error { + rc.isClosed = true + if closer, ok := rc.Body.(io.ReadCloser); ok { + return closer.Close() + } + return nil +} + +// Read lets the CryptoDecrypter satisfy io.ReadCloser interface +func (rc *CryptoDecrypter) Read(b []byte) (int, error) { + if rc.isClosed { + return 0, io.EOF + } + return rc.Decrypter.Read(b) +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/crypto/crypto_const.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/crypto/crypto_const.go new file mode 100644 index 000000000..d09c47589 --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/crypto/crypto_const.go @@ -0,0 +1,8 @@ +package crypto + +// encryption Algorithm +const ( + RsaCryptoWrap string = "RSA/NONE/PKCS1Padding" + KmsAliCryptoWrap string = "KMS/ALICLOUD" + AesCtrAlgorithm string = "AES/CTR/NoPadding" +) diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/crypto/crypto_type.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/crypto/crypto_type.go new file mode 100644 index 000000000..9e5d2b887 --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/crypto/crypto_type.go @@ -0,0 +1,125 @@ +package crypto + +import ( + "crypto/rand" + "encoding/binary" + "fmt" + "io" + math_rand "math/rand" +) + +// MasterCipher encrypt or decrpt CipherData +// support master key: rsa && ali kms +type MasterCipher interface { + Encrypt([]byte) ([]byte, error) + Decrypt([]byte) ([]byte, error) + GetWrapAlgorithm() string + GetMatDesc() string +} + +// ContentCipherBuilder is used to create ContentCipher for encryting object's data +type ContentCipherBuilder interface { + ContentCipher() (ContentCipher, error) + ContentCipherEnv(Envelope) (ContentCipher, error) + GetMatDesc() string +} + +// ContentCipher is used to encrypt or decrypt object's data +type ContentCipher interface { + EncryptContent(io.Reader) (io.ReadCloser, error) + DecryptContent(io.Reader) (io.ReadCloser, error) + Clone(cd CipherData) (ContentCipher, error) + GetEncryptedLen(int64) int64 + GetCipherData() *CipherData + GetAlignLen() int +} + +// Envelope is stored in oss object's meta +type Envelope struct { + IV string + CipherKey string + MatDesc string + WrapAlg string + CEKAlg string + UnencryptedMD5 string + UnencryptedContentLen string +} + +func (el Envelope) IsValid() bool { + return len(el.IV) > 0 && + len(el.CipherKey) > 0 && + len(el.WrapAlg) > 0 && + len(el.CEKAlg) > 0 +} + +func (el 
Envelope) String() string { + return fmt.Sprintf("IV=%s&CipherKey=%s&WrapAlg=%s&CEKAlg=%s", el.IV, el.CipherKey, el.WrapAlg, el.CEKAlg) +} + +// CipherData is secret key information +type CipherData struct { + IV []byte + Key []byte + MatDesc string + WrapAlgorithm string + CEKAlgorithm string + EncryptedIV []byte + EncryptedKey []byte +} + +func (cd *CipherData) RandomKeyIv(keyLen int, ivLen int) error { + // Key + cd.Key = make([]byte, keyLen) + if _, err := io.ReadFull(rand.Reader, cd.Key); err != nil { + return err + } + + // sizeof uint64 + if ivLen < 8 { + return fmt.Errorf("ivLen:%d less than 8", ivLen) + } + + // IV:reserve 8 bytes + cd.IV = make([]byte, ivLen) + if _, err := io.ReadFull(rand.Reader, cd.IV[0:ivLen-8]); err != nil { + return err + } + + // only use 4 byte,in order not to overflow when SeekIV() + randNumber := math_rand.Uint32() + cd.SetIV(uint64(randNumber)) + return nil +} + +func (cd *CipherData) SetIV(iv uint64) { + ivLen := len(cd.IV) + binary.BigEndian.PutUint64(cd.IV[ivLen-8:], iv) +} + +func (cd *CipherData) GetIV() uint64 { + ivLen := len(cd.IV) + return binary.BigEndian.Uint64(cd.IV[ivLen-8:]) +} + +func (cd *CipherData) SeekIV(startPos uint64) { + cd.SetIV(cd.GetIV() + startPos/uint64(len(cd.IV))) +} + +func (cd *CipherData) Clone() CipherData { + var cloneCd CipherData + cloneCd = *cd + + cloneCd.Key = make([]byte, len(cd.Key)) + copy(cloneCd.Key, cd.Key) + + cloneCd.IV = make([]byte, len(cd.IV)) + copy(cloneCd.IV, cd.IV) + + cloneCd.EncryptedIV = make([]byte, len(cd.EncryptedIV)) + copy(cloneCd.EncryptedIV, cd.EncryptedIV) + + cloneCd.EncryptedKey = make([]byte, len(cd.EncryptedKey)) + copy(cloneCd.EncryptedKey, cd.EncryptedKey) + + return cloneCd +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/crypto/master_rsa_cipher.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/crypto/master_rsa_cipher.go new file mode 100644 index 000000000..91f3b9df5 --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/crypto/master_rsa_cipher.go @@ -0,0 +1,102 @@ +package crypto + +import ( + "crypto/rand" + "crypto/rsa" + "crypto/x509" + "encoding/asn1" + "encoding/json" + "encoding/pem" + "fmt" +) + +// CreateMasterRsa Create master key interface implemented by rsa +// matDesc will be converted to json string +func CreateMasterRsa(matDesc map[string]string, publicKey string, privateKey string) (MasterCipher, error) { + var masterCipher MasterRsaCipher + var jsonDesc string + if len(matDesc) > 0 { + b, err := json.Marshal(matDesc) + if err != nil { + return masterCipher, err + } + jsonDesc = string(b) + } + masterCipher.MatDesc = jsonDesc + masterCipher.PublicKey = publicKey + masterCipher.PrivateKey = privateKey + return masterCipher, nil +} + +// MasterRsaCipher rsa master key interface +type MasterRsaCipher struct { + MatDesc string + PublicKey string + PrivateKey string +} + +// GetWrapAlgorithm get master key wrap algorithm +func (mrc MasterRsaCipher) GetWrapAlgorithm() string { + return RsaCryptoWrap +} + +// GetMatDesc get master key describe +func (mrc MasterRsaCipher) GetMatDesc() string { + return mrc.MatDesc +} + +// Encrypt encrypt data by rsa public key +// Mainly used to encrypt object's symmetric secret key and iv +func (mrc MasterRsaCipher) Encrypt(plainData []byte) ([]byte, error) { + block, _ := pem.Decode([]byte(mrc.PublicKey)) + if block == nil { + return nil, fmt.Errorf("pem.Decode public key error") + } + + var pub *rsa.PublicKey + if block.Type == "PUBLIC KEY" { + // pks8 format + pubInterface, 
err := x509.ParsePKIXPublicKey(block.Bytes)
+		if err != nil {
+			return nil, err
+		}
+		pub = pubInterface.(*rsa.PublicKey)
+	} else if block.Type == "RSA PUBLIC KEY" {
+		// pkcs1 format
+		pub = &rsa.PublicKey{}
+		_, err := asn1.Unmarshal(block.Bytes, pub)
+		if err != nil {
+			return nil, err
+		}
+	} else {
+		return nil, fmt.Errorf("not supported public key,type:%s", block.Type)
+	}
+	return rsa.EncryptPKCS1v15(rand.Reader, pub, plainData)
+}
+
+// Decrypt decrypts data with the rsa private key.
+// Mainly used to decrypt the object's symmetric secret key and iv.
+func (mrc MasterRsaCipher) Decrypt(cryptoData []byte) ([]byte, error) {
+	block, _ := pem.Decode([]byte(mrc.PrivateKey))
+	if block == nil {
+		return nil, fmt.Errorf("pem.Decode private key error")
+	}
+
+	if block.Type == "PRIVATE KEY" {
+		// pkcs8 format
+		privInterface, err := x509.ParsePKCS8PrivateKey(block.Bytes)
+		if err != nil {
+			return nil, err
+		}
+		return rsa.DecryptPKCS1v15(rand.Reader, privInterface.(*rsa.PrivateKey), cryptoData)
+	} else if block.Type == "RSA PRIVATE KEY" {
+		// pkcs1 format
+		priv, err := x509.ParsePKCS1PrivateKey(block.Bytes)
+		if err != nil {
+			return nil, err
+		}
+		return rsa.DecryptPKCS1v15(rand.Reader, priv, cryptoData)
+	} else {
+		return nil, fmt.Errorf("not supported private key,type:%s", block.Type)
+	}
}
diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/defaults.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/defaults.go
new file mode 100644
index 000000000..ec9c242df
--- /dev/null
+++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/defaults.go
@@ -0,0 +1,79 @@
+package oss
+
+import "os"
+
+const (
+	MaxUploadParts int32 = 10000
+
+	// MaxPartSize Max part size, 5GB, for UploadPart
+	MaxPartSize int64 = 5 * 1024 * 1024 * 1024
+
+	// MinPartSize Min part size, 100KB, for UploadPart
+	MinPartSize int64 = 100 * 1024
+
+	// DefaultPartSize Default part size, 6M
+	DefaultPartSize int64 = 6 * 1024 * 1024
+
+	// DefaultUploadPartSize Default part size for uploader uploads data
+	DefaultUploadPartSize = DefaultPartSize
+
+	// DefaultDownloadPartSize Default part size for downloader downloads object
+	DefaultDownloadPartSize = DefaultPartSize
+
+	// DefaultCopyPartSize Default part size for copier copies object, 64M
+	DefaultCopyPartSize int64 = 64 * 1024 * 1024
+
+	// DefaultParallel Default parallel
+	DefaultParallel = 3
+
+	// DefaultUploadParallel Default parallel for uploader uploads data
+	DefaultUploadParallel = DefaultParallel
+
+	// DefaultDownloadParallel Default parallel for downloader downloads object
+	DefaultDownloadParallel = DefaultParallel
+
+	// DefaultCopyParallel Default parallel for copier copies object
+	DefaultCopyParallel = DefaultParallel
+
+	// DefaultPrefetchThreshold Default prefetch threshold to switch to async read in ReadOnlyFile
+	DefaultPrefetchThreshold int64 = 20 * 1024 * 1024
+
+	// DefaultPrefetchNum Default prefetch number for async read in ReadOnlyFile
+	DefaultPrefetchNum = DefaultParallel
+
+	// DefaultPrefetchChunkSize Default prefetch chunk size for async read in ReadOnlyFile
+	DefaultPrefetchChunkSize = DefaultPartSize
+
+	// DefaultCopyThreshold Default threshold to use multipart copy in Copier, 200M
+	DefaultCopyThreshold int64 = 200 * 1024 * 1024
+
+	// FilePermMode File permission
+	FilePermMode = os.FileMode(0664)
+
+	// TempFileSuffix Temp file suffix
+	TempFileSuffix = ".temp"
+
+	// CheckpointFileSuffixDownloader Checkpoint file suffix for Downloader
+	CheckpointFileSuffixDownloader = ".dcp"
+
+	// 
CheckpointFileSuffixUploader Checkpoint file suffix for Uploader + CheckpointFileSuffixUploader = ".ucp" + + // CheckpointMagic Checkpoint file Magic + CheckpointMagic = "92611BED-89E2-46B6-89E5-72F273D4B0A3" + + // DefaultProduct Product for signing + DefaultProduct = "oss" + + // CloudBoxProduct Product of cloud box for signing + CloudBoxProduct = "oss-cloudbox" + + // DefaultEndpointScheme The URL's scheme, default is https + DefaultEndpointScheme = "https" + + // DefaultSignatureVersion Default signature version is v4 + DefaultSignatureVersion = SignatureVersionV4 + + // DefaultOutOfOrderReadThreshold Default out of order read threshold is 3 + DefaultOutOfOrderReadThreshold int64 = 3 +) diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/downloader.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/downloader.go new file mode 100644 index 000000000..bc1329de5 --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/downloader.go @@ -0,0 +1,598 @@ +package oss + +import ( + "context" + "fmt" + "hash" + "io" + "net/http" + "os" + "path/filepath" + "sort" + "sync" + "sync/atomic" +) + +type DownloaderOptions struct { + PartSize int64 + + ParallelNum int + + EnableCheckpoint bool + + CheckpointDir string + + VerifyData bool + + UseTempFile bool + + ClientOptions []func(*Options) +} + +type Downloader struct { + options DownloaderOptions + client DownloadAPIClient + featureFlags FeatureFlagsType +} + +// NewDownloader creates a new Downloader instance to downloads objects. +// Pass in additional functional options to customize the downloader behavior. +func NewDownloader(c DownloadAPIClient, optFns ...func(*DownloaderOptions)) *Downloader { + options := DownloaderOptions{ + PartSize: DefaultUploadPartSize, + ParallelNum: DefaultUploadParallel, + UseTempFile: true, + } + + for _, fn := range optFns { + fn(&options) + } + + u := &Downloader{ + client: c, + options: options, + } + + //Get Client Feature + switch t := c.(type) { + case *Client: + u.featureFlags = t.options.FeatureFlags + case *EncryptionClient: + u.featureFlags = (t.Unwrap().options.FeatureFlags & ^FeatureEnableCRC64CheckDownload) + } + + return u +} + +type DownloadResult struct { + Written int64 +} + +type DownloadError struct { + Err error + Path string +} + +func (m *DownloadError) Error() string { + var extra string + if m.Err != nil { + extra = fmt.Sprintf(", cause: %s", m.Err.Error()) + } + return fmt.Sprintf("download failed %s", extra) +} + +func (m *DownloadError) Unwrap() error { + return m.Err +} + +func (d *Downloader) DownloadFile(ctx context.Context, request *GetObjectRequest, filePath string, optFns ...func(*DownloaderOptions)) (result *DownloadResult, err error) { + // Downloader wrapper + delegate, err := d.newDelegate(ctx, request, optFns...) 
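+	// (editor's note) the delegate runs the fixed pipeline below: validate the
+	// source via HeadObject, open and truncate the destination file, adjust the
+	// requested byte range, load any checkpoint, then fan the ranged GETs out
+	// across parallel workers.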
+ if err != nil { + return nil, err + } + + // Source + if err = delegate.checkSource(); err != nil { + return nil, err + } + + // Destination + var file *os.File + if file, err = delegate.checkDestination(filePath); err != nil { + return nil, err + } + + // Range + if err = delegate.adjustRange(); err != nil { + return nil, err + } + + // Checkpoint + if err = delegate.checkCheckpoint(); err != nil { + return nil, err + } + + // truncate to the right position + if err = delegate.adjustWriter(file); err != nil { + return nil, err + } + + // CRC Part + delegate.updateCRCFlag() + + // download + result, err = delegate.download() + + return result, delegate.closeWriter(file, err) +} + +type downloaderDelegate struct { + base *Downloader + options DownloaderOptions + client DownloadAPIClient + context context.Context + + m sync.Mutex + + request *GetObjectRequest + w io.WriterAt + rstart int64 + pos int64 + epos int64 + written int64 + + // Source's Info + sizeInBytes int64 + etag string + modTime string + headers http.Header + + //Destination's Info + filePath string + tempFilePath string + fileInfo os.FileInfo + + //crc + calcCRC bool + checkCRC bool + + checkpoint *downloadCheckpoint +} + +type downloaderChunk struct { + w io.WriterAt + start int64 + size int64 + cur int64 + rstart int64 //range start +} + +type downloadedChunk struct { + start int64 + size int64 + crc64 uint64 +} + +type downloadedChunks []downloadedChunk + +func (slice downloadedChunks) Len() int { + return len(slice) +} +func (slice downloadedChunks) Less(i, j int) bool { + return slice[i].start < slice[j].start +} +func (slice downloadedChunks) Swap(i, j int) { + slice[i], slice[j] = slice[j], slice[i] +} + +func (c *downloaderChunk) Write(p []byte) (n int, err error) { + if c.cur >= c.size { + return 0, io.EOF + } + + n, err = c.w.WriteAt(p, c.start+c.cur-c.rstart) + c.cur += int64(n) + return +} + +func (d *Downloader) newDelegate(ctx context.Context, request *GetObjectRequest, optFns ...func(*DownloaderOptions)) (*downloaderDelegate, error) { + if request == nil { + return nil, NewErrParamNull("request") + } + + if !isValidBucketName(request.Bucket) { + return nil, NewErrParamInvalid("request.Bucket") + } + + if !isValidObjectName(request.Key) { + return nil, NewErrParamInvalid("request.Key") + } + + if request.Range != nil && !isValidRange(request.Range) { + return nil, NewErrParamInvalid("request.Range") + } + + delegate := downloaderDelegate{ + base: d, + options: d.options, + client: d.client, + context: ctx, + request: request, + } + + for _, opt := range optFns { + opt(&delegate.options) + } + + if delegate.options.ParallelNum <= 0 { + delegate.options.ParallelNum = DefaultDownloadParallel + } + if delegate.options.PartSize <= 0 { + delegate.options.PartSize = DefaultDownloadPartSize + } + + return &delegate, nil +} + +func (d *downloaderDelegate) checkSource() error { + var request HeadObjectRequest + copyRequest(&request, d.request) + result, err := d.client.HeadObject(d.context, &request, d.options.ClientOptions...) 
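+	// (editor's note) the object's length, Last-Modified and ETag recorded
+	// below let later ranged reads detect a source object that changed while
+	// the download was in progress.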
+ if err != nil { + return err + } + + d.sizeInBytes = result.ContentLength + d.modTime = result.Headers.Get(HTTPHeaderLastModified) + d.etag = result.Headers.Get(HTTPHeaderETag) + d.headers = result.Headers + + return nil +} + +func (d *downloaderDelegate) checkDestination(filePath string) (*os.File, error) { + if filePath == "" { + return nil, NewErrParamInvalid("filePath") + } + absFilePath, err := filepath.Abs(filePath) + if err != nil { + return nil, err + } + + // use temporary file + tempFilePath := absFilePath + if d.options.UseTempFile { + tempFilePath += TempFileSuffix + } + d.filePath = absFilePath + d.tempFilePath = tempFilePath + + // use openfile to check the filepath is valid + var file *os.File + if file, err = os.OpenFile(tempFilePath, os.O_WRONLY|os.O_CREATE, FilePermMode); err != nil { + return nil, err + } + + if d.fileInfo, err = file.Stat(); err != nil { + return nil, err + } + + return file, nil +} + +func (d *downloaderDelegate) adjustWriter(file *os.File) error { + expectSize := d.epos - d.rstart + if d.fileInfo != nil && d.fileInfo.Size() > expectSize { + if err := file.Truncate(d.pos - d.rstart); err != nil { + return err + } + } + d.w = file + return nil +} + +func (d *downloaderDelegate) closeWriter(file *os.File, err error) error { + if file != nil { + file.Close() + } + + if err != nil { + if d.checkpoint == nil { + os.Remove(d.tempFilePath) + } + } else { + if d.tempFilePath != d.filePath { + err = os.Rename(d.tempFilePath, d.filePath) + } + if err == nil && d.checkpoint != nil { + d.checkpoint.remove() + } + } + + d.w = nil + d.checkpoint = nil + + return err +} + +func (d *downloaderDelegate) adjustRange() error { + d.pos = 0 + d.rstart = 0 + d.epos = d.sizeInBytes + if d.request.Range != nil { + httpRange, _ := ParseRange(*d.request.Range) + if httpRange.Offset >= d.sizeInBytes { + return fmt.Errorf("invalid range, object size :%v, range: %v", d.sizeInBytes, ToString(d.request.Range)) + } + d.pos = httpRange.Offset + d.rstart = d.pos + if httpRange.Count > 0 { + d.epos = minInt64(httpRange.Offset+httpRange.Count, d.sizeInBytes) + } + } + + return nil +} + +func (d *downloaderDelegate) checkCheckpoint() error { + if d.options.EnableCheckpoint { + d.checkpoint = newDownloadCheckpoint(d.request, d.tempFilePath, d.options.CheckpointDir, d.headers, d.options.PartSize) + d.checkpoint.VerifyData = d.options.VerifyData + if err := d.checkpoint.load(); err != nil { + return err + } + + if d.checkpoint.Loaded { + d.pos = d.checkpoint.Info.Data.DownloadInfo.Offset + d.written = d.pos - d.rstart + } else { + d.checkpoint.Info.Data.DownloadInfo.Offset = d.pos + } + } + return nil +} + +func (d *downloaderDelegate) updateCRCFlag() error { + if (d.base.featureFlags & FeatureEnableCRC64CheckDownload) > 0 { + d.checkCRC = d.request.Range == nil + d.calcCRC = (d.checkpoint != nil && d.checkpoint.VerifyData) || d.checkCRC + } + return nil +} + +func (d *downloaderDelegate) download() (*DownloadResult, error) { + var ( + wg sync.WaitGroup + errValue atomic.Value + cpCh chan downloadedChunk + cpWg sync.WaitGroup + cpChunks downloadedChunks + tracker bool = d.calcCRC || d.checkpoint != nil + tCRC64 uint64 = 0 + ) + + saveErrFn := func(e error) { + errValue.Store(e) + } + + getErrFn := func() error { + v := errValue.Load() + if v == nil { + return nil + } + e, _ := v.(error) + return e + } + + // writeChunkFn runs in worker goroutines to pull chunks off of the ch channel + writeChunkFn := func(ch chan downloaderChunk) { + defer wg.Done() + var hash hash.Hash64 + if d.calcCRC { 
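+			// (editor's note) one CRC64 hasher per worker goroutine; OSS checks
+			// downloads against the X-Oss-Hash-Crc64ecma header, and downloadChunk
+			// resets this hasher before each chunk.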
+ hash = NewCRC64(0) + } + + for { + chunk, ok := <-ch + if !ok { + break + } + + if getErrFn() != nil { + continue + } + + dchunk, derr := d.downloadChunk(chunk, hash) + + if derr != nil && derr != io.EOF { + saveErrFn(derr) + } else { + // update tracker info + if tracker { + cpCh <- dchunk + } + } + } + } + + // trackerFn runs in worker goroutines to update checkpoint info or calc downloaded crc + trackerFn := func(ch chan downloadedChunk) { + defer cpWg.Done() + var ( + tOffset int64 = 0 + ) + + if d.checkpoint != nil { + tOffset = d.checkpoint.Info.Data.DownloadInfo.Offset + tCRC64 = d.checkpoint.Info.Data.DownloadInfo.CRC64 + } + + for { + chunk, ok := <-ch + if !ok { + break + } + cpChunks = append(cpChunks, chunk) + sort.Sort(cpChunks) + newOffset := tOffset + i := 0 + for ii := range cpChunks { + if cpChunks[ii].start == newOffset { + newOffset += cpChunks[ii].size + i++ + } else { + break + } + } + if newOffset != tOffset { + //remove updated chunk in cpChunks + if d.calcCRC { + tCRC64 = d.combineCRC(tCRC64, cpChunks[0:i]) + } + tOffset = newOffset + cpChunks = cpChunks[i:] + if d.checkpoint != nil { + d.checkpoint.Info.Data.DownloadInfo.Offset = tOffset + d.checkpoint.Info.Data.DownloadInfo.CRC64 = tCRC64 + d.checkpoint.dump() + } + } + } + } + + // Start the download workers + ch := make(chan downloaderChunk, d.options.ParallelNum) + for i := 0; i < d.options.ParallelNum; i++ { + wg.Add(1) + go writeChunkFn(ch) + } + + // Start tracker worker if need track downloaded chunk + if tracker { + cpCh = make(chan downloadedChunk, maxInt(3, d.options.ParallelNum)) + cpWg.Add(1) + go trackerFn(cpCh) + } + + // Consume downloaded data + if d.request.ProgressFn != nil && d.written > 0 { + d.request.ProgressFn(d.written, d.written, d.sizeInBytes) + } + + // Queue the next range of bytes to read. + for getErrFn() == nil { + if d.pos >= d.epos { + break + } + size := minInt64(d.epos-d.pos, d.options.PartSize) + ch <- downloaderChunk{w: d.w, start: d.pos, size: size, rstart: d.rstart} + d.pos += size + } + + // Waiting for parts download finished + close(ch) + wg.Wait() + + if tracker { + close(cpCh) + cpWg.Wait() + } + + if err := getErrFn(); err != nil { + return nil, d.wrapErr(err) + } + + if d.checkCRC { + if len(cpChunks) > 0 { + sort.Sort(cpChunks) + } + if derr := checkResponseHeaderCRC64(fmt.Sprint(d.combineCRC(tCRC64, cpChunks)), d.headers); derr != nil { + return nil, d.wrapErr(derr) + } + } + + return &DownloadResult{ + Written: d.written, + }, nil +} + +func (d *downloaderDelegate) incrWritten(n int64) { + d.m.Lock() + defer d.m.Unlock() + d.written += n + if d.request.ProgressFn != nil && n > 0 { + d.request.ProgressFn(n, d.written, d.sizeInBytes) + } +} + +func (d *downloaderDelegate) downloadChunk(chunk downloaderChunk, hash hash.Hash64) (downloadedChunk, error) { + // Get the next byte range of data + var request GetObjectRequest + copyRequest(&request, d.request) + + getFn := func(ctx context.Context, httpRange HTTPRange) (output *ReaderRangeGetOutput, err error) { + // update range + request.Range = nil + rangeStr := httpRange.FormatHTTPRange() + request.RangeBehavior = nil + if rangeStr != nil { + request.Range = rangeStr + request.RangeBehavior = Ptr("standard") + } + + result, err := d.client.GetObject(ctx, &request, d.options.ClientOptions...) 
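+			// (editor's note) getFn is handed to NewRangeReader below together with
+			// d.etag, which lets the range reader reopen the stream for the remaining
+			// bytes and notice a source object that changed mid-download.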
+			if err != nil {
+				return nil, err
+			}
+
+			return &ReaderRangeGetOutput{
+				Body:          result.Body,
+				ETag:          result.ETag,
+				ContentLength: result.ContentLength,
+				ContentRange:  result.ContentRange,
+			}, nil
+	}
+
+	reader, _ := NewRangeReader(d.context, getFn, &HTTPRange{chunk.start, chunk.size}, d.etag)
+	defer reader.Close()
+
+	var (
+		r     io.Reader = reader
+		crc64 uint64    = 0
+	)
+	if hash != nil {
+		hash.Reset()
+		r = io.TeeReader(reader, hash)
+	}
+
+	n, err := io.Copy(&chunk, r)
+	d.incrWritten(n)
+
+	if hash != nil {
+		crc64 = hash.Sum64()
+	}
+
+	return downloadedChunk{
+		start: chunk.start,
+		size:  n,
+		crc64: crc64,
+	}, err
+}
+
+func (u *downloaderDelegate) combineCRC(hashCRC uint64, crcs downloadedChunks) uint64 {
+	if len(crcs) == 0 {
+		return hashCRC
+	}
+	crc := hashCRC
+	for _, c := range crcs {
+		crc = CRC64Combine(crc, c.crc64, uint64(c.size))
+	}
+	return crc
+}
+
+func (u *downloaderDelegate) wrapErr(err error) error {
+	return &DownloadError{
+		Path: fmt.Sprintf("oss://%s/%s", *u.request.Bucket, *u.request.Key),
+		Err:  err}
+}
diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/encryption_client.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/encryption_client.go
new file mode 100644
index 000000000..cfa5c20c8
--- /dev/null
+++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/encryption_client.go
@@ -0,0 +1,503 @@
+package oss
+
+import (
+	"context"
+	"encoding/base64"
+	"fmt"
+	"net/http"
+
+	"github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/crypto"
+)
+
+// EncryptionUaSuffix user agent tag for client encryption
+const (
+	EncryptionUaSuffix string = "OssEncryptionClient"
+)
+
+type EncryptionClientOptions struct {
+	MasterCiphers []crypto.MasterCipher
+}
+
+type EncryptionClient struct {
+	client           *Client
+	defualtCCBuilder crypto.ContentCipherBuilder
+	ccBuilderMap     map[string]crypto.ContentCipherBuilder
+	alignLen         int
+}
+
+// EncryptionMultiPartContext saves encryption or decryption information
+type EncryptionMultiPartContext struct {
+	ContentCipher crypto.ContentCipher
+	DataSize      int64
+	PartSize      int64
+}
+
+// Valid reports whether the EncryptionMultiPartContext is valid
+func (ec EncryptionMultiPartContext) Valid() bool {
+	if ec.ContentCipher == nil || ec.DataSize == 0 || ec.PartSize == 0 {
+		return false
+	}
+	return true
+}
+
+func NewEncryptionClient(c *Client, masterCipher crypto.MasterCipher, optFns ...func(*EncryptionClientOptions)) (*EncryptionClient, error) {
+	options := EncryptionClientOptions{}
+	for _, fn := range optFns {
+		fn(&options)
+	}
+
+	if masterCipher == nil {
+		return nil, NewErrParamNull("masterCipher")
+	}
+
+	defualtCCBuilder := crypto.CreateAesCtrCipher(masterCipher)
+	ccBuilderMap := map[string]crypto.ContentCipherBuilder{}
+	for _, m := range options.MasterCiphers {
+		if m != nil && len(m.GetMatDesc()) > 0 {
+			ccBuilderMap[m.GetMatDesc()] = crypto.CreateAesCtrCipher(m)
+		}
+	}
+
+	e := &EncryptionClient{
+		client:           c,
+		defualtCCBuilder: defualtCCBuilder,
+		ccBuilderMap:     ccBuilderMap,
+		alignLen:         16,
+	}
+
+	return e, nil
+}
+
+func (e *EncryptionClient) Unwrap() *Client { return e.client }
+
+// GetObjectMeta Queries the metadata of an object, including ETag, Size, and LastModified.
+// The content of the object is not returned.
+func (e *EncryptionClient) GetObjectMeta(ctx context.Context, request *GetObjectMetaRequest, optFns ...func(*Options)) (*GetObjectMetaResult, error) {
+	return e.client.GetObjectMeta(ctx, request, optFns...)
+}
+
+// HeadObject Queries the metadata of an object. The content of the object is not returned.
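+// (editor's note) This is a pass-through to the underlying Client; a HEAD
+// response carries no body, so there is nothing to decrypt at this layer.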
+func (e *EncryptionClient) HeadObject(ctx context.Context, request *HeadObjectRequest, optFns ...func(*Options)) (*HeadObjectResult, error) {
+	return e.client.HeadObject(ctx, request, optFns...)
+}
+
+// GetObject Downloads an object.
+func (e *EncryptionClient) GetObject(ctx context.Context, request *GetObjectRequest, optFns ...func(*Options)) (*GetObjectResult, error) {
+	return e.getObjectSecurely(ctx, request, optFns...)
+}
+
+// PutObject Uploads an object.
+func (e *EncryptionClient) PutObject(ctx context.Context, request *PutObjectRequest, optFns ...func(*Options)) (*PutObjectResult, error) {
+	return e.putObjectSecurely(ctx, request, optFns...)
+}
+
+// InitiateMultipartUpload Initiates a multipart upload task before you can upload data in parts to Object Storage Service (OSS).
+func (e *EncryptionClient) InitiateMultipartUpload(ctx context.Context, request *InitiateMultipartUploadRequest, optFns ...func(*Options)) (*InitiateMultipartUploadResult, error) {
+	return e.initiateMultipartUploadSecurely(ctx, request, optFns...)
+}
+
+// UploadPart Uploads data in blocks (parts) based on the specified object name and uploadId.
+func (e *EncryptionClient) UploadPart(ctx context.Context, request *UploadPartRequest, optFns ...func(*Options)) (*UploadPartResult, error) {
+	return e.uploadPartSecurely(ctx, request, optFns...)
+}
+
+// CompleteMultipartUpload Completes the multipart upload task of an object after all parts of the object are uploaded.
+func (e *EncryptionClient) CompleteMultipartUpload(ctx context.Context, request *CompleteMultipartUploadRequest, optFns ...func(*Options)) (*CompleteMultipartUploadResult, error) {
+	return e.client.CompleteMultipartUpload(ctx, request, optFns...)
+}
+
+// AbortMultipartUpload Cancels a multipart upload task and deletes the parts uploaded in the task.
+func (e *EncryptionClient) AbortMultipartUpload(ctx context.Context, request *AbortMultipartUploadRequest, optFns ...func(*Options)) (*AbortMultipartUploadResult, error) {
+	return e.client.AbortMultipartUpload(ctx, request, optFns...)
+}
+
+// ListParts Lists all parts that are uploaded by using a specified upload ID.
+func (e *EncryptionClient) ListParts(ctx context.Context, request *ListPartsRequest, optFns ...func(*Options)) (*ListPartsResult, error) {
+	return e.client.ListParts(ctx, request, optFns...)
+}
+
+// NewDownloader creates a new Downloader instance to download objects.
+func (c *EncryptionClient) NewDownloader(optFns ...func(*DownloaderOptions)) *Downloader {
+	return NewDownloader(c, optFns...)
+}
+
+// NewUploader creates a new Uploader instance to upload objects.
+func (c *EncryptionClient) NewUploader(optFns ...func(*UploaderOptions)) *Uploader {
+	return NewUploader(c, optFns...)
+}
+
+// OpenFile opens the named file for reading.
+func (c *EncryptionClient) OpenFile(ctx context.Context, bucket string, key string, optFns ...func(*OpenOptions)) (*ReadOnlyFile, error) {
+	return NewReadOnlyFile(ctx, c, bucket, key, optFns...)
+}
+
+func (e *EncryptionClient) getObjectSecurely(ctx context.Context, request *GetObjectRequest, optFns ...func(*Options)) (*GetObjectResult, error) {
+	if request == nil {
+		return nil, NewErrParamNull("request")
+	}
+
+	var (
+		err          error
+		httpRange    *HTTPRange
+		discardCount int64 = 0
+		adjustOffset int64 = 0
+		closeBody    bool  = true
+	)
+
+	if request.Range != nil {
+		httpRange, err = ParseRange(*request.Range)
+		if err != nil {
+			return nil, err
+		}
+		offset := httpRange.Offset
+		count := httpRange.Count
+		adjustOffset = adjustRangeStart(offset, int64(e.alignLen))
+		discardCount = httpRange.Offset - adjustOffset
+
+		if discardCount != 0 {
+			if count > 0 {
+				count += discardCount
+			}
+			httpRange.Offset = adjustOffset
+			httpRange.Count = count
+		}
+	}
+
+	eRequest := request
+	if httpRange != nil && discardCount > 0 {
+		_request := *request
+		eRequest = &_request
+		eRequest.Range = httpRange.FormatHTTPRange()
+		eRequest.RangeBehavior = Ptr("standard")
+	}
+
+	result, err := e.client.GetObject(ctx, eRequest, optFns...)
+
+	if err != nil {
+		return nil, err
+	}
+
+	defer func() {
+		if closeBody && result.Body != nil {
+			result.Body.Close()
+		}
+	}()
+
+	if hasEncryptedHeader(result.Headers) {
+		envelope, err := getEnvelopeFromHeader(result.Headers)
+		if err != nil {
+			return nil, err
+		}
+		if !isValidContentAlg(envelope.CEKAlg) {
+			return nil, fmt.Errorf("not supported content algorithm %s,object:%s", envelope.CEKAlg, ToString(request.Key))
+		}
+		if !envelope.IsValid() {
+			return nil, fmt.Errorf("getEnvelopeFromHeader error,object:%s", ToString(request.Key))
+		}
+
+		// use ContentCipherBuilder to decrypt the object by default
+		cc, err := e.getContentCipherBuilder(envelope).ContentCipherEnv(envelope)
+		if err != nil {
+			return nil, fmt.Errorf("%s,object:%s", err.Error(), ToString(request.Key))
+		}
+
+		if adjustOffset > 0 {
+			cipherData := cc.GetCipherData().Clone()
+			cipherData.SeekIV(uint64(adjustOffset))
+			cc, _ = cc.Clone(cipherData)
+		}
+
+		result.Body, err = cc.DecryptContent(result.Body)
+	}
+
+	if discardCount > 0 && err == nil {
+		// rewrite ContentRange & ContentLength
+		if result.ContentRange != nil {
+			if from, to, total, cerr := ParseContentRange(*result.ContentRange); cerr == nil {
+				from += discardCount
+				value := fmt.Sprintf("bytes %v-%v/%v", from, to, total)
+				result.ContentRange = Ptr(value)
+				result.Headers.Set(HTTPHeaderContentRange, value)
+			}
+		} else {
+			result.Headers.Set(HTTPHeaderContentRange, fmt.Sprintf("bytes %v-/*", discardCount))
+		}
+		if result.ContentLength > 0 {
+			result.ContentLength -= discardCount
+			result.Headers.Set(HTTPHeaderContentLength, fmt.Sprint(result.ContentLength))
+		}
+		result.Body = &DiscardReadCloser{
+			RC:      result.Body,
+			Discard: int(discardCount),
+		}
+	}
+
+	closeBody = false
+	return result, err
+}
+
+func (e *EncryptionClient) putObjectSecurely(ctx context.Context, request *PutObjectRequest, optFns ...func(*Options)) (*PutObjectResult, error) {
+	if request == nil {
+		return nil, NewErrParamNull("request")
+	}
+	cc, err := e.defualtCCBuilder.ContentCipher()
+	if err != nil {
+		return nil, err
+	}
+	cryptoReader, err := cc.EncryptContent(request.Body)
+	if err != nil {
+		return nil, err
+	}
+
+	eRequest := *request
+	eRequest.Body = cryptoReader
+	addCryptoHeaders(&eRequest, cc.GetCipherData())
+
+	return e.client.PutObject(ctx, &eRequest, optFns...)
+}
+
+func (e *EncryptionClient) initiateMultipartUploadSecurely(ctx context.Context, request *InitiateMultipartUploadRequest, optFns ...func(*Options)) (*InitiateMultipartUploadResult, error) {
+	var err error
+	if request == nil {
+		return nil, NewErrParamNull("request")
+	}
+	if err = e.validEncryptionContext(request); err != nil {
+		return nil, err
+	}
+	cc, err := e.defualtCCBuilder.ContentCipher()
+	if err != nil {
+		return nil, err
+	}
+	eRequest := *request
+	addMultiPartCryptoHeaders(&eRequest, cc.GetCipherData())
+
+	result, err := e.client.InitiateMultipartUpload(ctx, &eRequest, optFns...)
+	if err != nil {
+		return nil, err
+	}
+
+	result.CSEMultiPartContext = &EncryptionMultiPartContext{
+		ContentCipher: cc,
+		PartSize:      ToInt64(request.CSEPartSize),
+		DataSize:      ToInt64(request.CSEDataSize),
+	}
+	return result, nil
+}
+
+func (e *EncryptionClient) uploadPartSecurely(ctx context.Context, request *UploadPartRequest, optFns ...func(*Options)) (*UploadPartResult, error) {
+	if request == nil {
+		return nil, NewErrParamNull("request")
+	}
+	if request.CSEMultiPartContext == nil {
+		return nil, NewErrParamNull("request.CSEMultiPartContext")
+	}
+	cseCtx := request.CSEMultiPartContext
+	if !cseCtx.Valid() {
+		return nil, fmt.Errorf("request.CSEMultiPartContext is invalid")
+	}
+	if cseCtx.PartSize%int64(e.alignLen) != 0 {
+		return nil, fmt.Errorf("CSEMultiPartContext's PartSize must be aligned to %v", e.alignLen)
+	}
+
+	cipherData := cseCtx.ContentCipher.GetCipherData().Clone()
+	// calculate the iv based on the part number
+	if request.PartNumber > 1 {
+		cipherData.SeekIV(uint64(request.PartNumber-1) * uint64(cseCtx.PartSize))
+	}
+
+	// for parallel upload part
+	cc, _ := cseCtx.ContentCipher.Clone(cipherData)
+
+	cryptoReader, err := cc.EncryptContent(request.Body)
+	if err != nil {
+		return nil, err
+	}
+
+	eRequest := *request
+	eRequest.Body = cryptoReader
+
+	addUploadPartCryptoHeaders(&eRequest, cseCtx, cc.GetCipherData())
+
+	return e.client.UploadPart(ctx, &eRequest, optFns...)
+} + +func (e *EncryptionClient) getContentCipherBuilder(envelope crypto.Envelope) crypto.ContentCipherBuilder { + if ccb, ok := e.ccBuilderMap[envelope.MatDesc]; ok { + return ccb + } + return e.defualtCCBuilder +} + +func (e *EncryptionClient) validEncryptionContext(request *InitiateMultipartUploadRequest) error { + partSize := ToInt64(request.CSEPartSize) + if partSize <= 0 { + return NewErrParamInvalid("request.CSEPartSize") + } + + if partSize%int64(e.alignLen) != 0 { + return fmt.Errorf("request.CSEPartSize must aligned to the %v", e.alignLen) + } + + return nil +} + +func hasEncryptedHeader(headers http.Header) bool { + return len(headers.Get(OssClientSideEncryptionKey)) > 0 +} + +// addCryptoHeaders save Envelope information in oss meta +func addCryptoHeaders(request *PutObjectRequest, cd *crypto.CipherData) { + if request.Headers == nil { + request.Headers = map[string]string{} + } + + // convert content-md5 + if request.ContentMD5 != nil { + request.Headers[OssClientSideEncryptionUnencryptedContentMD5] = *request.ContentMD5 + request.ContentMD5 = nil + } + + // convert content-length + if request.ContentLength != nil { + request.Headers[OssClientSideEncryptionUnencryptedContentLength] = fmt.Sprint(*request.ContentLength) + request.ContentLength = nil + } + + // matDesc + if len(cd.MatDesc) > 0 { + request.Headers[OssClientSideEncryptionMatDesc] = cd.MatDesc + } + + // encrypted key + strEncryptedKey := base64.StdEncoding.EncodeToString(cd.EncryptedKey) + request.Headers[OssClientSideEncryptionKey] = strEncryptedKey + + // encrypted iv + strEncryptedIV := base64.StdEncoding.EncodeToString(cd.EncryptedIV) + request.Headers[OssClientSideEncryptionStart] = strEncryptedIV + + // wrap alg + request.Headers[OssClientSideEncryptionWrapAlg] = cd.WrapAlgorithm + + // cek alg + request.Headers[OssClientSideEncryptionCekAlg] = cd.CEKAlgorithm +} + +// addMultiPartCryptoHeaders save Envelope information in oss meta +func addMultiPartCryptoHeaders(request *InitiateMultipartUploadRequest, cd *crypto.CipherData) { + if request.Headers == nil { + request.Headers = map[string]string{} + } + + // matDesc + if len(cd.MatDesc) > 0 { + request.Headers[OssClientSideEncryptionMatDesc] = cd.MatDesc + } + + if ToInt64(request.CSEDataSize) > 0 { + request.Headers[OssClientSideEncryptionDataSize] = fmt.Sprint(*request.CSEDataSize) + } + + request.Headers[OssClientSideEncryptionPartSize] = fmt.Sprint(*request.CSEPartSize) + + // encrypted key + strEncryptedKey := base64.StdEncoding.EncodeToString(cd.EncryptedKey) + request.Headers[OssClientSideEncryptionKey] = strEncryptedKey + + // encrypted iv + strEncryptedIV := base64.StdEncoding.EncodeToString(cd.EncryptedIV) + request.Headers[OssClientSideEncryptionStart] = strEncryptedIV + + // wrap alg + request.Headers[OssClientSideEncryptionWrapAlg] = cd.WrapAlgorithm + + // cek alg + request.Headers[OssClientSideEncryptionCekAlg] = cd.CEKAlgorithm +} + +// addUploadPartCryptoHeaders save Envelope information in oss meta +func addUploadPartCryptoHeaders(request *UploadPartRequest, cseContext *EncryptionMultiPartContext, cd *crypto.CipherData) { + if request.Headers == nil { + request.Headers = map[string]string{} + } + + // matDesc + if len(cd.MatDesc) > 0 { + request.Headers[OssClientSideEncryptionMatDesc] = cd.MatDesc + } + + if cseContext.DataSize > 0 { + request.Headers[OssClientSideEncryptionDataSize] = fmt.Sprint(cseContext.DataSize) + } + + request.Headers[OssClientSideEncryptionPartSize] = fmt.Sprint(cseContext.PartSize) + + // encrypted key + 
strEncryptedKey := base64.StdEncoding.EncodeToString(cd.EncryptedKey)
+	request.Headers[OssClientSideEncryptionKey] = strEncryptedKey
+
+	// encrypted iv
+	strEncryptedIV := base64.StdEncoding.EncodeToString(cd.EncryptedIV)
+	request.Headers[OssClientSideEncryptionStart] = strEncryptedIV
+
+	// wrap alg
+	request.Headers[OssClientSideEncryptionWrapAlg] = cd.WrapAlgorithm
+
+	// cek alg
+	request.Headers[OssClientSideEncryptionCekAlg] = cd.CEKAlgorithm
+}
+
+func isValidContentAlg(algName string) bool {
+	// currently content encryption only supports the AES/CTR algorithm
+	return algName == crypto.AesCtrAlgorithm
+}
+
+func adjustRangeStart(start, align int64) int64 {
+	return (start / align) * align
+}
+
+func getEnvelopeFromHeader(header http.Header) (crypto.Envelope, error) {
+	var envelope crypto.Envelope
+	envelope.IV = header.Get(OssClientSideEncryptionStart)
+	decodedIV, err := base64.StdEncoding.DecodeString(envelope.IV)
+	if err != nil {
+		return envelope, err
+	}
+	envelope.IV = string(decodedIV)
+
+	envelope.CipherKey = header.Get(OssClientSideEncryptionKey)
+	decodedKey, err := base64.StdEncoding.DecodeString(envelope.CipherKey)
+	if err != nil {
+		return envelope, err
+	}
+	envelope.CipherKey = string(decodedKey)
+	envelope.MatDesc = header.Get(OssClientSideEncryptionMatDesc)
+	envelope.WrapAlg = header.Get(OssClientSideEncryptionWrapAlg)
+	envelope.CEKAlg = header.Get(OssClientSideEncryptionCekAlg)
+	envelope.UnencryptedMD5 = header.Get(OssClientSideEncryptionUnencryptedContentMD5)
+	envelope.UnencryptedContentLen = header.Get(OssClientSideEncryptionUnencryptedContentLength)
+	return envelope, err
+}
+
+func getEnvelopeFromListParts(result *ListPartsResult) (crypto.Envelope, error) {
+	var envelope crypto.Envelope
+	if result == nil {
+		return envelope, NewErrParamNull("result.*ListPartsResult")
+	}
+	envelope.IV = ToString(result.ClientEncryptionStart)
+	decodedIV, err := base64.StdEncoding.DecodeString(envelope.IV)
+	if err != nil {
+		return envelope, err
+	}
+	envelope.IV = string(decodedIV)
+
+	envelope.CipherKey = ToString(result.ClientEncryptionKey)
+	decodedKey, err := base64.StdEncoding.DecodeString(envelope.CipherKey)
+	if err != nil {
+		return envelope, err
+	}
+	envelope.CipherKey = string(decodedKey)
+	envelope.WrapAlg = ToString(result.ClientEncryptionWrapAlg)
+	envelope.CEKAlg = ToString(result.ClientEncryptionCekAlg)
+	return envelope, err
+}
diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/endpoints.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/endpoints.go
new file mode 100644
index 000000000..66f8ae8d0
--- /dev/null
+++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/endpoints.go
@@ -0,0 +1,62 @@
+package oss
+
+import (
+	"fmt"
+	"regexp"
+)
+
+type EndpointType int
+
+const (
+	// Access OSS over the public network, oss-[region].aliyuncs.com
+	EndpointPublic EndpointType = iota
+
+	// Access OSS over the internal network, oss-[region]-internal.aliyuncs.com
+	EndpointInternal
+
+	// Access OSS over the global acceleration endpoint, oss-accelerate.aliyuncs.com
+	EndpointAccelerate
+
+	// Access OSS over the acceleration endpoint outside the Chinese mainland, oss-accelerate-overseas.aliyuncs.com
+	EndpointAccelerateOverseas
+
+	// Access OSS over the dual stack endpoint that supports both IPv4 and IPv6, [region].oss.aliyuncs.com
+	EndpointDualStack
+)
+
+var schemeRE = regexp.MustCompile("^([^:]+)://")
+
+func addEndpointScheme(endpoint string, disableSSL bool) string {
+	if endpoint != "" && !schemeRE.MatchString(endpoint) {
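+		// the endpoint came without a scheme: default to https
+		// (DefaultEndpointScheme), or http when SSL is disabled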
+ scheme := DefaultEndpointScheme + if disableSSL { + scheme = "http" + } + endpoint = fmt.Sprintf("%s://%s", scheme, endpoint) + } + return endpoint +} + +func endpointFromRegion(region string, disableSSL bool, etype EndpointType) string { + scheme := DefaultEndpointScheme + if disableSSL { + scheme = "http" + } + + var endpoint string + switch etype { + case EndpointInternal: + endpoint = fmt.Sprintf("oss-%s-internal.aliyuncs.com", region) + case EndpointDualStack: + endpoint = fmt.Sprintf("%s.oss.aliyuncs.com", region) + case EndpointAccelerate: + endpoint = "oss-accelerate.aliyuncs.com" + case EndpointAccelerateOverseas: + endpoint = "oss-accelerate-overseas.aliyuncs.com" + default: + endpoint = fmt.Sprintf("oss-%s.aliyuncs.com", region) + } + endpoint = fmt.Sprintf("%s://%s", scheme, endpoint) + + return endpoint +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/enums.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/enums.go new file mode 100644 index 000000000..e7b2cb4c8 --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/enums.go @@ -0,0 +1,344 @@ +package oss + +// BucketACLType The access control list (ACL) of the bucket +type BucketACLType string + +// Enum values for BucketACLType +const ( + // BucketACLPrivate Only the bucket owner can perform read and write operations on objects in the bucket. + // Other users cannot access the objects in the bucket. + BucketACLPrivate BucketACLType = "private" + + // BucketACLPublicRead Only the bucket owner can write data to objects in the bucket. + // Other users, including anonymous users, can only read objects in the bucket. + BucketACLPublicRead BucketACLType = "public-read" + + // BucketACLPublicReadWrite All users, including anonymous users, can perform read and write operations on the bucket. + BucketACLPublicReadWrite BucketACLType = "public-read-write" +) + +// StorageClassType The storage class of the bucket +type StorageClassType string + +// Enum values for StorageClassType +const ( + // StorageClassStandard Standard provides highly reliable, highly available, + // and high-performance object storage for data that is frequently accessed. + StorageClassStandard StorageClassType = "Standard" + + // StorageClassIA IA provides highly durable storage at lower prices compared with Standard. + // IA has a minimum billable size of 64 KB and a minimum billable storage duration of 30 days. + StorageClassIA StorageClassType = "IA" + + // StorageClassArchive Archive provides high-durability storage at lower prices compared with Standard and IA. + // Archive has a minimum billable size of 64 KB and a minimum billable storage duration of 60 days. + StorageClassArchive StorageClassType = "Archive" + + // StorageClassColdArchive Cold Archive provides highly durable storage at lower prices compared with Archive. + // Cold Archive has a minimum billable size of 64 KB and a minimum billable storage duration of 180 days. + StorageClassColdArchive StorageClassType = "ColdArchive" + + // StorageClassDeepColdArchive Deep Cold Archive provides highly durable storage at lower prices compared with Cold Archive. + // Deep Cold Archive has a minimum billable size of 64 KB and a minimum billable storage duration of 180 days. 
+	StorageClassDeepColdArchive StorageClassType = "DeepColdArchive"
+)
+
+// DataRedundancyType The redundancy type of the bucket
+type DataRedundancyType string
+
+// Enum values for DataRedundancyType
+const (
+	// DataRedundancyLRS Locally redundant storage (LRS) stores copies of each object across different devices in the same zone.
+	// This ensures data reliability and availability even if two storage devices are damaged at the same time.
+	DataRedundancyLRS DataRedundancyType = "LRS"
+
+	// DataRedundancyZRS Zone-redundant storage (ZRS) uses the multi-zone mechanism to distribute user data across
+	// multiple zones in the same region. If one zone becomes unavailable, you can continue to access the data
+	// that is stored in other zones.
+	DataRedundancyZRS DataRedundancyType = "ZRS"
+)
+
+// ObjectACLType The access control list (ACL) of the object
+type ObjectACLType string
+
+// Enum values for ObjectACLType
+const (
+	// ObjectACLPrivate Only the object owner is allowed to perform read and write operations on the object.
+	// Other users cannot access the object.
+	ObjectACLPrivate ObjectACLType = "private"
+
+	// ObjectACLPublicRead Only the object owner can write data to the object.
+	// Other users, including anonymous users, can only read the object.
+	ObjectACLPublicRead ObjectACLType = "public-read"
+
+	// ObjectACLPublicReadWrite All users, including anonymous users, can perform read and write operations on the object.
+	ObjectACLPublicReadWrite ObjectACLType = "public-read-write"
+
+	// ObjectACLDefault The ACL of the object is the same as that of the bucket in which the object is stored.
+	ObjectACLDefault ObjectACLType = "default"
+)
+
+// VersioningStatusType bucket versioning status
+type VersioningStatusType string
+
+const (
+	// VersionEnabled Versioning Status definition: Enabled
+	VersionEnabled VersioningStatusType = "Enabled"
+
+	// VersionSuspended Versioning Status definition: Suspended
+	VersionSuspended VersioningStatusType = "Suspended"
+)
+
+// PayerType the type of request payer
+type PayerType string
+
+const (
+	// Requester the requester who sends the request pays for the request
+	Requester PayerType = "Requester"
+
+	// BucketOwner the bucket owner pays for the request
+	BucketOwner PayerType = "BucketOwner"
+)
+
+// BucketWormStateType the type of bucket worm state
+type BucketWormStateType string
+
+const (
+	BucketWormStateInProgress BucketWormStateType = "InProgress"
+	BucketWormStateLocked     BucketWormStateType = "Locked"
+)
+
+// InventoryFormatType The format of exported inventory lists
+type InventoryFormatType string
+
+// InventoryFormatCSV Enum values for InventoryFormatType
+const (
+	InventoryFormatCSV InventoryFormatType = "CSV"
+)
+
+// InventoryFrequencyType The frequency at which inventory lists are exported
+type InventoryFrequencyType string
+
+// Enum values for InventoryFrequencyType
+const (
+	InventoryFrequencyDaily  InventoryFrequencyType = "Daily"
+	InventoryFrequencyWeekly InventoryFrequencyType = "Weekly"
+)
+
+// InventoryOptionalFieldType The configuration fields that are included in inventory lists.
+type InventoryOptionalFieldType string + +// Enum values for InventoryOptionalFieldType +const ( + InventoryOptionalFieldSize InventoryOptionalFieldType = "Size" + InventoryOptionalFieldLastModifiedDate InventoryOptionalFieldType = "LastModifiedDate" + InventoryOptionalFieldETag InventoryOptionalFieldType = "ETag" + InventoryOptionalFieldStorageClass InventoryOptionalFieldType = "StorageClass" + InventoryOptionalFieldIsMultipartUploaded InventoryOptionalFieldType = "IsMultipartUploaded" + InventoryOptionalFieldEncryptionStatus InventoryOptionalFieldType = "EncryptionStatus" + InventoryOptionalFieldTransitionTime InventoryOptionalFieldType = "TransitionTime" +) + +// AccessMonitorStatusType The type of access monitor status +type AccessMonitorStatusType string + +// Enum values for AccessMonitorStatusType +const ( + AccessMonitorStatusEnabled AccessMonitorStatusType = "Enabled" + AccessMonitorStatusDisabled AccessMonitorStatusType = "Disabled" +) + +type HistoricalObjectReplicationType string + +// Enum values for HistoricalObjectReplicationType +const ( + HistoricalObjectReplicationEnabled HistoricalObjectReplicationType = "enabled" + HistoricalObjectReplicationDisabled HistoricalObjectReplicationType = "disabled" +) + +type TransferTypeType string + +// Enum values for TransferTypeType +const ( + TransferTypeInternal TransferTypeType = "internal" + TransferTypeOssAcc TransferTypeType = "oss_acc" +) + +type StatusType string + +// Enum values for StatusType +const ( + StatusEnabled StatusType = "Enabled" + StatusDisabled StatusType = "Disabled" +) + +type MetaQueryOrderType string + +// Enum values for MetaQueryOrderType +const ( + MetaQueryOrderAsc MetaQueryOrderType = "asc" + MetaQueryOrderDesc MetaQueryOrderType = "desc" +) + +// OSS headers +const ( + HeaderOssPrefix string = "X-Oss-" + HeaderOssMetaPrefix = "X-Oss-Meta-" + HeaderOssACL = "X-Oss-Acl" + HeaderOssObjectACL = "X-Oss-Object-Acl" + HeaderOssObjectType = "X-Oss-Object-Type" + HeaderOssSecurityToken = "X-Oss-Security-Token" + HeaderOssServerSideEncryption = "X-Oss-Server-Side-Encryption" + HeaderOssServerSideEncryptionKeyID = "X-Oss-Server-Side-Encryption-Key-Id" + HeaderOssServerSideDataEncryption = "X-Oss-Server-Side-Data-Encryption" + HeaderOssSSECAlgorithm = "X-Oss-Server-Side-Encryption-Customer-Algorithm" + HeaderOssSSECKey = "X-Oss-Server-Side-Encryption-Customer-Key" + HeaderOssSSECKeyMd5 = "X-Oss-Server-Side-Encryption-Customer-Key-MD5" + HeaderOssCopySource = "X-Oss-Copy-Source" + HeaderOssCopySourceRange = "X-Oss-Copy-Source-Range" + HeaderOssCopySourceIfMatch = "X-Oss-Copy-Source-If-Match" + HeaderOssCopySourceIfNoneMatch = "X-Oss-Copy-Source-If-None-Match" + HeaderOssCopySourceIfModifiedSince = "X-Oss-Copy-Source-If-Modified-Since" + HeaderOssCopySourceIfUnmodifiedSince = "X-Oss-Copy-Source-If-Unmodified-Since" + HeaderOssMetadataDirective = "X-Oss-Metadata-Directive" + HeaderOssNextAppendPosition = "X-Oss-Next-Append-Position" + HeaderOssRequestID = "X-Oss-Request-Id" + HeaderOssCRC64 = "X-Oss-Hash-Crc64ecma" + HeaderOssSymlinkTarget = "X-Oss-Symlink-Target" + HeaderOssStorageClass = "X-Oss-Storage-Class" + HeaderOssCallback = "X-Oss-Callback" + HeaderOssCallbackVar = "X-Oss-Callback-Var" + HeaderOssRequester = "X-Oss-Request-Payer" + HeaderOssTagging = "X-Oss-Tagging" + HeaderOssTaggingDirective = "X-Oss-Tagging-Directive" + HeaderOssTrafficLimit = "X-Oss-Traffic-Limit" + HeaderOssForbidOverWrite = "X-Oss-Forbid-Overwrite" + HeaderOssRangeBehavior = "X-Oss-Range-Behavior" + HeaderOssAllowSameActionOverLap = 
"X-Oss-Allow-Same-Action-Overlap" + HeaderOssDate = "X-Oss-Date" + HeaderOssContentSha256 = "X-Oss-Content-Sha256" + HeaderOssEC = "X-Oss-Ec" + HeaderOssERR = "X-Oss-Err" +) + +// OSS headers for client sider encryption +const ( + OssClientSideEncryptionKey string = "X-Oss-Meta-Client-Side-Encryption-Key" + OssClientSideEncryptionStart = "X-Oss-Meta-Client-Side-Encryption-Start" + OssClientSideEncryptionCekAlg = "X-Oss-Meta-Client-Side-Encryption-Cek-Alg" + OssClientSideEncryptionWrapAlg = "X-Oss-Meta-Client-Side-Encryption-Wrap-Alg" + OssClientSideEncryptionMatDesc = "X-Oss-Meta-Client-Side-Encryption-Matdesc" + OssClientSideEncryptionUnencryptedContentLength = "X-Oss-Meta-Client-Side-Encryption-Unencrypted-Content-Length" + OssClientSideEncryptionUnencryptedContentMD5 = "X-Oss-Meta-Client-Side-Encryption-Unencrypted-Content-Md5" + OssClientSideEncryptionDataSize = "X-Oss-Meta-Client-Side-Encryption-Data-Size" + OssClientSideEncryptionPartSize = "X-Oss-Meta-Client-Side-Encryption-Part-Size" +) + +// HTTP headers +const ( + HTTPHeaderAcceptEncoding string = "Accept-Encoding" + HTTPHeaderAuthorization = "Authorization" + HTTPHeaderCacheControl = "Cache-Control" + HTTPHeaderContentDisposition = "Content-Disposition" + HTTPHeaderContentEncoding = "Content-Encoding" + HTTPHeaderContentLength = "Content-Length" + HTTPHeaderContentMD5 = "Content-MD5" + HTTPHeaderContentType = "Content-Type" + HTTPHeaderContentLanguage = "Content-Language" + HTTPHeaderContentRange = "Content-Range" + HTTPHeaderDate = "Date" + HTTPHeaderETag = "ETag" + HTTPHeaderExpires = "Expires" + HTTPHeaderHost = "Host" + HTTPHeaderLastModified = "Last-Modified" + HTTPHeaderRange = "Range" + HTTPHeaderLocation = "Location" + HTTPHeaderUserAgent = "User-Agent" + HTTPHeaderIfModifiedSince = "If-Modified-Since" + HTTPHeaderIfUnmodifiedSince = "If-Unmodified-Since" + HTTPHeaderIfMatch = "If-Match" + HTTPHeaderIfNoneMatch = "If-None-Match" +) + +type UrlStyleType int + +const ( + UrlStyleVirtualHosted UrlStyleType = iota + UrlStylePath + UrlStyleCName +) + +func (f UrlStyleType) String() string { + switch f { + default: + return "virtual-hosted-style" + case UrlStylePath: + return "path-style" + case UrlStyleCName: + return "cname-style" + } +} + +type FeatureFlagsType int + +const ( + // FeatureCorrectClockSkew If the client time is different from server time by more than about 15 minutes, + // the requests your application makes will be signed with the incorrect time, and the server will reject them. + // The feature to help to identify this case, and SDK will correct for clock skew. + FeatureCorrectClockSkew FeatureFlagsType = 1 << iota + + FeatureEnableMD5 + + // FeatureAutoDetectMimeType Content-Type is automatically added based on the object name if not specified. + // This feature takes effect for PutObject, AppendObject and InitiateMultipartUpload + FeatureAutoDetectMimeType + + // FeatureEnableCRC64CheckUpload check data integrity of uploads via the crc64. + // This feature takes effect for PutObject, AppendObject, UploadPart, Uploader.UploadFrom and Uploader.UploadFile + FeatureEnableCRC64CheckUpload + + // FeatureEnableCRC64CheckDownload check data integrity of downloads via the crc64. 
+ // This feature takes effect for Downloader.DownloadFile + FeatureEnableCRC64CheckDownload + + FeatureFlagsDefault = FeatureCorrectClockSkew + FeatureAutoDetectMimeType + + FeatureEnableCRC64CheckUpload + FeatureEnableCRC64CheckDownload +) + +type SignatureVersionType int + +const ( + SignatureVersionV1 SignatureVersionType = iota + SignatureVersionV4 +) + +func (f SignatureVersionType) String() string { + switch f { + case SignatureVersionV4: + return "OSS Signature Version 4" + default: + return "OSS Signature Version 1" + } +} + +type AuthMethodType int + +const ( + AuthMethodHeader AuthMethodType = iota + AuthMethodQuery +) + +func (f AuthMethodType) String() string { + switch f { + case AuthMethodQuery: + return "authentication in query" + default: + return "authentication in header" + } +} + +// OperationMetadata Keys +const ( + OpMetaKeyResponsHandler string = "opm-response-handler" + OpMetaKeyRequestBodyTracker string = "opm-request-body-tracker" +) diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/errors.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/errors.go new file mode 100644 index 000000000..a0b455835 --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/errors.go @@ -0,0 +1,170 @@ +package oss + +import ( + "encoding/xml" + "fmt" + "net/http" + "strings" + "time" +) + +type ServiceError struct { + XMLName xml.Name `xml:"Error"` + Code string `xml:"Code"` + Message string `xml:"Message"` + RequestID string `xml:"RequestId"` + EC string `xml:"EC"` + + StatusCode int + Snapshot []byte + Timestamp time.Time + RequestTarget string + Headers http.Header +} + +func (e *ServiceError) Error() string { + return fmt.Sprintf( + `Error returned by Service. +Http Status Code: %d. +Error Code: %s. +Request Id: %s. +Message: %s. +EC: %s. +Timestamp: %s. 
+Request Endpoint: %s.`, + e.StatusCode, e.Code, e.RequestID, e.Message, e.EC, e.Timestamp, e.RequestTarget) +} + +func (e *ServiceError) HttpStatusCode() int { + return e.StatusCode +} + +func (e *ServiceError) ErrorCode() string { + return e.Code +} + +type ClientError struct { + Code string + Message string + Err error +} + +func (e *ClientError) Unwrap() error { return e.Err } + +func (e *ClientError) Error() string { + return fmt.Sprintf("client error: %v, %v", e.Message, e.Err) +} + +type OperationError struct { + name string + err error +} + +func (e *OperationError) Operation() string { return e.name } + +func (e *OperationError) Unwrap() error { return e.err } + +func (e *OperationError) Error() string { + return fmt.Sprintf("operation error %s: %v", e.name, e.err) +} + +type DeserializationError struct { + Err error + Snapshot []byte +} + +func (e *DeserializationError) Error() string { + const msg = "deserialization failed" + if e.Err == nil { + return msg + } + return fmt.Sprintf("%s, %v", msg, e.Err) +} + +func (e *DeserializationError) Unwrap() error { return e.Err } + +type SerializationError struct { + Err error +} + +func (e *SerializationError) Error() string { + const msg = "serialization failed" + if e.Err == nil { + return msg + } + return fmt.Sprintf("%s: %v", msg, e.Err) +} + +func (e *SerializationError) Unwrap() error { return e.Err } + +type CanceledError struct { + Err error +} + +func (*CanceledError) CanceledError() bool { return true } + +func (e *CanceledError) Unwrap() error { + return e.Err +} + +func (e *CanceledError) Error() string { + return fmt.Sprintf("canceled, %v", e.Err) +} + +type InvalidParamError interface { + error + Field() string + SetContext(string) +} + +type invalidParamError struct { + context string + field string + reason string +} + +func (e invalidParamError) Error() string { + return fmt.Sprintf("%s, %s.", e.reason, e.Field()) +} + +func (e invalidParamError) Field() string { + sb := &strings.Builder{} + sb.WriteString(e.context) + if sb.Len() > 0 { + sb.WriteRune('.') + } + sb.WriteString(e.field) + return sb.String() +} + +func (e *invalidParamError) SetContext(ctx string) { + e.context = ctx +} + +func NewErrParamRequired(field string) InvalidParamError { + return &invalidParamError{ + field: field, + reason: fmt.Sprintf("missing required field"), + } +} + +func NewErrParamInvalid(field string) InvalidParamError { + return &invalidParamError{ + field: field, + reason: fmt.Sprintf("invalid field"), + } +} + +func NewErrParamNull(field string) InvalidParamError { + return &invalidParamError{ + field: field, + reason: fmt.Sprintf("null field"), + } +} + +func NewErrParamTypeNotSupport(field string) InvalidParamError { + return &invalidParamError{ + field: field, + reason: fmt.Sprintf("type not support"), + } +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/filelike.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/filelike.go new file mode 100644 index 000000000..f5f169f8d --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/filelike.go @@ -0,0 +1,795 @@ +package oss + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "net/http" + "os" + "runtime" + "strings" + "time" +) + +type OpenOptions struct { + Offset int64 + + VersionId *string + + EnablePrefetch bool + PrefetchNum int + ChunkSize int64 + + PrefetchThreshold int64 + RequestPayer *string + + OutOfOrderReadThreshold int64 +} + +type ReadOnlyFile struct { + client OpenFileAPIClient + context context.Context + + 
// object info + bucket string + key string + versionId *string + requestPayer *string + + // file info + sizeInBytes int64 + modTime string + etag string + headers http.Header + + // current read position + offset int64 + + // read + reader io.ReadCloser + readBufOffset int64 + + // prefetch + enablePrefetch bool + chunkSize int64 + prefetchNum int + prefetchThreshold int64 + + asyncReaders []*AsyncRangeReader + seqReadAmount int64 // number of sequential read + numOOORead int64 // number of out of order read + + closed bool // whether we have closed the file + + oooReadThreshold int64 +} + +// NewReadOnlyFile OpenFile opens the named file for reading. +// If successful, methods on the returned file can be used for reading. +func NewReadOnlyFile(ctx context.Context, c OpenFileAPIClient, bucket string, key string, optFns ...func(*OpenOptions)) (*ReadOnlyFile, error) { + options := OpenOptions{ + Offset: 0, + EnablePrefetch: false, + PrefetchNum: DefaultPrefetchNum, + ChunkSize: DefaultPrefetchChunkSize, + PrefetchThreshold: DefaultPrefetchThreshold, + OutOfOrderReadThreshold: DefaultOutOfOrderReadThreshold, + } + + for _, fn := range optFns { + fn(&options) + } + + if options.EnablePrefetch { + var chunkSize int64 + if options.ChunkSize > 0 { + chunkSize = (options.ChunkSize + AsyncReadeBufferSize - 1) / AsyncReadeBufferSize * AsyncReadeBufferSize + } else { + chunkSize = DefaultPrefetchChunkSize + } + options.ChunkSize = chunkSize + + if options.PrefetchNum <= 0 { + options.PrefetchNum = DefaultPrefetchNum + } + + if options.OutOfOrderReadThreshold <= int64(0) { + options.OutOfOrderReadThreshold = DefaultOutOfOrderReadThreshold + } + } + + f := &ReadOnlyFile{ + client: c, + context: ctx, + + bucket: bucket, + key: key, + versionId: options.VersionId, + requestPayer: options.RequestPayer, + + offset: options.Offset, + + enablePrefetch: options.EnablePrefetch, + prefetchNum: options.PrefetchNum, + chunkSize: options.ChunkSize, + prefetchThreshold: options.PrefetchThreshold, + oooReadThreshold: options.OutOfOrderReadThreshold, + } + + result, err := f.client.HeadObject(f.context, &HeadObjectRequest{ + Bucket: &f.bucket, + Key: &f.key, + VersionId: f.versionId, + RequestPayer: f.requestPayer, + }) + + if err != nil { + return nil, err + } + + //File info + f.sizeInBytes = result.ContentLength + f.modTime = result.Headers.Get(HTTPHeaderLastModified) + f.etag = result.Headers.Get(HTTPHeaderETag) + f.headers = result.Headers + + if f.sizeInBytes < 0 { + return nil, fmt.Errorf("file size is invaid, got %v", f.sizeInBytes) + } + + if f.offset > f.sizeInBytes { + return nil, fmt.Errorf("offset is unavailable, offset:%v, file size:%v", f.offset, f.sizeInBytes) + } + + return f, nil +} + +// Close closes the File. +func (f *ReadOnlyFile) Close() error { + if f == nil { + return os.ErrInvalid + } + return f.close() +} + +func (f *ReadOnlyFile) close() error { + if f.closed { + return nil + } + + if f.reader != nil { + f.reader.Close() + f.reader = nil + } + for _, reader := range f.asyncReaders { + reader.Close() + } + f.asyncReaders = nil + + f.closed = true + runtime.SetFinalizer(f, nil) + return nil +} + +// Read reads up to len(b) bytes from the File and stores them in b. +// It returns the number of bytes read and any error encountered. +// At end of file, Read returns 0, io.EOF. 
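+//
+// A minimal sequential read loop (editor's note: a sketch only; "client",
+// "ctx", "bucket" and "key" are placeholders for an existing client, context
+// and object, not part of this file):
+//
+//	f, err := client.OpenFile(ctx, "bucket", "key")
+//	if err != nil {
+//		return err
+//	}
+//	defer f.Close()
+//	buf := make([]byte, 64*1024)
+//	for {
+//		n, err := f.Read(buf)
+//		if n > 0 {
+//			// consume buf[:n]
+//		}
+//		if err == io.EOF {
+//			break
+//		}
+//		if err != nil {
+//			return err
+//		}
+//	}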
+func (f *ReadOnlyFile) Read(p []byte) (bytesRead int, err error) { + if err := f.checkValid("read"); err != nil { + return 0, err + } + n, e := f.read(p) + return n, f.wrapErr("read", e) +} + +func (f *ReadOnlyFile) read(p []byte) (bytesRead int, err error) { + defer func() { + f.offset += int64(bytesRead) + }() + nwant := len(p) + var nread int + for bytesRead < nwant && err == nil { + nread, err = f.readInternal(f.offset+int64(bytesRead), p[bytesRead:]) + if nread > 0 { + bytesRead += nread + } + } + return +} + +// Seek sets the offset for the next Read or Write on file to offset, interpreted +// according to whence: 0 means relative to the origin of the file, 1 means +// relative to the current offset, and 2 means relative to the end. +// It returns the new offset and an error. +func (f *ReadOnlyFile) Seek(offset int64, whence int) (int64, error) { + if err := f.checkValid("seek"); err != nil { + return 0, err + } + r, e := f.seek(offset, whence) + if e != nil { + return 0, f.wrapErr("seek", e) + } + return r, nil +} + +func (f *ReadOnlyFile) seek(offset int64, whence int) (int64, error) { + var abs int64 + switch whence { + case io.SeekStart: + abs = offset + case io.SeekCurrent: + abs = f.offset + offset + case io.SeekEnd: + abs = f.sizeInBytes + offset + default: + return 0, fmt.Errorf("invalid whence") + } + if abs < 0 { + return 0, fmt.Errorf("negative position") + } + if abs > f.sizeInBytes { + return offset - (abs - f.sizeInBytes), fmt.Errorf("offset is unavailable") + } + + f.offset = abs + + return abs, nil +} + +type fileInfo struct { + name string + size int64 + modTime time.Time + header http.Header +} + +func (fi *fileInfo) Name() string { return fi.name } +func (fi *fileInfo) Size() int64 { return fi.size } +func (fi *fileInfo) Mode() os.FileMode { return os.FileMode(0644) } +func (fi *fileInfo) ModTime() time.Time { return fi.modTime } +func (fi *fileInfo) IsDir() bool { return false } +func (fi *fileInfo) Sys() any { return fi.header } + +// Stat returns the FileInfo structure describing file. 
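Seek follows the standard whence semantics above, so a ReadOnlyFile can back any code written against io.ReadSeeker. A small illustrative helper (readWindow is not part of the SDK):

package example

import (
	"io"

	"github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss"
)

// readWindow returns length bytes starting at absolute offset off. Seeking
// past the end of the object fails with "offset is unavailable", mirroring
// the bounds check in seek above.
func readWindow(f *oss.ReadOnlyFile, off int64, length int) ([]byte, error) {
	if _, err := f.Seek(off, io.SeekStart); err != nil {
		return nil, err
	}
	buf := make([]byte, length)
	if _, err := io.ReadFull(f, buf); err != nil {
		return nil, err
	}
	return buf, nil
}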
+func (f *ReadOnlyFile) Stat() (os.FileInfo, error) { + if err := f.checkValid("stat"); err != nil { + return nil, err + } + mtime, _ := http.ParseTime(f.modTime) + return &fileInfo{ + name: f.name(), + size: f.sizeInBytes, + modTime: mtime, + header: f.headers, + }, nil +} + +func (f *ReadOnlyFile) name() string { + var name string + if f.versionId != nil { + name = fmt.Sprintf("oss://%s/%s?versionId=%s", f.bucket, f.key, *f.versionId) + } else { + name = fmt.Sprintf("oss://%s/%s", f.bucket, f.key) + } + return name +} + +func (f *ReadOnlyFile) wrapErr(op string, err error) error { + if err == nil || err == io.EOF { + return err + } + return &os.PathError{Op: op, Path: f.name(), Err: err} +} + +func (f *ReadOnlyFile) checkValid(_ string) error { + if f == nil { + return os.ErrInvalid + } else if f.closed { + return os.ErrClosed + } + return nil +} + +func (f *ReadOnlyFile) readInternal(offset int64, p []byte) (bytesRead int, err error) { + defer func() { + if bytesRead > 0 { + f.readBufOffset += int64(bytesRead) + f.seqReadAmount += int64(bytesRead) + } + }() + + if offset >= f.sizeInBytes { + err = io.EOF + return + } + + if f.readBufOffset != offset { + f.readBufOffset = offset + f.seqReadAmount = 0 + + if f.reader != nil { + f.reader.Close() + f.reader = nil + } + + if f.asyncReaders != nil { + f.numOOORead++ + } + + for _, ar := range f.asyncReaders { + ar.Close() + } + f.asyncReaders = nil + } + + if f.enablePrefetch && f.seqReadAmount >= f.prefetchThreshold && f.numOOORead < f.oooReadThreshold { + //swith to async reader + if f.reader != nil { + f.reader.Close() + f.reader = nil + } + + err = f.prefetch(offset, len(p)) + if err == nil { + bytesRead, err = f.readFromPrefetcher(offset, p) + if err == nil { + return + } + } + + // fall back to read serially + f.seqReadAmount = 0 + for _, ar := range f.asyncReaders { + ar.Close() + } + f.asyncReaders = nil + } + + bytesRead, err = f.readDirect(offset, p) + return +} + +func (f *ReadOnlyFile) readDirect(offset int64, buf []byte) (bytesRead int, err error) { + if offset >= f.sizeInBytes { + return + } + + if f.reader == nil { + var result *GetObjectResult + result, err = f.client.GetObject(f.context, &GetObjectRequest{ + Bucket: Ptr(f.bucket), + Key: Ptr(f.key), + VersionId: f.versionId, + Range: Ptr(fmt.Sprintf("bytes=%d-", offset)), + RangeBehavior: Ptr("standard"), + RequestPayer: f.requestPayer, + }) + if err != nil { + return bytesRead, err + } + + if err = f.checkResultValid(offset, result.Headers); err != nil { + if result != nil { + result.Body.Close() + } + return bytesRead, err + } + + f.reader = result.Body + } + + bytesRead, err = f.reader.Read(buf) + if err != nil { + f.reader.Close() + f.reader = nil + err = nil + } + + return +} + +func (f *ReadOnlyFile) checkResultValid(offset int64, header http.Header) error { + modTime := header.Get(HTTPHeaderLastModified) + etag := header.Get(HTTPHeaderETag) + gotOffset, _ := parseOffsetAndSizeFromHeaders(header) + if gotOffset != offset { + return fmt.Errorf("Range get fail, expect offset:%v, got offset:%v", offset, gotOffset) + } + + if (modTime != "" && f.modTime != "" && modTime != f.modTime) || + (etag != "" && f.etag != "" && etag != f.etag) { + return fmt.Errorf("Source file is changed, origin info [%v,%v], new info [%v,%v]", + f.modTime, f.etag, modTime, etag) + } + + return nil +} + +func (f *ReadOnlyFile) readFromPrefetcher(offset int64, buf []byte) (bytesRead int, err error) { + var nread int + for len(f.asyncReaders) != 0 { + asyncReader := f.asyncReaders[0] + //check offset 
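+	// Prefetched chunks must be consumed strictly in order: a mismatch here
+	// means the read pattern changed, and the caller tears the prefetch
+	// pipeline down and falls back to serial reads.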
+ if offset != asyncReader.Offset() { + return 0, errors.New("out of order") + } + nread, err = asyncReader.Read(buf) + bytesRead += nread + if err != nil { + if err == io.EOF { + //fmt.Printf("asyncReader done\n") + asyncReader.Close() + f.asyncReaders = f.asyncReaders[1:] + err = nil + } else { + return 0, err + } + } + buf = buf[nread:] + if len(buf) == 0 { + return + } + // Update offset for the next read + offset += int64(nread) + } + + return +} + +func (f *ReadOnlyFile) prefetch(offset int64, _ /*needAtLeast*/ int) (err error) { + off := offset + for _, ar := range f.asyncReaders { + off = ar.oriHttpRange.Offset + ar.oriHttpRange.Count + } + //fmt.Printf("prefetch:offset %v, needAtLeast:%v, off:%v\n", offset, needAtLeast, off) + for len(f.asyncReaders) < f.prefetchNum { + remaining := f.sizeInBytes - off + size := minInt64(remaining, f.chunkSize) + cnt := (size + (AsyncReadeBufferSize - 1)) / AsyncReadeBufferSize + //fmt.Printf("f.sizeInBytes:%v, off:%v, size:%v, cnt:%v\n", f.sizeInBytes, off, size, cnt) + //NewAsyncRangeReader support softStartInitial, add more buffer count to prevent connections from not being released + if size > softStartInitial { + acnt := (AsyncReadeBufferSize+(softStartInitial-1))/softStartInitial - 1 + cnt += int64(acnt) + } + if size != 0 { + getFn := func(ctx context.Context, httpRange HTTPRange) (output *ReaderRangeGetOutput, err error) { + request := &GetObjectRequest{ + Bucket: Ptr(f.bucket), + Key: Ptr(f.key), + VersionId: f.versionId, + RequestPayer: f.requestPayer, + } + rangeStr := httpRange.FormatHTTPRange() + if rangeStr != nil { + request.Range = rangeStr + request.RangeBehavior = Ptr("standard") + } + var result *GetObjectResult + result, err = f.client.GetObject(f.context, request) + if err != nil { + return nil, err + } + + return &ReaderRangeGetOutput{ + Body: result.Body, + ETag: result.ETag, + ContentLength: result.ContentLength, + ContentRange: result.ContentRange, + }, nil + //fmt.Printf("result.Headers:%#v\n", result.Headers) + } + ar, err := NewAsyncRangeReader(f.context, getFn, &HTTPRange{off, size}, f.etag, int(cnt)) + if err != nil { + break + } + f.asyncReaders = append(f.asyncReaders, ar) + off += size + } + + if size != f.chunkSize { + break + } + } + return nil +} + +type AppendOptions struct { + // To indicate that the requester is aware that the request and data download will incur costs + RequestPayer *string + + // The parameters when the object is first generated, supports below + // CacheControl, ContentEncoding, Expires, ContentType, ContentType, Metadata + // SSE's parameters, Acl, StorageClass, Tagging + // If the object exists, ignore this parameters + CreateParameter *AppendObjectRequest +} + +type AppendOnlyFile struct { + client AppendFileAPIClient + context context.Context + + // object info + bucket string + key string + requestPayer *string + + info os.FileInfo + + created bool + createParam *AppendObjectRequest + + // current write position + offset int64 + hashCRC64 *string + + closed bool +} + +// NewAppendFile AppendFile opens or creates the named file for appending. +// If successful, methods on the returned file can be used for appending. 
+func NewAppendFile(ctx context.Context, c AppendFileAPIClient, bucket string, key string, optFns ...func(*AppendOptions)) (*AppendOnlyFile, error) { + options := AppendOptions{} + + for _, fn := range optFns { + fn(&options) + } + + f := &AppendOnlyFile{ + client: c, + context: ctx, + + bucket: bucket, + key: key, + requestPayer: options.RequestPayer, + + created: false, + createParam: options.CreateParameter, + } + + result, err := f.client.HeadObject(f.context, &HeadObjectRequest{Bucket: &f.bucket, Key: &f.key, RequestPayer: f.requestPayer}) + if err != nil { + var serr *ServiceError + if errors.As(err, &serr) && serr.StatusCode == 404 { + // not found + } else { + return nil, err + } + } else { + if !strings.EqualFold(ToString(result.ObjectType), "Appendable") { + return nil, errors.New("Not a appendable file") + } + f.offset = result.ContentLength + f.hashCRC64 = result.HashCRC64 + f.created = true + } + + return f, nil +} + +// Write writes len(b) bytes from b to the AppendOnlyFile. +// It returns the number of bytes written and an error, if any. +// Write returns a non-nil error when n != len(b). +func (f *AppendOnlyFile) Write(b []byte) (n int, err error) { + if err := f.checkValid("write"); err != nil { + return 0, err + } + + n, e := f.write(b) + if n < 0 { + n = 0 + } + + if e == nil && n != len(b) { + err = io.ErrShortWrite + } + + if e != nil { + err = f.wrapErr("write", e) + } + + return n, err +} + +// write writes len(b) bytes to the File. +// It returns the number of bytes written and an error, if any. +func (f *AppendOnlyFile) write(b []byte) (n int, err error) { + offset := f.offset + + request := &AppendObjectRequest{ + Bucket: &f.bucket, + Key: &f.key, + Position: Ptr(f.offset), + Body: bytes.NewReader(b), + InitHashCRC64: f.hashCRC64, + RequestPayer: f.requestPayer, + } + + f.applyCreateParamIfNeed(request) + + if f.offset == 0 { + request.InitHashCRC64 = Ptr("0") + } + + var result *AppendObjectResult + if result, err = f.client.AppendObject(f.context, request); err == nil { + f.offset = result.NextPosition + f.hashCRC64 = result.HashCRC64 + f.created = true + } else { + var serr *ServiceError + if errors.As(err, &serr) && serr.Code == "PositionNotEqualToLength" { + if position, hashcrc, ok := f.nextAppendPosition(); ok { + if offset+int64(len(b)) == position { + f.offset = position + f.hashCRC64 = hashcrc + f.created = true + err = nil + } + } + } + } + + return int(f.offset - offset), err +} + +// WriteFrom writes io.Reader to the File. +// It returns the number of bytes written and an error, if any. 
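For context, a hedged sketch of typical AppendOnlyFile use. It assumes an *oss.Client (which should satisfy AppendFileAPIClient) and placeholder names; note that CreateParameter only takes effect if the object does not already exist.

package example

import (
	"context"

	"github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss"
)

func appendLog(ctx context.Context, client *oss.Client) error {
	f, err := oss.NewAppendFile(ctx, client, "example-bucket", "logs/app.log",
		func(o *oss.AppendOptions) {
			// Applied only on first creation of the object.
			o.CreateParameter = &oss.AppendObjectRequest{
				ContentType: oss.Ptr("text/plain"),
			}
		})
	if err != nil {
		return err
	}
	defer f.Close()

	if _, err := f.Write([]byte("first record\n")); err != nil {
		return err
	}
	_, err = f.Write([]byte("second record\n")) // continues at NextPosition
	return err
}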
+func (f *AppendOnlyFile) WriteFrom(r io.Reader) (n int64, err error) { + if err := f.checkValid("write"); err != nil { + return 0, err + } + + n, err = f.writeFrom(r) + + if err != nil { + err = f.wrapErr("write", err) + } + + return n, err +} + +func (f *AppendOnlyFile) writeFrom(r io.Reader) (n int64, err error) { + offset := f.offset + + request := &AppendObjectRequest{ + Bucket: &f.bucket, + Key: &f.key, + Position: Ptr(f.offset), + Body: r, + RequestPayer: f.requestPayer, + } + + f.applyCreateParamIfNeed(request) + + var roffset int64 + var rs io.Seeker + rs, ok := r.(io.Seeker) + if ok { + roffset, _ = rs.Seek(0, io.SeekCurrent) + } + + var result *AppendObjectResult + if result, err = f.client.AppendObject(f.context, request); err == nil { + f.offset = result.NextPosition + f.hashCRC64 = result.HashCRC64 + f.created = true + } else { + var serr *ServiceError + if errors.As(err, &serr) && serr.Code == "PositionNotEqualToLength" { + if position, hashcrc, ok := f.nextAppendPosition(); ok { + if rs != nil { + if curr, e := rs.Seek(0, io.SeekCurrent); e == nil { + if offset+(curr-roffset) == position { + f.offset = position + f.hashCRC64 = hashcrc + f.created = true + err = nil + } + } + } + } + } + } + + return f.offset - offset, err +} + +func (f *AppendOnlyFile) nextAppendPosition() (int64, *string, bool) { + if h, e := f.client.HeadObject(f.context, &HeadObjectRequest{Bucket: &f.bucket, Key: &f.key, RequestPayer: f.requestPayer}); e == nil { + return h.ContentLength, h.HashCRC64, true + } + return 0, nil, false +} + +func (f *AppendOnlyFile) applyCreateParamIfNeed(request *AppendObjectRequest) { + if f.created || f.createParam == nil { + return + } + + if len(f.createParam.Acl) > 0 { + request.Acl = f.createParam.Acl + } + if len(f.createParam.StorageClass) > 0 { + request.StorageClass = f.createParam.StorageClass + } + request.CacheControl = f.createParam.CacheControl + request.ContentDisposition = f.createParam.ContentDisposition + request.ContentEncoding = f.createParam.ContentEncoding + request.Expires = f.createParam.Expires + request.ContentType = f.createParam.ContentType + request.ServerSideEncryption = f.createParam.ServerSideEncryption + request.ServerSideDataEncryption = f.createParam.ServerSideDataEncryption + request.ServerSideEncryptionKeyId = f.createParam.ServerSideEncryptionKeyId + request.Metadata = f.createParam.Metadata + request.Tagging = f.createParam.Tagging +} + +// wrapErr wraps an error that occurred during an operation on an open file. +// It passes io.EOF through unchanged, otherwise converts +// Wraps the error in a PathError. +func (f *AppendOnlyFile) wrapErr(op string, err error) error { + if err == nil || err == io.EOF { + return err + } + return &os.PathError{Op: op, Path: f.name(), Err: err} +} + +func (f *AppendOnlyFile) checkValid(_ string) error { + if f == nil { + return os.ErrInvalid + } else if f.closed { + return os.ErrClosed + } + return nil +} + +func (f *AppendOnlyFile) name() string { + return fmt.Sprintf("oss://%s/%s", f.bucket, f.key) +} + +// Stat returns the FileInfo structure describing file. 
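Because writeFrom snapshots a seekable reader's position before each attempt, it can verify and adopt the server's position after a PositionNotEqualToLength conflict. A sketch with a local file as the source (names are placeholders):

package example

import (
	"context"
	"os"

	"github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss"
)

func appendFromFile(ctx context.Context, client *oss.Client, path string) (int64, error) {
	src, err := os.Open(path)
	if err != nil {
		return 0, err
	}
	defer src.Close()

	f, err := oss.NewAppendFile(ctx, client, "example-bucket", "logs/app.log")
	if err != nil {
		return 0, err
	}
	defer f.Close()

	// *os.File is an io.Seeker, so WriteFrom can re-derive how many bytes
	// were actually consumed if the server reports a position conflict.
	return f.WriteFrom(src)
}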
+func (f *AppendOnlyFile) Stat() (os.FileInfo, error) {
+	if err := f.checkValid("stat"); err != nil {
+		return nil, err
+	}
+
+	info, err := f.stat()
+
+	if err != nil {
+		return nil, f.wrapErr("stat", err)
+	}
+
+	return info, nil
+}
+
+func (f *AppendOnlyFile) stat() (os.FileInfo, error) {
+	var err error
+	if f.info == nil || f.info.Size() != f.offset {
+		f.info = nil
+		if result, err := f.client.HeadObject(f.context, &HeadObjectRequest{Bucket: &f.bucket, Key: &f.key, RequestPayer: f.requestPayer}); err == nil {
+			f.info = &fileInfo{
+				name:    f.name(),
+				size:    result.ContentLength,
+				modTime: ToTime(result.LastModified),
+				header:  result.Headers,
+			}
+		}
+	}
+	return f.info, err
+}
+
+// Close closes the File.
+func (f *AppendOnlyFile) Close() error {
+	if f == nil {
+		return os.ErrInvalid
+	}
+	return f.close()
+}
+
+func (f *AppendOnlyFile) close() error {
+	f.closed = true
+	return nil
+}
diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/from_ptr.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/from_ptr.go
new file mode 100644
index 000000000..3059bf1cd
--- /dev/null
+++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/from_ptr.go
@@ -0,0 +1,63 @@
+package oss
+
+import "time"
+
+// ToBool returns the bool value if the pointer is not nil.
+// Returns a bool zero value if the pointer is nil.
+func ToBool(p *bool) (v bool) {
+	if p == nil {
+		return v
+	}
+
+	return *p
+}
+
+// ToInt returns the int value if the pointer is not nil.
+// Returns an int zero value if the pointer is nil.
+func ToInt(p *int) (v int) {
+	if p == nil {
+		return v
+	}
+
+	return *p
+}
+
+// ToInt64 returns the int64 value if the pointer is not nil.
+// Returns an int64 zero value if the pointer is nil.
+func ToInt64(p *int64) (v int64) {
+	if p == nil {
+		return v
+	}
+
+	return *p
+}
+
+// ToString returns the string value if the pointer is not nil.
+// Returns a string zero value if the pointer is nil.
+func ToString(p *string) (v string) {
+	if p == nil {
+		return v
+	}
+
+	return *p
+}
+
+// ToTime returns the time.Time value if the pointer is not nil.
+// Returns a time.Time zero value if the pointer is nil.
+func ToTime(p *time.Time) (v time.Time) {
+	if p == nil {
+		return v
+	}
+
+	return *p
+}
+
+// ToDuration returns the time.Duration value if the pointer is not nil.
+// Returns a time.Duration zero value if the pointer is nil.
+func ToDuration(p *time.Duration) (v time.Duration) { + if p == nil { + return v + } + + return *p +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/io_utils.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/io_utils.go new file mode 100644 index 000000000..f67d1cc21 --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/io_utils.go @@ -0,0 +1,869 @@ +package oss + +import ( + "context" + "errors" + "fmt" + "io" + "sync" + "time" +) + +type LimitedReadCloser struct { + *io.LimitedReader + io.Closer +} + +func NewLimitedReadCloser(rc io.ReadCloser, limit int64) io.ReadCloser { + if limit < 0 { + return rc + } + return &LimitedReadCloser{ + LimitedReader: &io.LimitedReader{R: rc, N: limit}, + Closer: rc, + } +} + +func ReadSeekNopCloser(r io.Reader) ReadSeekerNopClose { + return ReadSeekerNopClose{r} +} + +type ReadSeekerNopClose struct { + r io.Reader +} + +func (r ReadSeekerNopClose) Read(p []byte) (int, error) { + switch t := r.r.(type) { + case io.Reader: + return t.Read(p) + } + return 0, nil +} + +func (r ReadSeekerNopClose) Seek(offset int64, whence int) (int64, error) { + switch t := r.r.(type) { + case io.Seeker: + return t.Seek(offset, whence) + } + return int64(0), nil +} + +func (r ReadSeekerNopClose) Close() error { + return nil +} + +func (r ReadSeekerNopClose) IsSeeker() bool { + _, ok := r.r.(io.Seeker) + return ok +} + +func (r ReadSeekerNopClose) HasLen() (int, bool) { + type lenner interface { + Len() int + } + + if lr, ok := r.r.(lenner); ok { + return lr.Len(), true + } + + return 0, false +} + +func (r ReadSeekerNopClose) GetLen() (int64, error) { + if l, ok := r.HasLen(); ok { + return int64(l), nil + } + + if s, ok := r.r.(io.Seeker); ok { + return seekerLen(s) + } + + return -1, nil +} + +func seekerLen(s io.Seeker) (int64, error) { + curOffset, err := s.Seek(0, io.SeekCurrent) + if err != nil { + return 0, err + } + + endOffset, err := s.Seek(0, io.SeekEnd) + if err != nil { + return 0, err + } + + _, err = s.Seek(curOffset, io.SeekStart) + if err != nil { + return 0, err + } + + return endOffset - curOffset, nil +} + +func isReaderSeekable(r io.Reader) bool { + switch v := r.(type) { + case ReadSeekerNopClose: + return v.IsSeeker() + case *ReadSeekerNopClose: + return v.IsSeeker() + case io.ReadSeeker: + return true + default: + return false + } +} + +func GetReaderLen(r io.Reader) int64 { + type lenner interface { + Len() int + } + + if lr, ok := r.(lenner); ok { + return int64(lr.Len()) + } + + if s, ok := r.(io.Seeker); ok { + if l, err := seekerLen(s); err == nil { + return l + } + } + + return -1 +} + +type buffer struct { + buf []byte + err error + offset int +} + +func (b *buffer) isEmpty() bool { + if b == nil { + return true + } + if len(b.buf)-b.offset <= 0 { + return true + } + return false +} + +func (b *buffer) read(rd io.Reader) error { + var n int + n, b.err = readFill(rd, b.buf) + b.buf = b.buf[0:n] + b.offset = 0 + return b.err +} + +func (b *buffer) buffer() []byte { + return b.buf[b.offset:] +} + +func (b *buffer) increment(n int) { + b.offset += n +} + +const ( + AsyncReadeBufferSize = 1024 * 1024 + softStartInitial = 512 * 1024 +) + +type ReaderRangeGetOutput struct { + Body io.ReadCloser + ContentLength int64 + ContentRange *string + ETag *string + LastModified *time.Time +} + +type ReaderRangeGetFn func(context.Context, HTTPRange) (output *ReaderRangeGetOutput, err error) + +type AsyncRangeReader struct { + in io.ReadCloser // Input reader + ready chan *buffer // Buffers ready to be 
handed to the reader
+	token   chan struct{} // Tokens which allow a buffer to be taken
+	exit    chan struct{} // Closes when finished
+	buffers int           // Number of buffers
+	err     error         // If an error has occurred it is here
+	cur     *buffer       // Current buffer being served
+	exited  chan struct{} // Channel is closed when the async reader shuts down
+	size    int           // size of buffer to use
+	closed  bool          // whether we have closed the underlying stream
+	mu      sync.Mutex    // lock for Read/WriteTo/Abandon/Close
+
+	//Range Getter
+	rangeGet  ReaderRangeGetFn
+	httpRange HTTPRange
+
+	// For reader
+	offset  int64
+	gotsize int64
+
+	oriHttpRange HTTPRange
+
+	context context.Context
+	cancel  context.CancelFunc
+
+	// Origin file pattern
+	etag    string
+	modTime string
+}
+
+// NewAsyncRangeReader returns a reader that will asynchronously read from
+// the Reader returned by the getter, starting at the given offset, into a number of buffers each of size AsyncReadeBufferSize.
+// The input can be read from the returned reader.
+// When done, use Close to release the buffers and close the supplied input.
+// The etag is used to identify the content of the object. If not set, the ETag of the first response will be used instead.
+func NewAsyncRangeReader(ctx context.Context,
+	rangeGet ReaderRangeGetFn, httpRange *HTTPRange, etag string, buffers int) (*AsyncRangeReader, error) {
+
+	if buffers <= 0 {
+		return nil, errors.New("number of buffers too small")
+	}
+	if rangeGet == nil {
+		return nil, errors.New("nil reader supplied")
+	}
+
+	context, cancel := context.WithCancel(ctx)
+
+	range_ := HTTPRange{}
+	if httpRange != nil {
+		range_ = *httpRange
+	}
+
+	a := &AsyncRangeReader{
+		rangeGet:     rangeGet,
+		context:      context,
+		cancel:       cancel,
+		httpRange:    range_,
+		oriHttpRange: range_,
+		offset:       range_.Offset,
+		gotsize:      0,
+		etag:         etag,
+		buffers:      buffers,
+	}
+
+	//fmt.Printf("NewAsyncRangeReader, range: %s, etag:%s, buffer count:%v\n", ToString(a.httpRange.FormatHTTPRange()), a.etag, a.buffers)
+
+	a.init(buffers)
+	return a, nil
+}
+
+func (a *AsyncRangeReader) init(buffers int) {
+	a.ready = make(chan *buffer, buffers)
+	a.token = make(chan struct{}, buffers)
+	a.exit = make(chan struct{})
+	a.exited = make(chan struct{})
+	a.buffers = buffers
+	a.cur = nil
+	a.size = softStartInitial
+
+	// Create tokens
+	for i := 0; i < buffers; i++ {
+		a.token <- struct{}{}
+	}
+
+	// Start async reader
+	go func() {
+		// Ensure that when we exit this is signalled.
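+		// Each token in a.token grants permission to fill one buffer; filled
+		// buffers flow to the reader over a.ready, and tokens are returned by
+		// fill() once a buffer has been fully consumed.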
+ defer close(a.exited) + defer close(a.ready) + for { + select { + case <-a.token: + b := a.getBuffer() + if a.size < AsyncReadeBufferSize { + b.buf = b.buf[:a.size] + a.size <<= 1 + } + + if a.httpRange.Count > 0 && a.gotsize > a.httpRange.Count { + b.buf = b.buf[0:0] + b.err = io.EOF + //fmt.Printf("a.gotsize > a.httpRange.Count, err:%v\n", b.err) + a.ready <- b + return + } + + if a.in == nil { + httpRangeRemains := a.httpRange + if a.httpRange.Count > 0 { + gotNum := a.httpRange.Offset - a.oriHttpRange.Offset + if gotNum > 0 && a.httpRange.Count > gotNum { + httpRangeRemains.Count = a.httpRange.Count - gotNum + } + } + output, err := a.rangeGet(a.context, httpRangeRemains) + if err == nil { + etag := ToString(output.ETag) + if a.etag == "" { + a.etag = etag + } + if etag != a.etag { + err = fmt.Errorf("Source file is changed, expect etag:%s ,got etag:%s", a.etag, etag) + } + + // Partial Response check + var off int64 + if output.ContentRange == nil { + off = 0 + } else { + off, _, _, _ = ParseContentRange(*output.ContentRange) + } + if off != httpRangeRemains.Offset { + err = fmt.Errorf("Range get fail, expect offset:%v, got offset:%v", httpRangeRemains.Offset, off) + } + } + if err != nil { + b.buf = b.buf[0:0] + b.err = err + if output != nil && output.Body != nil { + output.Body.Close() + } + //fmt.Printf("call getFunc fail, err:%v\n", err) + a.ready <- b + return + } + body := output.Body + if httpRangeRemains.Count > 0 { + body = NewLimitedReadCloser(output.Body, httpRangeRemains.Count) + } + a.in = body + //fmt.Printf("call getFunc done, range:%s\n", ToString(httpRangeRemains.FormatHTTPRange())) + } + + // ignore err from read + err := b.read(a.in) + a.httpRange.Offset += int64(len(b.buf)) + a.gotsize += int64(len(b.buf)) + if err != io.EOF { + b.err = nil + } + //fmt.Printf("read into buffer, size:%v, next begin:%v, err:%v\n", len(b.buf), a.httpRange.Offset, err) + a.ready <- b + if err != nil { + a.in.Close() + a.in = nil + if err == io.EOF { + return + } + } + case <-a.exit: + return + } + } + }() +} + +func (a *AsyncRangeReader) fill() (err error) { + if a.cur.isEmpty() { + if a.cur != nil { + a.putBuffer(a.cur) + a.token <- struct{}{} + a.cur = nil + } + b, ok := <-a.ready + if !ok { + // Return an error to show fill failed + if a.err == nil { + return errors.New("stream abandoned") + } + return a.err + } + a.cur = b + } + return nil +} + +// Read will return the next available data. 
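The ReaderRangeGetFn consumed here is typically a thin closure over Client.GetObject, as in the prefetch path of ReadOnlyFile above. An illustrative sketch, with placeholder bucket/key and an arbitrary 8 MiB window:

package example

import (
	"context"
	"io"

	"github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss"
)

func prefetchRead(ctx context.Context, client *oss.Client) error {
	getFn := func(ctx context.Context, r oss.HTTPRange) (*oss.ReaderRangeGetOutput, error) {
		req := &oss.GetObjectRequest{
			Bucket: oss.Ptr("example-bucket"),
			Key:    oss.Ptr("backup/data.bin"),
		}
		if rangeStr := r.FormatHTTPRange(); rangeStr != nil {
			req.Range = rangeStr
			req.RangeBehavior = oss.Ptr("standard")
		}
		out, err := client.GetObject(ctx, req)
		if err != nil {
			return nil, err
		}
		return &oss.ReaderRangeGetOutput{
			Body:          out.Body,
			ETag:          out.ETag,
			ContentLength: out.ContentLength,
			ContentRange:  out.ContentRange,
		}, nil
	}

	// Four pipelined 1 MiB buffers over an 8 MiB window; the empty etag pins
	// consistency to whatever the first response returns.
	ar, err := oss.NewAsyncRangeReader(ctx, getFn, &oss.HTTPRange{Offset: 0, Count: 8 << 20}, "", 4)
	if err != nil {
		return err
	}
	defer ar.Close()

	_, err = io.Copy(io.Discard, ar)
	return err
}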
+func (a *AsyncRangeReader) Read(p []byte) (n int, err error) { + a.mu.Lock() + defer a.mu.Unlock() + defer func() { + a.offset += int64(n) + }() + + // Swap buffer and maybe return error + err = a.fill() + if err != nil { + return 0, err + } + + // Copy what we can + n = copy(p, a.cur.buffer()) + a.cur.increment(n) + + // If at end of buffer, return any error, if present + if a.cur.isEmpty() { + a.err = a.cur.err + return n, a.err + } + return n, nil +} + +func (a *AsyncRangeReader) Offset() int64 { + return a.offset +} + +func (a *AsyncRangeReader) Close() (err error) { + a.abandon() + if a.closed { + return nil + } + a.closed = true + + if a.in != nil { + err = a.in.Close() + } + return +} + +func (a *AsyncRangeReader) abandon() { + a.stop() + a.mu.Lock() + defer a.mu.Unlock() + if a.cur != nil { + a.putBuffer(a.cur) + a.cur = nil + } + for b := range a.ready { + a.putBuffer(b) + } +} + +func (a *AsyncRangeReader) stop() { + select { + case <-a.exit: + return + default: + } + a.cancel() + close(a.exit) + <-a.exited +} + +// bufferPool is a global pool of buffers +var bufferPool *sync.Pool +var bufferPoolOnce sync.Once + +// TODO use pool +func (a *AsyncRangeReader) putBuffer(b *buffer) { + b.buf = b.buf[0:cap(b.buf)] + bufferPool.Put(b.buf) +} + +func (a *AsyncRangeReader) getBuffer() *buffer { + bufferPoolOnce.Do(func() { + // Initialise the buffer pool when used + bufferPool = &sync.Pool{ + New: func() any { + //fmt.Printf("make([]byte, BufferSize)\n") + return make([]byte, AsyncReadeBufferSize) + }, + } + }) + return &buffer{ + buf: bufferPool.Get().([]byte), + } +} + +func readFill(r io.Reader, buf []byte) (n int, err error) { + var nn int + for n < len(buf) && err == nil { + nn, err = r.Read(buf[n:]) + n += nn + } + return n, err +} + +// MultiBytesReader A Reader implements the io.Reader, io.Seeker interfaces by reading from multi byte slice. +type MultiBytesReader struct { + s [][]byte + i int64 // current reading index + size int64 + rbuf int + rp int +} + +// Len returns the number of bytes of the unread portion of the slice. +func (r *MultiBytesReader) Len() int { + if r.i >= r.size { + return 0 + } + return int(r.size - r.i) +} + +// Size returns the original length of the underlying byte slice. +func (r *MultiBytesReader) Size() int64 { return r.size } + +// Read implements the io.Reader interface. +func (r *MultiBytesReader) Read(b []byte) (n int, err error) { + if r.i >= r.size { + return 0, io.EOF + } + + var nn int + for n < len(b) && err == nil { + nn, err = r.read(b[n:]) + n += nn + } + + if err == io.EOF { + err = nil + } + + return n, err +} + +func (r *MultiBytesReader) read(b []byte) (n int, err error) { + if r.i >= r.size { + return 0, io.EOF + } + + //if r.rp == cap(r.s[r.rbuf]) { + if r.rp == len(r.s[r.rbuf]) { + r.rbuf++ + r.rp = 0 + } + + if r.rbuf == len(r.s) { + err = io.EOF + return + } else if r.rbuf > len(r.s) { + return 0, fmt.Errorf("read overflow, rbuf:%d, buf len%d", r.rbuf, len(r.s)) + } + + n = copy(b, r.s[r.rbuf][r.rp:]) + r.rp += n + r.i += int64(n) + + return +} + +// Seek implements the io.Seeker interface. 
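MultiBytesReader presents several byte slices as one io.ReadSeeker without copying. A tiny sketch:

package example

import (
	"fmt"
	"io"

	"github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss"
)

func demoMultiBytesReader() {
	r := oss.NewMultiBytesReader([][]byte{
		[]byte("hello, "),
		[]byte("world"),
	})
	b, _ := io.ReadAll(r) // reads cross the slice boundary in one pass
	fmt.Printf("%s (%d bytes)\n", b, r.Size())
	// prints: hello, world (12 bytes)
}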
+func (r *MultiBytesReader) Seek(offset int64, whence int) (int64, error) { + var abs int64 + switch whence { + case io.SeekStart: + abs = offset + case io.SeekCurrent: + abs = r.i + offset + case io.SeekEnd: + abs = r.size + offset + default: + return 0, errors.New("MultiSliceReader.Seek: invalid whence") + } + if abs < 0 { + return 0, errors.New("MultiSliceReader.Seek: negative position") + } + r.i = abs + r.updateRp() + return abs, nil +} + +// Reset resets the Reader to be reading from b. +func (r *MultiBytesReader) Reset(b [][]byte) { + n := MultiBytesReader{ + s: b, + i: 0, + } + n.size = int64(r.calcSize(n.s)) + n.updateRp() + *r = n +} + +func (r *MultiBytesReader) calcSize(b [][]byte) int { + size := 0 + for i := 0; i < len(b); i++ { + size += len(r.s[i]) + } + return size +} + +func (r *MultiBytesReader) updateRp() { + remains := r.i + rbuf := 0 + for remains > 0 && rbuf < len(r.s) { + slen := int64(len(r.s[rbuf])) + if remains < slen { + break + } + rbuf++ + remains -= slen + } + r.rbuf = rbuf + r.rp = int(remains) +} + +// NewReader returns a new Reader reading from b. +func NewMultiBytesReader(b [][]byte) *MultiBytesReader { + r := &MultiBytesReader{ + s: b, + i: 0, + } + r.size = int64(r.calcSize(r.s)) + r.updateRp() + return r +} + +type RangeReader struct { + in io.ReadCloser // Input reader + closed bool // whether we have closed the underlying stream + + //Range Getter + rangeGet ReaderRangeGetFn + httpRange HTTPRange + + // For reader + offset int64 + + oriHttpRange HTTPRange + + context context.Context + + // Origin file pattern + etag string + modTime *time.Time + totalSize int64 +} + +// NewRangeReader returns a reader that will read from the Reader returued by getter from the given offset. +// The etag is used to identify the content of the object. If not set, the first ETag returned value will be used instead. +func NewRangeReader(ctx context.Context, rangeGet ReaderRangeGetFn, httpRange *HTTPRange, etag string) (*RangeReader, error) { + if rangeGet == nil { + return nil, errors.New("nil reader supplied") + } + + range_ := HTTPRange{} + if httpRange != nil { + range_ = *httpRange + } + + a := &RangeReader{ + rangeGet: rangeGet, + context: ctx, + httpRange: range_, + oriHttpRange: range_, + offset: range_.Offset, + etag: etag, + } + + //fmt.Printf("NewRangeReader, range: %s, etag:%s\n", ToString(a.httpRange.FormatHTTPRange()), a.etag) + + return a, nil +} + +// Read will return the next available data. 
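RangeReader is the lazy single-stream counterpart of AsyncRangeReader: the range GET is opened on first Read, reopened from the current offset after a dropped connection, and rejected if the object's ETag changes mid-stream. A sketch reusing the same style of getter closure shown above:

package example

import (
	"context"
	"io"

	"github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss"
)

// tailObject reads count bytes starting at off. getFn is the same kind of
// closure over Client.GetObject shown for AsyncRangeReader earlier.
func tailObject(ctx context.Context, getFn oss.ReaderRangeGetFn, off, count int64) ([]byte, error) {
	rr, err := oss.NewRangeReader(ctx, getFn, &oss.HTTPRange{Offset: off, Count: count}, "")
	if err != nil {
		return nil, err
	}
	defer rr.Close()
	return io.ReadAll(rr)
}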
+func (r *RangeReader) Read(p []byte) (n int, err error) { + defer func() { + r.offset += int64(n) + r.httpRange.Offset += int64(n) + }() + n, err = r.read(p) + return +} + +func (r *RangeReader) read(p []byte) (int, error) { + if r.closed { + return 0, fmt.Errorf("RangeReader is closed") + } + + // open stream + if r.in == nil { + httpRangeRemains := r.httpRange + if r.httpRange.Count > 0 { + gotNum := r.httpRange.Offset - r.oriHttpRange.Offset + if gotNum > 0 && r.httpRange.Count > gotNum { + httpRangeRemains.Count = r.httpRange.Count - gotNum + } + } + output, err := r.rangeGet(r.context, httpRangeRemains) + if err == nil { + etag := ToString(output.ETag) + if r.etag == "" { + r.etag = etag + r.modTime = output.LastModified + } + if etag != r.etag { + err = fmt.Errorf("Source file is changed, expect etag:%s ,got etag:%s", r.etag, etag) + } + + // Partial Response check + var off int64 + if output.ContentRange == nil { + off = 0 + r.totalSize = output.ContentLength + } else { + off, _, r.totalSize, _ = ParseContentRange(*output.ContentRange) + } + if off != httpRangeRemains.Offset { + err = fmt.Errorf("Range get fail, expect offset:%v, got offset:%v", httpRangeRemains.Offset, off) + } + } + if err != nil { + if output != nil && output.Body != nil { + output.Body.Close() + } + return 0, err + } + body := output.Body + if httpRangeRemains.Count > 0 { + body = NewLimitedReadCloser(output.Body, httpRangeRemains.Count) + } + r.in = body + } + + // read from stream + // ignore error when reading from stream + n, err := r.in.Read(p) + if err != nil && err != io.EOF { + r.in.Close() + r.in = nil + err = nil + } + + return n, err +} + +func (r *RangeReader) Offset() int64 { + return r.offset +} + +func (r *RangeReader) Close() (err error) { + if r.closed { + return nil + } + r.closed = true + + if r.in != nil { + err = r.in.Close() + } + return +} + +// TeeReadNopCloser returns a Reader that writes to w what it reads from r. +// All reads from r performed through it are matched with +// corresponding writes to w. There is no internal buffering - +// the write must complete before the read completes. +// Any error encountered while writing is reported as a read error. +func TeeReadNopCloser(reader io.Reader, writers ...io.Writer) io.ReadCloser { + return &teeReadNopCloser{ + reader: reader, + writers: writers, + mark: -1, + } +} + +type teeReadNopCloser struct { + reader io.Reader + writers []io.Writer + mark int64 +} + +func (t *teeReadNopCloser) Read(p []byte) (n int, err error) { + n, err = t.reader.Read(p) + if n > 0 { + for _, w := range t.writers { + if nn, err := w.Write(p[:n]); err != nil { + return nn, err + } + } + } + return +} + +func (t *teeReadNopCloser) Seek(offset int64, whence int) (int64, error) { + switch t := t.reader.(type) { + case io.Seeker: + return t.Seek(offset, whence) + } + return int64(0), nil +} + +func (t *teeReadNopCloser) Close() error { + return nil +} + +// IsSeekable tests if this reader supports Seek method. +func (t *teeReadNopCloser) IsSeekable() bool { + _, ok := t.reader.(io.Seeker) + return ok +} + +// MarkSupported tests if this reader supports the Mark and Reset methods. +func (t *teeReadNopCloser) MarkSupported() bool { + return t.IsSeekable() +} + +// Mark marks the current position in this reader. A subsequent call to +// the Reset method repositions this reader at the last marked position +// so that subsequent reads re-read the same bytes. 
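Mark and Reset make a tee'd request body replayable: Reset seeks the wrapped reader back to the mark and zeroes any side writers that expose their own Reset method (progress trackers, CRC hashers). An illustrative retry loop, not part of the SDK, where send stands in for a transport attempt:

package example

import (
	"bytes"
	"errors"
	"hash/crc64"
	"io"

	"github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss"
)

func sendWithReplay(send func(io.Reader) error, payload []byte) (err error) {
	crc := crc64.New(crc64.MakeTable(crc64.ECMA))
	body := oss.TeeReadNopCloser(bytes.NewReader(payload), crc)

	// Mark/Reset live on the concrete type behind the io.ReadCloser.
	mr, ok := body.(interface {
		Mark()
		Reset() error
	})
	if !ok {
		return errors.New("body does not support replay")
	}

	mr.Mark() // remember position 0; the CRC writer is re-zeroed on Reset
	for attempt := 0; attempt < 2; attempt++ {
		if attempt > 0 {
			if err = mr.Reset(); err != nil { // rewind reader and side writers
				return err
			}
		}
		if err = send(body); err == nil {
			return nil
		}
	}
	return err
}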
+func (t *teeReadNopCloser) Mark() { + if s, ok := t.reader.(io.Seeker); ok { + if pos, err := s.Seek(0, io.SeekCurrent); err == nil { + t.mark = pos + } + } +} + +// Reset repositions this stream to the position at the time +// the Mark method was last called on this reader. +func (t *teeReadNopCloser) Reset() error { + if !t.MarkSupported() { + return fmt.Errorf("Mark/Reset not supported") + } + + if t.mark < 0 { + return fmt.Errorf("Mark is not called yet") + } + + // seek to the last marked position + if s, ok := t.reader.(io.Seeker); ok { + if _, err := s.Seek(t.mark, io.SeekStart); err != nil { + return err + } + } + + // reset writer + type reseter interface { + Reset() + } + + for _, w := range t.writers { + if rw, ok := w.(reseter); ok { + rw.Reset() + } + } + + return nil +} + +type DiscardReadCloser struct { + RC io.ReadCloser + Discard int +} + +func (drc *DiscardReadCloser) Read(b []byte) (int, error) { + n, err := drc.RC.Read(b) + if drc.Discard == 0 || n <= 0 { + return n, err + } + + if n <= drc.Discard { + drc.Discard -= n + return 0, err + } + + realLen := n - drc.Discard + copy(b[0:realLen], b[drc.Discard:n]) + drc.Discard = 0 + return realLen, err +} + +func (drc *DiscardReadCloser) Close() error { + closer, ok := drc.RC.(io.ReadCloser) + if ok { + return closer.Close() + } + return nil +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/limiter.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/limiter.go new file mode 100644 index 000000000..49a8f99d1 --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/limiter.go @@ -0,0 +1,44 @@ +package oss + +import ( + "context" + "time" + + "golang.org/x/time/rate" +) + +const ( + BwTokenBucketSlotRx int = iota + BwTokenBucketSlotTx + BwTokenBucketSlots +) + +type BwTokenBucket struct { + // Byte/S + Bandwidth int64 + Limiter *rate.Limiter +} + +type BwTokenBuckets [BwTokenBucketSlots]*BwTokenBucket + +func newBwTokenBucket(bandwidth int64) *BwTokenBucket { + return &BwTokenBucket{ + Bandwidth: bandwidth, + Limiter: newEmptyTokenBucket(bandwidth), + } +} + +func newEmptyTokenBucket(bandwidth int64) *rate.Limiter { + const defaultMaxBurstSize = 4 * 1024 * 1024 + maxBurstSize := (bandwidth * defaultMaxBurstSize) / (256 * 1024 * 1024) + if maxBurstSize < defaultMaxBurstSize { + maxBurstSize = defaultMaxBurstSize + } + tb := rate.NewLimiter(rate.Limit(bandwidth), int(maxBurstSize)) + tb.AllowN(time.Now(), int(maxBurstSize)) + return tb +} + +func (tb *BwTokenBucket) LimitBandwidth(n int) { + tb.Limiter.WaitN(context.Background(), n) +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/logger.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/logger.go new file mode 100644 index 000000000..e9a7f458c --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/logger.go @@ -0,0 +1,130 @@ +package oss + +import ( + "fmt" + "log" + "strings" +) + +// A LogPrinter is a interface for the SDK to log messages to. +type LogPrinter interface { + Print(...any) +} + +// A LogPrinterFunc is a convenience type to wrap it so the LogPrinter interface can be used. +type LogPrinterFunc func(...any) + +// Print calls the wrapped function with the arguments provided +func (f LogPrinterFunc) Print(v ...any) { + f(v...) 
+} + +// Define the level of the output log +const ( + LogOff = iota + LogError + LogWarn + LogInfo + LogDebug +) + +var logLevelTag = []string{"", "ERROR ", "WARNING ", "INFO ", "DEBUG "} + +// Logger interface to handle logging +type Logger interface { + Debugf(format string, v ...any) + Infof(format string, v ...any) + Warnf(format string, v ...any) + Errorf(format string, v ...any) + Level() int +} + +type nopLogger struct { +} + +func (*nopLogger) Debugf(_ string, _ ...any) {} +func (*nopLogger) Infof(_ string, _ ...any) {} +func (*nopLogger) Warnf(_ string, _ ...any) {} +func (*nopLogger) Errorf(_ string, _ ...any) {} +func (*nopLogger) Level() int { return LogOff } + +// NewLogger returns a Logger +func NewLogger(level int, printer LogPrinter) Logger { + if level <= LogOff { + return &nopLogger{} + } + + if printer == nil { + printer = LogPrinterFunc(func(v ...any) { + log.Print(v...) + }) + } + + return &standardLogger{ + level: level, + printer: printer, + } +} + +type standardLogger struct { + level int + printer LogPrinter +} + +func (l *standardLogger) printf(level int, format string, v ...any) { + if l.printer == nil { + return + } + l.printer.Print(logLevelTag[level], fmt.Sprintf(format, v...)) +} + +func (l *standardLogger) Debugf(format string, v ...any) { + if l.level < LogDebug { + return + } + l.printf(LogDebug, format, v...) +} + +func (l *standardLogger) Infof(format string, v ...any) { + if l.level < LogInfo { + return + } + l.printf(LogInfo, format, v...) +} + +func (l *standardLogger) Warnf(format string, v ...any) { + if l.level < LogWarn { + return + } + l.printf(LogWarn, format, v...) +} + +func (l *standardLogger) Errorf(format string, v ...any) { + if l.level < LogError { + return + } + l.printf(LogError, format, v...) +} + +func (l *standardLogger) Level() int { + return l.level +} + +func ToLogLevel(s string) int { + s = strings.ToLower(s) + switch s { + case "error", "err": + return LogError + case "warning", "warn": + return LogWarn + case "info": + return LogInfo + case "debug", "dbg": + return LogDebug + default: + return LogOff + } +} + +var _ Logger = (*nopLogger)(nil) +var _ Logger = (*standardLogger)(nil) diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/progress.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/progress.go new file mode 100644 index 000000000..e96b54407 --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/progress.go @@ -0,0 +1,41 @@ +package oss + +import "io" + +type ProgressFunc func(increment, transferred, total int64) + +type progressTracker struct { + pr ProgressFunc + written int64 + lwritten int64 // last written + total int64 +} + +// NewProgress NewRequestProgress creates a tracker with progress reporting +func NewProgress(pr ProgressFunc, total int64) io.Writer { + return &progressTracker{ + pr: pr, + written: 0, + lwritten: 0, + total: total, + } +} + +func (p *progressTracker) Write(b []byte) (n int, err error) { + n = len(b) + p.written += int64(n) + + // Invokes the user's callback method to report progress + if p.written > p.lwritten { + p.pr(int64(n), p.written, p.total) + } + + return +} + +func (p *progressTracker) Reset() { + p.lwritten = p.written + p.written = 0 +} + +var _ RequestBodyTracker = (*progressTracker)(nil) diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/retry/backoff.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/retry/backoff.go new file mode 100644 index 000000000..550577e57 --- /dev/null +++ 
b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/retry/backoff.go @@ -0,0 +1,79 @@ +package retry + +import ( + "math" + "math/rand" + "time" +) + +type EqualJitterBackoff struct { + baseDelay time.Duration + maxBackoff time.Duration + attemptCelling int +} + +func NewEqualJJitterBackoff(baseDelay time.Duration, maxBackoff time.Duration) *EqualJitterBackoff { + return &EqualJitterBackoff{ + baseDelay: baseDelay, + maxBackoff: maxBackoff, + attemptCelling: int(math.Log2(float64(math.MaxInt64 / baseDelay))), + } +} + +func (j *EqualJitterBackoff) BackoffDelay(attempt int, err error) (time.Duration, error) { + // ceil = min(2 ^ attempts * baseDealy, maxBackoff) + // ceil/2 + [0.0, 1.0) *(ceil/2 + 1) + if attempt > j.attemptCelling { + attempt = j.attemptCelling + } + delayDuration := j.baseDelay * (1 << attempt) + if delayDuration > j.maxBackoff { + delayDuration = j.maxBackoff + } + half := delayDuration.Seconds() / 2 + return floatSecondsDuration(half + rand.Float64()*float64(half+1)), nil +} + +type FullJitterBackoff struct { + baseDelay time.Duration + maxBackoff time.Duration + attemptCelling int +} + +func NewFullJitterBackoff(baseDelay time.Duration, maxBackoff time.Duration) *FullJitterBackoff { + return &FullJitterBackoff{ + baseDelay: baseDelay, + maxBackoff: maxBackoff, + attemptCelling: int(math.Log2(float64(math.MaxInt64 / baseDelay))), + } +} + +func (j *FullJitterBackoff) BackoffDelay(attempt int, err error) (time.Duration, error) { + // [0.0, 1.0) * min(2 ^ attempts * baseDealy, maxBackoff) + if attempt > j.attemptCelling { + attempt = j.attemptCelling + } + delayDuration := j.baseDelay * (1 << attempt) + if delayDuration > j.maxBackoff { + delayDuration = j.maxBackoff + } + return floatSecondsDuration(rand.Float64() * float64(delayDuration.Seconds())), nil +} + +type FixedDelayBackoff struct { + fixedBackoff time.Duration +} + +func NewFixedDelayBackoff(fixedBackoff time.Duration) *FixedDelayBackoff { + return &FixedDelayBackoff{ + fixedBackoff: fixedBackoff, + } +} + +func (j *FixedDelayBackoff) BackoffDelay(attempt int, err error) (time.Duration, error) { + return j.fixedBackoff, nil +} + +func floatSecondsDuration(v float64) time.Duration { + return time.Duration(v * float64(time.Second)) +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/retry/retryable_error.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/retry/retryable_error.go new file mode 100644 index 000000000..fff7b91fc --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/retry/retryable_error.go @@ -0,0 +1,103 @@ +package retry + +import ( + "errors" + "io" + "net" + "net/url" + "strings" +) + +type HTTPStatusCodeRetryable struct { +} + +var retryErrorCodes = []int{ + 401, // Unauthorized + 408, // Request Timeout + 429, // Rate exceeded. 
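+	// Together with these specific codes, any HTTP status >= 500 is treated
+	// as retryable; see IsErrorRetryable below.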
+} + +func (*HTTPStatusCodeRetryable) IsErrorRetryable(err error) bool { + var v interface{ HttpStatusCode() int } + if errors.As(err, &v) { + code := v.HttpStatusCode() + if code >= 500 { + return true + } + for _, e := range retryErrorCodes { + if code == e { + return true + } + } + } + return false +} + +type ServiceErrorCodeRetryable struct { +} + +var retryServiceErrorCodes = map[string]struct{}{ + "RequestTimeTooSkewed": {}, + "BadRequest": {}, +} + +func (*ServiceErrorCodeRetryable) IsErrorRetryable(err error) bool { + var v interface{ ErrorCode() string } + if errors.As(err, &v) { + if _, ok := retryServiceErrorCodes[v.ErrorCode()]; ok { + return true + } + } + return false +} + +type ConnectionErrorRetryable struct{} + +var retriableErrorStrings = []string{ + "connection reset", + "connection refused", + "use of closed network connection", + "unexpected EOF reading trailer", + "transport connection broken", + "server closed idle connection", + "bad record MAC", + "stream error:", + "tls: use of closed connection", + "connection was forcibly closed", + "broken pipe", + "crc is inconsistent", // oss crc check error pattern +} + +var retriableErrors = []error{ + io.EOF, + io.ErrUnexpectedEOF, +} + +func (c *ConnectionErrorRetryable) IsErrorRetryable(err error) bool { + if err != nil { + switch t := err.(type) { + case *url.Error: + if t.Err != nil { + return c.IsErrorRetryable(t.Err) + } + case net.Error: + if t.Temporary() || t.Timeout() { + return true + } + } + + for _, retriableErr := range retriableErrors { + if err == retriableErr { + return true + } + } + + errString := err.Error() + for _, phrase := range retriableErrorStrings { + if strings.Contains(errString, phrase) { + return true + } + } + } + return false +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/retry/retryer.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/retry/retryer.go new file mode 100644 index 000000000..51634ec36 --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/retry/retryer.go @@ -0,0 +1,22 @@ +package retry + +import ( + "fmt" + "time" +) + +type Retryer interface { + IsErrorRetryable(error) bool + MaxAttempts() int + RetryDelay(attempt int, opErr error) (time.Duration, error) +} + +type NopRetryer struct{} + +func (NopRetryer) IsErrorRetryable(error) bool { return false } + +func (NopRetryer) MaxAttempts() int { return 1 } + +func (NopRetryer) RetryDelay(int, error) (time.Duration, error) { + return 0, fmt.Errorf("not retrying any attempt errors") +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/retry/standard.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/retry/standard.go new file mode 100644 index 000000000..fa8e38b77 --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/retry/standard.go @@ -0,0 +1,71 @@ +package retry + +import ( + "time" +) + +const ( + DefaultMaxAttempts = 3 + DefaultMaxBackoff = 20 * time.Second + DefaultBaseDelay = 200 * time.Millisecond +) + +var DefaultErrorRetryables = []ErrorRetryable{ + &HTTPStatusCodeRetryable{}, + &ServiceErrorCodeRetryable{}, + &ConnectionErrorRetryable{}, +} + +type Standard struct { + maxAttempts int + retryables []ErrorRetryable + backoff BackoffDelayer +} + +func NewStandard(fnOpts ...func(*RetryOptions)) *Standard { + o := RetryOptions{ + MaxAttempts: DefaultMaxAttempts, + MaxBackoff: DefaultMaxBackoff, + BaseDelay: DefaultBaseDelay, + ErrorRetryables: DefaultErrorRetryables, + } + + for _, fn := range fnOpts { + fn(&o) + } + + 
if o.MaxAttempts <= 0 { + o.MaxAttempts = DefaultMaxAttempts + } + + if o.BaseDelay <= 0 { + o.BaseDelay = DefaultBaseDelay + } + + if o.Backoff == nil { + o.Backoff = NewFullJitterBackoff(o.BaseDelay, o.MaxBackoff) + } + + return &Standard{ + maxAttempts: o.MaxAttempts, + retryables: o.ErrorRetryables, + backoff: o.Backoff, + } +} + +func (s *Standard) MaxAttempts() int { + return s.maxAttempts +} + +func (s *Standard) IsErrorRetryable(err error) bool { + for _, re := range s.retryables { + if v := re.IsErrorRetryable(err); v { + return v + } + } + return false +} + +func (s *Standard) RetryDelay(attempt int, err error) (time.Duration, error) { + return s.backoff.BackoffDelay(attempt, err) +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/retry/types.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/retry/types.go new file mode 100644 index 000000000..3fea8091d --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/retry/types.go @@ -0,0 +1,19 @@ +package retry + +import "time" + +type RetryOptions struct { + MaxAttempts int + MaxBackoff time.Duration + BaseDelay time.Duration + Backoff BackoffDelayer + ErrorRetryables []ErrorRetryable +} + +type BackoffDelayer interface { + BackoffDelay(attempt int, err error) (time.Duration, error) +} + +type ErrorRetryable interface { + IsErrorRetryable(error) bool +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/signer/signer.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/signer/signer.go new file mode 100644 index 000000000..684fe1428 --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/signer/signer.go @@ -0,0 +1,51 @@ +package signer + +import ( + "context" + "net/http" + "time" + + "github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/credentials" +) + +const ( + SubResource = "SubResource" + SignTime = "SignTime" +) + +type SigningContext struct { + //input + Product *string + Region *string + Bucket *string + Key *string + Request *http.Request + + SubResource []string + AdditionalHeaders []string + + Credentials *credentials.Credentials + + AuthMethodQuery bool + + // input and output + Time time.Time + ClockOffset time.Duration + + // output + SignedHeaders map[string]string + StringToSign string + + // for test + signTime *time.Time +} + +type Signer interface { + Sign(ctx context.Context, signingCtx *SigningContext) error +} + +type NopSigner struct{} + +func (*NopSigner) Sign(ctx context.Context, signingCtx *SigningContext) error { + return nil +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/signer/v1.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/signer/v1.go new file mode 100644 index 000000000..ea3a1dd3a --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/signer/v1.go @@ -0,0 +1,264 @@ +package signer + +import ( + "context" + "crypto/hmac" + "crypto/sha1" + "encoding/base64" + "fmt" + "hash" + "io" + "net/http" + "net/url" + "sort" + "strings" + "time" +) + +var requiredSignedParametersMap = map[string]struct{}{ + "acl": {}, + "bucketInfo": {}, + "location": {}, + "stat": {}, + "delete": {}, + "append": {}, + "tagging": {}, + "objectMeta": {}, + "uploads": {}, + "uploadId": {}, + "partNumber": {}, + "security-token": {}, + "position": {}, + "response-content-type": {}, + "response-content-language": {}, + "response-expires": {}, + "response-cache-control": {}, + "response-content-disposition": {}, + "response-content-encoding": {}, + "restore": {}, + "callback": {}, + 
"callback-var": {}, + "versions": {}, + "versioning": {}, + "versionId": {}, + "sequential": {}, + "continuation-token": {}, + "regionList": {}, + "cloudboxes": {}, + "symlink": {}, +} + +const ( + // headers + authorizationHeader = "Authorization" + securityTokenHeader = "x-oss-security-token" + + dateHeader = "Date" + contentTypeHeader = "Content-Type" + contentMd5Header = "Content-MD5" + ossHeaderPreifx = "x-oss-" + ossDateHeader = "x-oss-date" + + //Query + securityTokenQuery = "security-token" + expiresQuery = "Expires" + accessKeyIdQuery = "OSSAccessKeyId" + signatureQuery = "Signature" + + defaultExpiresDuration = 15 * time.Minute +) + +type SignerV1 struct { +} + +func isSubResource(list []string, key string) bool { + for _, k := range list { + if key == k { + return true + } + } + return false +} + +func (*SignerV1) calcStringToSign(date string, signingCtx *SigningContext) string { + /* + SignToString = + VERB + "\n" + + Content-MD5 + "\n" + + Content-Type + "\n" + + Date + "\n" + + CanonicalizedOSSHeaders + + CanonicalizedResource + Signature = base64(hmac-sha1(AccessKeySecret, SignToString)) + */ + request := signingCtx.Request + + contentMd5 := request.Header.Get(contentMd5Header) + contentType := request.Header.Get(contentTypeHeader) + + //CanonicalizedOSSHeaders + var headers []string + for k := range request.Header { + lowerCaseKey := strings.ToLower(k) + if strings.HasPrefix(lowerCaseKey, ossHeaderPreifx) { + headers = append(headers, lowerCaseKey) + } + } + sort.Strings(headers) + headerItems := make([]string, len(headers)) + for i, k := range headers { + headerValues := make([]string, len(request.Header.Values(k))) + for i, v := range request.Header.Values(k) { + headerValues[i] = strings.TrimSpace(v) + } + headerItems[i] = k + ":" + strings.Join(headerValues, ",") + "\n" + } + canonicalizedOSSHeaders := strings.Join(headerItems, "") + + //CanonicalizedResource + query := request.URL.Query() + var params []string + for k := range query { + if _, ok := requiredSignedParametersMap[k]; ok { + params = append(params, k) + } else if strings.HasPrefix(k, ossHeaderPreifx) { + params = append(params, k) + } else if isSubResource(signingCtx.SubResource, k) { + params = append(params, k) + } + } + sort.Strings(params) + paramItems := make([]string, len(params)) + for i, k := range params { + v := query.Get(k) + if len(v) > 0 { + paramItems[i] = k + "=" + v + } else { + paramItems[i] = k + } + } + subResource := strings.Join(paramItems, "&") + canonicalizedResource := "/" + if signingCtx.Bucket != nil { + canonicalizedResource += *signingCtx.Bucket + "/" + } + if signingCtx.Key != nil { + canonicalizedResource += *signingCtx.Key + } + if subResource != "" { + canonicalizedResource += "?" 
+ subResource + } + + // string to Sign + stringToSign := + request.Method + "\n" + + contentMd5 + "\n" + + contentType + "\n" + + date + "\n" + + canonicalizedOSSHeaders + + canonicalizedResource + + //fmt.Printf("stringToSign:%s\n", stringToSign) + return stringToSign +} + +func (s *SignerV1) authHeader(ctx context.Context, signingCtx *SigningContext) error { + request := signingCtx.Request + cred := signingCtx.Credentials + + // Date + if signingCtx.Time.IsZero() { + signingCtx.Time = time.Now().Add(signingCtx.ClockOffset) + } + datetime := signingCtx.Time.UTC().Format(http.TimeFormat) + request.Header.Set(dateHeader, datetime) + + // Credentials information + if cred.SecurityToken != "" { + request.Header.Set(securityTokenHeader, cred.SecurityToken) + } + + // StringToSign + stringToSign := s.calcStringToSign(datetime, signingCtx) + signingCtx.StringToSign = stringToSign + + // Signature + h := hmac.New(func() hash.Hash { return sha1.New() }, []byte(cred.AccessKeySecret)) + if _, err := io.WriteString(h, stringToSign); err != nil { + return err + } + signature := base64.StdEncoding.EncodeToString(h.Sum(nil)) + + // Authorization header + request.Header.Set(authorizationHeader, fmt.Sprintf("OSS %s:%s", cred.AccessKeyID, signature)) + + return nil +} + +func (s *SignerV1) authQuery(ctx context.Context, signingCtx *SigningContext) error { + request := signingCtx.Request + cred := signingCtx.Credentials + + // Date + if signingCtx.Time.IsZero() { + signingCtx.Time = time.Now().UTC().Add(defaultExpiresDuration) + } + datetime := fmt.Sprintf("%v", signingCtx.Time.UTC().Unix()) + + // Credentials information + query, _ := url.ParseQuery(request.URL.RawQuery) + if cred.SecurityToken != "" { + query.Add(securityTokenQuery, cred.SecurityToken) + request.URL.RawQuery = query.Encode() + } + + // StringToSign + stringToSign := s.calcStringToSign(datetime, signingCtx) + signingCtx.StringToSign = stringToSign + + // Signature + h := hmac.New(func() hash.Hash { return sha1.New() }, []byte(cred.AccessKeySecret)) + if _, err := io.WriteString(h, stringToSign); err != nil { + return err + } + signature := base64.StdEncoding.EncodeToString(h.Sum(nil)) + + // Authorization query + query.Add(expiresQuery, datetime) + query.Add(accessKeyIdQuery, cred.AccessKeyID) + query.Add(signatureQuery, signature) + request.URL.RawQuery = strings.Replace(query.Encode(), "+", "%20", -1) + + return nil +} + +func (s *SignerV1) Sign(ctx context.Context, signingCtx *SigningContext) error { + if signingCtx == nil { + return fmt.Errorf("SigningContext is null.") + } + + if signingCtx.Credentials == nil || !signingCtx.Credentials.HasKeys() { + return fmt.Errorf("SigningContext.Credentials is null or empty.") + } + + if signingCtx.Request == nil { + return fmt.Errorf("SigningContext.Request is null.") + } + + if signingCtx.AuthMethodQuery { + return s.authQuery(ctx, signingCtx) + } + + return s.authHeader(ctx, signingCtx) +} + +func (*SignerV1) IsSignedHeader(additionalHeaders []string, h string) bool { + lowerCaseKey := strings.ToLower(h) + if strings.HasPrefix(lowerCaseKey, ossHeaderPreifx) || + lowerCaseKey == "date" || + lowerCaseKey == "content-type" || + lowerCaseKey == "content-md5" { + return true + } + return false +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/signer/v4.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/signer/v4.go new file mode 100644 index 000000000..a0d46f947 --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/signer/v4.go @@ -0,0 +1,390 @@ 
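Between the two signer files, a hedged sketch of driving the V1 signer defined above against a prepared *http.Request. The key pair is a dummy; real code obtains credentials from a provider.

package example

import (
	"context"
	"net/http"

	"github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/credentials"
	"github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/signer"
)

// signV1 adds a V1 Authorization header to an already-built request.
func signV1(req *http.Request, bucket, key string) error {
	s := &signer.SignerV1{}
	return s.Sign(context.Background(), &signer.SigningContext{
		Bucket:  &bucket,
		Key:     &key,
		Request: req,
		Credentials: &credentials.Credentials{
			AccessKeyID:     "ak-placeholder",
			AccessKeySecret: "sk-placeholder",
		},
	})
}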
+package signer + +import ( + "bytes" + "context" + "crypto/hmac" + "crypto/sha256" + "encoding/hex" + "fmt" + "hash" + "io" + "net/http" + "net/url" + "sort" + "strings" + "time" +) + +const ( + // headers + contentSha256Header = "x-oss-content-sha256" + iso8601DatetimeFormat = "20060102T150405Z" + iso8601DateFormat = "20060102" + algorithmV4 = "OSS4-HMAC-SHA256" + + unsignedPayload = "UNSIGNED-PAYLOAD" +) + +var noEscape [256]bool + +func init() { + for i := 0; i < len(noEscape); i++ { + noEscape[i] = (i >= 'A' && i <= 'Z') || + (i >= 'a' && i <= 'z') || + (i >= '0' && i <= '9') || + i == '-' || + i == '.' || + i == '_' || + i == '~' + } +} + +func toString(p *string) (v string) { + if p == nil { + return v + } + return *p +} + +func escapePath(path string, encodeSep bool) string { + var buf bytes.Buffer + for i := 0; i < len(path); i++ { + c := path[i] + if noEscape[c] || (c == '/' && !encodeSep) { + buf.WriteByte(c) + } else { + fmt.Fprintf(&buf, "%%%02X", c) + } + } + return buf.String() +} + +func isDefaultSignedHeader(low string) bool { + if strings.HasPrefix(low, ossHeaderPreifx) || + low == "content-type" || + low == "content-md5" { + return true + } + return false +} + +func getCommonAdditionalHeaders(header http.Header, additionalHeaders []string) []string { + var keys []string + for _, k := range additionalHeaders { + lowK := strings.ToLower(k) + if isDefaultSignedHeader(lowK) { + //default signed header, skip + continue + } else if header.Get(lowK) != "" { + keys = append(keys, lowK) + } + } + sort.Strings(keys) + return keys +} + +type SignerV4 struct { +} + +func (s *SignerV4) calcStringToSign(datetime, scope, canonicalRequest string) string { + /** + StringToSign + "OSS4-HMAC-SHA256" + "\n" + + TimeStamp + "\n" + + Scope + "\n" + + Hex(SHA256Hash(Canonical Request)) + */ + hash256 := sha256.New() + hash256.Write([]byte(canonicalRequest)) + hashValue := hash256.Sum(nil) + canonicalHash := hex.EncodeToString(hashValue) + + return "OSS4-HMAC-SHA256" + "\n" + + datetime + "\n" + + scope + "\n" + + canonicalHash +} + +func (s *SignerV4) calcCanonicalRequest(signingCtx *SigningContext, additionalHeaders []string) string { + request := signingCtx.Request + /* + Canonical Request + HTTP Verb + "\n" + + Canonical URI + "\n" + + Canonical Query String + "\n" + + Canonical Headers + "\n" + + Additional Headers + "\n" + + Hashed PayLoad + */ + + //Canonical Uri + uri := "/" + if signingCtx.Bucket != nil { + uri += *signingCtx.Bucket + "/" + } + if signingCtx.Key != nil { + uri += *signingCtx.Key + } + canonicalUri := escapePath(uri, false) + + //Canonical Query + query := strings.Replace(request.URL.RawQuery, "+", "%20", -1) + values := make(map[string]string) + var params []string + for query != "" { + var key string + key, query, _ = strings.Cut(query, "&") + if key == "" { + continue + } + key, value, _ := strings.Cut(key, "=") + values[key] = value + params = append(params, key) + } + sort.Strings(params) + var buf strings.Builder + for _, k := range params { + if buf.Len() > 0 { + buf.WriteByte('&') + } + buf.WriteString(k) + if len(values[k]) > 0 { + buf.WriteByte('=') + buf.WriteString(values[k]) + } + } + canonicalQuery := buf.String() + + //Canonical Headers + var headers []string + buf.Reset() + addHeadersMap := make(map[string]bool) + for _, k := range additionalHeaders { + addHeadersMap[strings.ToLower(k)] = true + } + for k := range request.Header { + lowk := strings.ToLower(k) + if isDefaultSignedHeader(lowk) { + headers = append(headers, lowk) + } else if _, ok := 
addHeadersMap[lowk]; ok { + headers = append(headers, lowk) + } + } + sort.Strings(headers) + for _, k := range headers { + headerValues := make([]string, len(request.Header.Values(k))) + for i, v := range request.Header.Values(k) { + headerValues[i] = strings.TrimSpace(v) + } + buf.WriteString(k) + buf.WriteString(":") + buf.WriteString(strings.Join(headerValues, ",")) + buf.WriteString("\n") + } + canonicalHeaders := buf.String() + + //Additional Headers + canonicalAdditionalHeaders := strings.Join(additionalHeaders, ";") + + hashPayload := unsignedPayload + if val := request.Header.Get(contentSha256Header); val != "" { + hashPayload = val + } + + buf.Reset() + buf.WriteString(request.Method) + buf.WriteString("\n") + buf.WriteString(canonicalUri) + buf.WriteString("\n") + buf.WriteString(canonicalQuery) + buf.WriteString("\n") + buf.WriteString(canonicalHeaders) + buf.WriteString("\n") + buf.WriteString(canonicalAdditionalHeaders) + buf.WriteString("\n") + buf.WriteString(hashPayload) + + return buf.String() +} + +func buildScope(date, region, product string) string { + return fmt.Sprintf("%s/%s/%s/aliyun_v4_request", date, region, product) +} + +func (s *SignerV4) calcSignature(sk, date, region, product, stringToSign string) string { + hmacHash := func() hash.Hash { return sha256.New() } + + signingKey := "aliyun_v4" + sk + + h1 := hmac.New(func() hash.Hash { return sha256.New() }, []byte(signingKey)) + io.WriteString(h1, date) + h1Key := h1.Sum(nil) + + h2 := hmac.New(hmacHash, h1Key) + io.WriteString(h2, region) + h2Key := h2.Sum(nil) + + h3 := hmac.New(hmacHash, h2Key) + io.WriteString(h3, product) + h3Key := h3.Sum(nil) + + h4 := hmac.New(hmacHash, h3Key) + io.WriteString(h4, "aliyun_v4_request") + h4Key := h4.Sum(nil) + + h := hmac.New(hmacHash, h4Key) + io.WriteString(h, stringToSign) + signature := hex.EncodeToString(h.Sum(nil)) + + return signature +} + +func (s *SignerV4) authHeader(ctx context.Context, signingCtx *SigningContext) error { + request := signingCtx.Request + cred := signingCtx.Credentials + + // Date + if signingCtx.Time.IsZero() { + signingCtx.Time = time.Now().Add(signingCtx.ClockOffset) + } + utcTime := signingCtx.Time.UTC() + datetime := utcTime.Format(iso8601DatetimeFormat) + date := utcTime.Format(iso8601DateFormat) + request.Header.Set(ossDateHeader, datetime) + request.Header.Set(dateHeader, utcTime.Format(http.TimeFormat)) + + // Credentials information + if cred.SecurityToken != "" { + request.Header.Set(securityTokenHeader, cred.SecurityToken) + } + + // Other Headers + request.Header.Set(contentSha256Header, unsignedPayload) + + // Scope + region := toString(signingCtx.Region) + product := toString(signingCtx.Product) + scope := buildScope(date, region, product) + + additionalHeaders := getCommonAdditionalHeaders(request.Header, signingCtx.AdditionalHeaders) + + // CanonicalRequest + canonicalRequest := s.calcCanonicalRequest(signingCtx, additionalHeaders) + + // StringToSign + stringToSign := s.calcStringToSign(datetime, scope, canonicalRequest) + signingCtx.StringToSign = stringToSign + + // Signature + signature := s.calcSignature(cred.AccessKeySecret, date, region, product, stringToSign) + + // credential + var buf strings.Builder + buf.WriteString("OSS4-HMAC-SHA256 Credential=") + buf.WriteString(cred.AccessKeyID + "/" + scope) + if len(additionalHeaders) > 0 { + buf.WriteString(",AdditionalHeaders=") + buf.WriteString(strings.Join(additionalHeaders, ";")) + } + buf.WriteString(",Signature=") + buf.WriteString(signature) + + 
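+	// The assembled value has the form
+	//   OSS4-HMAC-SHA256 Credential=<ak>/<date>/<region>/<product>/aliyun_v4_request,AdditionalHeaders=<h1;h2>,Signature=<hex>
+	// where AdditionalHeaders appears only when extra signed headers were supplied.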
request.Header.Set(authorizationHeader, buf.String()) + + //fmt.Printf("canonicalRequest:\n%s\n", canonicalRequest) + + //fmt.Printf("stringToSign:\n%s\n", stringToSign) + + return nil +} + +func (s *SignerV4) authQuery(ctx context.Context, signingCtx *SigningContext) error { + request := signingCtx.Request + cred := signingCtx.Credentials + + // Date + now := time.Now().UTC() + if signingCtx.Time.IsZero() { + signingCtx.Time = now.Add(defaultExpiresDuration) + } + if signingCtx.signTime != nil { + now = signingCtx.signTime.UTC() + } + datetime := now.Format(iso8601DatetimeFormat) + date := now.Format(iso8601DateFormat) + expires := signingCtx.Time.Unix() - now.Unix() + + // Scope + region := toString(signingCtx.Region) + product := toString(signingCtx.Product) + scope := buildScope(date, region, product) + + additionalHeaders := getCommonAdditionalHeaders(request.Header, signingCtx.AdditionalHeaders) + + // Credentials information + query, _ := url.ParseQuery(request.URL.RawQuery) + if cred.SecurityToken != "" { + query.Add("x-oss-security-token", cred.SecurityToken) + } + query.Add("x-oss-signature-version", algorithmV4) + query.Add("x-oss-date", datetime) + query.Add("x-oss-expires", fmt.Sprintf("%v", expires)) + query.Add("x-oss-credential", fmt.Sprintf("%s/%s", cred.AccessKeyID, scope)) + if len(additionalHeaders) > 0 { + query.Add("x-oss-additional-headers", strings.Join(additionalHeaders, ";")) + } + request.URL.RawQuery = query.Encode() + + // CanonicalRequest + canonicalRequest := s.calcCanonicalRequest(signingCtx, additionalHeaders) + + // StringToSign + stringToSign := s.calcStringToSign(datetime, scope, canonicalRequest) + signingCtx.StringToSign = stringToSign + + //fmt.Printf("canonicalRequest:\n%s\n", canonicalRequest) + + //fmt.Printf("stringToSign:\n%s\n", stringToSign) + + // Signature + signature := s.calcSignature(cred.AccessKeySecret, date, region, product, stringToSign) + + // Authorization query + query.Add("x-oss-signature", signature) + request.URL.RawQuery = strings.Replace(query.Encode(), "+", "%20", -1) + + return nil +} + +func (s *SignerV4) Sign(ctx context.Context, signingCtx *SigningContext) error { + if signingCtx == nil { + return fmt.Errorf("SigningContext is null.") + } + + if signingCtx.Credentials == nil || !signingCtx.Credentials.HasKeys() { + return fmt.Errorf("SigningContext.Credentials is null or empty.") + } + + if signingCtx.Request == nil { + return fmt.Errorf("SigningContext.Request is null.") + } + if signingCtx.AuthMethodQuery { + return s.authQuery(ctx, signingCtx) + } + return s.authHeader(ctx, signingCtx) +} + +func (s *SignerV4) IsSignedHeader(additionalHeaders []string, h string) bool { + return isDefaultSignedHeader(strings.ToLower(h)) || ContainsStr(additionalHeaders, h) +} + +// ContainsStr Used to check if the string is in the slice +func ContainsStr(slice []string, str string) bool { + for _, item := range slice { + if strings.ToLower(str) == strings.ToLower(item) { + return true + } + } + return false +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/to_ptr.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/to_ptr.go new file mode 100644 index 000000000..9827c96f6 --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/to_ptr.go @@ -0,0 +1,15 @@ +package oss + +// Ptr returns a pointer to the provided value. +func Ptr[T any](v T) *T { + return &v +} + +// SliceOfPtrs returns a slice of *T from the specified values. 
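+//
+// For example, SliceOfPtrs(1, 2, 3) returns a []*int whose elements point at 1, 2 and 3.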
+func SliceOfPtrs[T any](vv ...T) []*T { + slc := make([]*T, len(vv)) + for i := range vv { + slc[i] = Ptr(vv[i]) + } + return slc +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/transport/dialer.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/transport/dialer.go new file mode 100644 index 000000000..0d24e99ed --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/transport/dialer.go @@ -0,0 +1,88 @@ +package transport + +import ( + "context" + "net" + "time" +) + +// Dialer +type Dialer struct { + net.Dialer + // Read/Write timeout + timeout time.Duration + postRead []func(n int, err error) + postWrite []func(n int, err error) +} + +func newDialer(cfg *Config) *Dialer { + dialer := &Dialer{ + Dialer: net.Dialer{ + Timeout: *cfg.ConnectTimeout, + KeepAlive: *cfg.KeepAliveTimeout, + }, + timeout: *cfg.ReadWriteTimeout, + postRead: cfg.PostRead, + postWrite: cfg.PostWrite, + } + return dialer +} + +func (d *Dialer) Dial(network, address string) (net.Conn, error) { + return d.DialContext(context.Background(), network, address) +} + +func (d *Dialer) DialContext(ctx context.Context, network, address string) (net.Conn, error) { + c, err := d.Dialer.DialContext(ctx, network, address) + if err != nil { + return c, err + } + + timeout := d.timeout + if u, ok := ctx.Value("OpReadWriteTimeout").(*time.Duration); ok { + timeout = *u + } + + t := &timeoutConn{ + Conn: c, + timeout: timeout, + dialer: d, + } + return t, t.nudgeDeadline() +} + +// A net.Conn with Read/Write timeout and rate limiting, +type timeoutConn struct { + net.Conn + timeout time.Duration + dialer *Dialer +} + +func (c *timeoutConn) nudgeDeadline() error { + if c.timeout > 0 { + return c.SetDeadline(time.Now().Add(c.timeout)) + } + return nil +} + +func (c *timeoutConn) Read(b []byte) (n int, err error) { + n, err = c.Conn.Read(b) + for _, fn := range c.dialer.postRead { + fn(n, err) + } + if err == nil && n > 0 && c.timeout > 0 { + err = c.nudgeDeadline() + } + return n, err +} + +func (c *timeoutConn) Write(b []byte) (n int, err error) { + n, err = c.Conn.Write(b) + for _, fn := range c.dialer.postWrite { + fn(n, err) + } + if err == nil && n > 0 && c.timeout > 0 { + err = c.nudgeDeadline() + } + return n, err +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/transport/http.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/transport/http.go new file mode 100644 index 000000000..3be398345 --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/transport/http.go @@ -0,0 +1,177 @@ +package transport + +import ( + "crypto/tls" + "net/http" + "net/url" + "time" +) + +// Defaults for the Transport +var ( + DefaultConnectTimeout = 5 * time.Second + DefaultReadWriteTimeout = 10 * time.Second + DefaultIdleConnectionTimeout = 50 * time.Second + DefaultExpectContinueTimeout = 1 * time.Second + DefaultKeepAliveTimeout = 30 * time.Second + + DefaultMaxConnections = 100 + + // Default to TLS 1.2 for all HTTPS requests. 
+ DefaultTLSMinVersion uint16 = tls.VersionTLS12 +) + +var DefaultConfig = Config{ + ConnectTimeout: &DefaultConnectTimeout, + ReadWriteTimeout: &DefaultReadWriteTimeout, + IdleConnectionTimeout: &DefaultIdleConnectionTimeout, + KeepAliveTimeout: &DefaultKeepAliveTimeout, +} + +type Config struct { + ConnectTimeout *time.Duration + ReadWriteTimeout *time.Duration + IdleConnectionTimeout *time.Duration + KeepAliveTimeout *time.Duration + EnabledRedirect *bool + + PostRead []func(n int, err error) + PostWrite []func(n int, err error) +} + +func newTransportCustom(cfg *Config, fns ...func(*http.Transport)) http.RoundTripper { + tr := &http.Transport{ + DialContext: newDialer(cfg).DialContext, + TLSHandshakeTimeout: *cfg.ConnectTimeout, + IdleConnTimeout: *cfg.IdleConnectionTimeout, + MaxConnsPerHost: DefaultMaxConnections, + ExpectContinueTimeout: DefaultExpectContinueTimeout, + TLSClientConfig: &tls.Config{ + MinVersion: DefaultTLSMinVersion, + }, + } + + for _, fn := range fns { + fn(tr) + } + + return tr +} + +func (c *Config) mergeIn(cfgs ...*Config) { + for _, other := range cfgs { + mergeInConfig(c, other) + } +} + +func (c *Config) copy(cfgs ...*Config) *Config { + dst := &Config{} + dst.mergeIn(c) + + for _, cfg := range cfgs { + dst.mergeIn(cfg) + } + + return dst +} + +func mergeInConfig(dst *Config, other *Config) { + if other == nil { + return + } + + if other.ConnectTimeout != nil { + dst.ConnectTimeout = other.ConnectTimeout + } + + if other.ReadWriteTimeout != nil { + dst.ReadWriteTimeout = other.ReadWriteTimeout + } + + if other.IdleConnectionTimeout != nil { + dst.IdleConnectionTimeout = other.IdleConnectionTimeout + } + + if other.KeepAliveTimeout != nil { + dst.KeepAliveTimeout = other.KeepAliveTimeout + } + + if other.EnabledRedirect != nil { + dst.EnabledRedirect = other.EnabledRedirect + } + + if other.PostRead != nil { + dst.PostRead = make([]func(n int, err error), len(other.PostRead)) + copy(dst.PostRead, other.PostRead) + } + + if other.PostWrite != nil { + dst.PostWrite = make([]func(n int, err error), len(other.PostWrite)) + copy(dst.PostWrite, other.PostWrite) + } +} + +func InsecureSkipVerify(enabled bool) func(*http.Transport) { + return func(t *http.Transport) { + if t.TLSClientConfig != nil { + t.TLSClientConfig.InsecureSkipVerify = enabled + } else { + t.TLSClientConfig = &tls.Config{ + InsecureSkipVerify: enabled, + } + } + } +} + +func MaxConnections(value int) func(*http.Transport) { + return func(t *http.Transport) { + t.MaxConnsPerHost = value + } +} + +func ExpectContinueTimeout(value time.Duration) func(*http.Transport) { + return func(t *http.Transport) { + t.ExpectContinueTimeout = value + } +} + +func TLSMinVersion(value int) func(*http.Transport) { + return func(t *http.Transport) { + if t.TLSClientConfig != nil { + t.TLSClientConfig.MinVersion = uint16(value) + } else { + t.TLSClientConfig = &tls.Config{ + MinVersion: uint16(value), + } + } + } +} + +func HttpProxy(fixedURL *url.URL) func(*http.Transport) { + return func(t *http.Transport) { + t.Proxy = http.ProxyURL(fixedURL) + } +} + +func ProxyFromEnvironment() func(*http.Transport) { + return func(t *http.Transport) { + t.Proxy = http.ProxyFromEnvironment + } +} + +func NewHttpClient(cfg *Config, fns ...func(*http.Transport)) *http.Client { + cfg = DefaultConfig.copy(cfg) + client := &http.Client{ + Transport: newTransportCustom(cfg, fns...), + //Disalbe Redirect + CheckRedirect: func(req *http.Request, via []*http.Request) error { + return http.ErrUseLastResponse + }, + } + + if 
cfg.EnabledRedirect != nil && *cfg.EnabledRedirect { + client.CheckRedirect = nil + } + + return client +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/types.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/types.go new file mode 100644 index 000000000..a0c737f73 --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/types.go @@ -0,0 +1,162 @@ +package oss + +import ( + "context" + "io" + "net/http" +) + +type OperationMetadata struct { + values map[any][]any +} + +func (m OperationMetadata) Get(key any) any { + if m.values == nil { + return nil + } + v := m.values[key] + if len(v) == 0 { + return nil + } + return v[0] +} + +func (m OperationMetadata) Values(key any) []any { + if m.values == nil { + return nil + } + return m.values[key] +} + +func (m *OperationMetadata) Add(key, value any) { + if m.values == nil { + m.values = map[any][]any{} + } + m.values[key] = append(m.values[key], value) +} + +func (m *OperationMetadata) Set(key, value any) { + if m.values == nil { + m.values = map[any][]any{} + } + m.values[key] = []any{value} +} + +func (m OperationMetadata) Has(key any) bool { + if m.values == nil { + return false + } + _, ok := m.values[key] + return ok +} + +func (m OperationMetadata) Clone() OperationMetadata { + vs := make(map[any][]any, len(m.values)) + for k, v := range m.values { + vv := make([]any, len(v)) + copy(vv, v) + vs[k] = vv + } + return OperationMetadata{ + values: vs, + } +} + +type RequestCommon struct { + Headers map[string]string + Parameters map[string]string + Payload io.Reader +} + +type RequestCommonInterface interface { + GetCommonFileds() (map[string]string, map[string]string, io.Reader) +} + +func (r *RequestCommon) GetCommonFileds() (map[string]string, map[string]string, io.Reader) { + return r.Headers, r.Parameters, r.Payload +} + +type ResultCommon struct { + Status string + StatusCode int + Headers http.Header + OpMetadata OperationMetadata +} + +type ResultCommonInterface interface { + CopyIn(status string, statusCode int, headers http.Header, meta OperationMetadata) +} + +func (r *ResultCommon) CopyIn(status string, statusCode int, headers http.Header, meta OperationMetadata) { + r.Status = status + r.StatusCode = statusCode + r.Headers = headers + r.OpMetadata = meta +} + +type OperationInput struct { + OpName string + Method string + Headers map[string]string + Parameters map[string]string + Body io.Reader + + Bucket *string + Key *string + + OpMetadata OperationMetadata +} + +type OperationOutput struct { + Input *OperationInput + + Status string + StatusCode int + Headers http.Header + Body io.ReadCloser + + OpMetadata OperationMetadata + + httpRequest *http.Request +} + +type RequestBodyTracker interface { + io.Writer + Reset() +} + +type DownloadAPIClient interface { + HeadObject(ctx context.Context, request *HeadObjectRequest, optFns ...func(*Options)) (*HeadObjectResult, error) + GetObject(ctx context.Context, request *GetObjectRequest, optFns ...func(*Options)) (*GetObjectResult, error) +} + +type UploadAPIClient interface { + HeadObject(ctx context.Context, request *HeadObjectRequest, optFns ...func(*Options)) (*HeadObjectResult, error) + PutObject(ctx context.Context, request *PutObjectRequest, optFns ...func(*Options)) (*PutObjectResult, error) + InitiateMultipartUpload(ctx context.Context, request *InitiateMultipartUploadRequest, optFns ...func(*Options)) (*InitiateMultipartUploadResult, error) + UploadPart(ctx context.Context, request *UploadPartRequest, optFns 
...func(*Options)) (*UploadPartResult, error) + CompleteMultipartUpload(ctx context.Context, request *CompleteMultipartUploadRequest, optFns ...func(*Options)) (*CompleteMultipartUploadResult, error) + AbortMultipartUpload(ctx context.Context, request *AbortMultipartUploadRequest, optFns ...func(*Options)) (*AbortMultipartUploadResult, error) + ListParts(ctx context.Context, request *ListPartsRequest, optFns ...func(*Options)) (*ListPartsResult, error) +} + +type OpenFileAPIClient interface { + HeadObject(ctx context.Context, request *HeadObjectRequest, optFns ...func(*Options)) (*HeadObjectResult, error) + GetObject(ctx context.Context, request *GetObjectRequest, optFns ...func(*Options)) (*GetObjectResult, error) +} + +type AppendFileAPIClient interface { + HeadObject(ctx context.Context, request *HeadObjectRequest, optFns ...func(*Options)) (*HeadObjectResult, error) + AppendObject(ctx context.Context, request *AppendObjectRequest, optFns ...func(*Options)) (*AppendObjectResult, error) +} + +type CopyAPIClient interface { + HeadObject(ctx context.Context, request *HeadObjectRequest, optFns ...func(*Options)) (*HeadObjectResult, error) + CopyObject(ctx context.Context, request *CopyObjectRequest, optFns ...func(*Options)) (*CopyObjectResult, error) + InitiateMultipartUpload(ctx context.Context, request *InitiateMultipartUploadRequest, optFns ...func(*Options)) (*InitiateMultipartUploadResult, error) + UploadPartCopy(ctx context.Context, request *UploadPartCopyRequest, optFns ...func(*Options)) (*UploadPartCopyResult, error) + CompleteMultipartUpload(ctx context.Context, request *CompleteMultipartUploadRequest, optFns ...func(*Options)) (*CompleteMultipartUploadResult, error) + AbortMultipartUpload(ctx context.Context, request *AbortMultipartUploadRequest, optFns ...func(*Options)) (*AbortMultipartUploadResult, error) + ListParts(ctx context.Context, request *ListPartsRequest, optFns ...func(*Options)) (*ListPartsResult, error) + GetObjectTagging(ctx context.Context, request *GetObjectTaggingRequest, optFns ...func(*Options)) (*GetObjectTaggingResult, error) +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/uploader.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/uploader.go new file mode 100644 index 000000000..28db8dd72 --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/uploader.go @@ -0,0 +1,768 @@ +package oss + +import ( + "bytes" + "context" + "fmt" + "io" + "os" + "sort" + "strconv" + "sync" + "sync/atomic" +) + +type UploaderOptions struct { + PartSize int64 + + ParallelNum int + + LeavePartsOnError bool + + EnableCheckpoint bool + + CheckpointDir string + + ClientOptions []func(*Options) +} + +type Uploader struct { + options UploaderOptions + client UploadAPIClient + featureFlags FeatureFlagsType + isEncryptionClient bool +} + +// NewUploader creates a new Uploader instance to upload objects. +// Pass In additional functional options to customize the uploader's behavior. 
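+//
+// A minimal usage sketch, assuming a ctx context.Context and a configured
+// client (bucket, key and the local path are placeholders):
+//
+//	u := NewUploader(client, func(uo *UploaderOptions) {
+//		uo.PartSize = 10 * 1024 * 1024
+//		uo.ParallelNum = 4
+//	})
+//	result, err := u.UploadFile(ctx, &PutObjectRequest{
+//		Bucket: Ptr("examplebucket"),
+//		Key:    Ptr("exampleobject"),
+//	}, "/local/dir/example")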
+func NewUploader(c UploadAPIClient, optFns ...func(*UploaderOptions)) *Uploader { + options := UploaderOptions{ + PartSize: DefaultUploadPartSize, + ParallelNum: DefaultUploadParallel, + LeavePartsOnError: false, + } + + for _, fn := range optFns { + fn(&options) + } + + u := &Uploader{ + client: c, + options: options, + isEncryptionClient: false, + } + + //Get Client Feature + switch t := c.(type) { + case *Client: + u.featureFlags = t.options.FeatureFlags + case *EncryptionClient: + u.featureFlags = t.Unwrap().options.FeatureFlags + u.isEncryptionClient = true + } + + return u +} + +type UploadResult struct { + UploadId *string + + ETag *string + + VersionId *string + + HashCRC64 *string + + ResultCommon +} + +type UploadError struct { + Err error + UploadId string + Path string +} + +func (m *UploadError) Error() string { + var extra string + if m.Err != nil { + extra = fmt.Sprintf(", cause: %s", m.Err.Error()) + } + return fmt.Sprintf("upload failed, upload id: %s%s", m.UploadId, extra) +} + +func (m *UploadError) Unwrap() error { + return m.Err +} + +func (u *Uploader) UploadFrom(ctx context.Context, request *PutObjectRequest, body io.Reader, optFns ...func(*UploaderOptions)) (*UploadResult, error) { + // Uploader wrapper + delegate, err := u.newDelegate(ctx, request, optFns...) + if err != nil { + return nil, err + } + + delegate.body = body + if err = delegate.applySource(); err != nil { + return nil, err + } + + return delegate.upload() +} + +func (u *Uploader) UploadFile(ctx context.Context, request *PutObjectRequest, filePath string, optFns ...func(*UploaderOptions)) (*UploadResult, error) { + // Uploader wrapper + delegate, err := u.newDelegate(ctx, request, optFns...) + if err != nil { + return nil, err + } + + // Source + if err = delegate.checkSource(filePath); err != nil { + return nil, err + } + + var file *os.File + if file, err = delegate.openReader(); err != nil { + return nil, err + } + delegate.body = file + + if err = delegate.applySource(); err != nil { + return nil, err + } + + if err = delegate.checkCheckpoint(); err != nil { + return nil, err + } + + if err = delegate.adjustSource(); err != nil { + return nil, err + } + + result, err := delegate.upload() + + return result, delegate.closeReader(file, err) +} + +type uploaderDelegate struct { + base *Uploader + options UploaderOptions + client UploadAPIClient + context context.Context + request *PutObjectRequest + + body io.Reader + readerPos int64 + totalSize int64 + hashCRC64 uint64 + transferred int64 + + // Source's Info, from file or reader + filePath string + fileInfo os.FileInfo + + // for resumable upload + uploadId string + partNumber int32 + cseContext *EncryptionMultiPartContext + uploadedParts []Part + + partPool byteSlicePool + + checkpoint *uploadCheckpoint +} + +type uploadIdInfo struct { + uploadId string + startNum int32 + cseContext *EncryptionMultiPartContext +} + +func (u *Uploader) newDelegate(ctx context.Context, request *PutObjectRequest, optFns ...func(*UploaderOptions)) (*uploaderDelegate, error) { + if request == nil { + return nil, NewErrParamNull("request") + } + + if request.Bucket == nil { + return nil, NewErrParamNull("request.Bucket") + } + + if request.Key == nil { + return nil, NewErrParamNull("request.Key") + } + + d := uploaderDelegate{ + base: u, + options: u.options, + client: u.client, + context: ctx, + request: request, + } + + for _, opt := range optFns { + opt(&d.options) + } + + if d.options.ParallelNum <= 0 { + d.options.ParallelNum = DefaultUploadParallel + } + if 
d.options.PartSize <= 0 { + d.options.PartSize = DefaultUploadPartSize + } + + if _, ok := d.request.Parameters["sequential"]; ok { + d.options.ParallelNum = 1 + } + + return &d, nil +} + +func (u *uploaderDelegate) checkSource(filePath string) error { + if filePath == "" { + return NewErrParamRequired("filePath") + } + + // if !FileExists(filePath) { + // return fmt.Errorf("File not exists, %v", filePath) + // } + + info, err := os.Stat(filePath) + if err != nil { + if os.IsNotExist(err) { + return fmt.Errorf("File not exists, %v", filePath) + } + return err + } + + u.filePath = filePath + u.fileInfo = info + + return nil +} + +func (u *uploaderDelegate) applySource() error { + if u.body == nil { + return NewErrParamNull("the body is null") + } + + totalSize := GetReaderLen(u.body) + + //Part Size + partSize := u.options.PartSize + if totalSize > 0 { + for totalSize/partSize >= int64(MaxUploadParts) { + partSize += u.options.PartSize + } + } + + u.totalSize = totalSize + u.options.PartSize = partSize + + return nil +} + +func (u *uploaderDelegate) adjustSource() error { + // resume from upload id + if u.uploadId != "" { + // if the body supports seek + r, ok := u.body.(io.Seeker) + // not support + if !ok { + u.uploadId = "" + return nil + } + + // if upload id is valid + paginator := NewListPartsPaginator(u.client, &ListPartsRequest{ + Bucket: u.request.Bucket, + Key: u.request.Key, + UploadId: Ptr(u.uploadId), + }) + + // find consecutive sequence from min part number + var ( + checkPartNumber int32 = 1 + updateCRC64 bool = ((u.base.featureFlags & FeatureEnableCRC64CheckUpload) > 0) + hashCRC64 uint64 = 0 + page *ListPartsResult + err error + uploadedParts []Part + ) + outerLoop: + + for paginator.HasNext() { + page, err = paginator.NextPage(u.context, u.options.ClientOptions...) 
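+			// Any failure while validating the saved upload id falls back to a
+			// fresh upload: the id is cleared and no error is surfaced.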
+ if err != nil { + u.uploadId = "" + return nil + } + for _, p := range page.Parts { + if p.PartNumber != checkPartNumber || + p.Size != u.options.PartSize { + break outerLoop + } + checkPartNumber++ + uploadedParts = append(uploadedParts, p) + if updateCRC64 && p.HashCRC64 != nil { + value, _ := strconv.ParseUint(ToString(p.HashCRC64), 10, 64) + hashCRC64 = CRC64Combine(hashCRC64, value, uint64(p.Size)) + } + } + } + + partNumber := checkPartNumber - 1 + newOffset := int64(partNumber) * u.options.PartSize + if _, err := r.Seek(newOffset, io.SeekStart); err != nil { + u.uploadId = "" + return nil + } + + cseContext, err := u.resumeCSEContext(page) + if err != nil { + u.uploadId = "" + return nil + } + + u.partNumber = partNumber + u.readerPos = newOffset + u.hashCRC64 = hashCRC64 + u.cseContext = cseContext + u.uploadedParts = uploadedParts + } + return nil +} + +func (d *uploaderDelegate) checkCheckpoint() error { + if d.options.EnableCheckpoint { + d.checkpoint = newUploadCheckpoint(d.request, d.filePath, d.options.CheckpointDir, d.fileInfo, d.options.PartSize) + if err := d.checkpoint.load(); err != nil { + return err + } + + if d.checkpoint.Loaded { + d.uploadId = d.checkpoint.Info.Data.UploadInfo.UploadId + } + d.options.LeavePartsOnError = true + } + return nil +} + +func (d *uploaderDelegate) openReader() (*os.File, error) { + file, err := os.Open(d.filePath) + if err != nil { + return nil, err + } + + d.body = file + return file, nil +} + +func (d *uploaderDelegate) closeReader(file *os.File, err error) error { + if file != nil { + file.Close() + } + + if d.checkpoint != nil && err == nil { + d.checkpoint.remove() + } + + d.body = nil + d.checkpoint = nil + + return err +} + +func (d *uploaderDelegate) resumeCSEContext(result *ListPartsResult) (*EncryptionMultiPartContext, error) { + if !d.base.isEncryptionClient { + return nil, nil + } + sc, ok := d.client.(*EncryptionClient) + if !ok { + return nil, fmt.Errorf("Not EncryptionClient") + } + + envelope, err := getEnvelopeFromListParts(result) + if err != nil { + return nil, err + } + + cc, err := sc.defualtCCBuilder.ContentCipherEnv(envelope) + if err != nil { + return nil, err + } + + cseContext := &EncryptionMultiPartContext{ + ContentCipher: cc, + PartSize: ToInt64(result.ClientEncryptionPartSize), + DataSize: ToInt64(result.ClientEncryptionDataSize), + } + + if !cseContext.Valid() { + return nil, fmt.Errorf("EncryptionMultiPartContext is invalid") + } + + return cseContext, nil +} + +func (u *uploaderDelegate) upload() (*UploadResult, error) { + if u.totalSize >= 0 && u.totalSize < u.options.PartSize { + return u.singlePart() + } + return u.multiPart() +} + +func (u *uploaderDelegate) singlePart() (*UploadResult, error) { + request := &PutObjectRequest{} + copyRequest(request, u.request) + request.Body = u.body + if request.ContentType == nil { + request.ContentType = u.getContentType() + } + + result, err := u.client.PutObject(u.context, request, u.options.ClientOptions...) 
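+	// upload() routes bodies smaller than one part size here, so a single
+	// PutObject call is sufficient; everything larger goes through multiPart.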
+ + if err != nil { + return nil, u.wrapErr("", err) + } + + return &UploadResult{ + ETag: result.ETag, + VersionId: result.VersionId, + HashCRC64: result.HashCRC64, + ResultCommon: result.ResultCommon, + }, nil +} + +func (u *uploaderDelegate) nextReader() (io.ReadSeeker, int, func(), error) { + type readerAtSeeker interface { + io.ReaderAt + io.ReadSeeker + } + switch r := u.body.(type) { + case readerAtSeeker: + var err error + + n := u.options.PartSize + if u.totalSize >= 0 { + bytesLeft := u.totalSize - u.readerPos + if bytesLeft <= u.options.PartSize { + err = io.EOF + n = bytesLeft + } + } + + reader := io.NewSectionReader(r, u.readerPos, n) + cleanup := func() {} + + u.readerPos += n + + return reader, int(n), cleanup, err + + default: + if u.partPool == nil { + u.partPool = newByteSlicePool(u.options.PartSize) + u.partPool.ModifyCapacity(u.options.ParallelNum + 1) + } + + part, err := u.partPool.Get(u.context) + if err != nil { + return nil, 0, func() {}, err + } + + n, err := readFill(r, *part) + u.readerPos += int64(n) + + cleanup := func() { + u.partPool.Put(part) + } + + return bytes.NewReader((*part)[0:n]), n, cleanup, err + } +} + +type uploaderChunk struct { + partNum int32 + size int + body io.ReadSeeker + cleanup func() +} + +type uploadPartCRC struct { + partNumber int32 + size int + hashCRC64 *string +} + +type uploadPartCRCs []uploadPartCRC + +func (slice uploadPartCRCs) Len() int { + return len(slice) +} +func (slice uploadPartCRCs) Less(i, j int) bool { + return slice[i].partNumber < slice[j].partNumber +} +func (slice uploadPartCRCs) Swap(i, j int) { + slice[i], slice[j] = slice[j], slice[i] +} + +type saveErr struct { + Err error +} + +func (e saveErr) Error() string { + return fmt.Sprintf("saveErr: %v", e.Err) +} + +func (e saveErr) Unwrap() error { return e.Err } + +func (u *uploaderDelegate) multiPart() (*UploadResult, error) { + release := func() { + if u.partPool != nil { + u.partPool.Close() + } + } + defer release() + + var ( + wg sync.WaitGroup + mu sync.Mutex + parts UploadParts + errValue atomic.Value + crcParts uploadPartCRCs + enableCRC = (u.base.featureFlags & FeatureEnableCRC64CheckUpload) > 0 + ) + + // Init the multipart + uploadIdInfo, err := u.getUploadId() + if err != nil { + return nil, u.wrapErr("", err) + } + //fmt.Printf("getUploadId result: %v, %#v\n", uploadId, err) + uploadId := uploadIdInfo.uploadId + startPartNum := uploadIdInfo.startNum + + // Update Checkpoint + if u.checkpoint != nil { + u.checkpoint.Info.Data.UploadInfo.UploadId = uploadId + u.checkpoint.dump() + } + + saveErrFn := func(e error) { + if e == nil { + return + } + errValue.Store(saveErr{Err: e}) + } + + getErrFn := func() error { + v := errValue.Load() + if v == nil { + return nil + } + e, _ := v.(saveErr) + return e.Unwrap() + } + + // readChunk runs in worker goroutines to pull chunks off of the ch channel + readChunkFn := func(ch chan uploaderChunk) { + defer wg.Done() + for { + data, ok := <-ch + if !ok { + break + } + + if getErrFn() == nil { + upResult, err := u.client.UploadPart( + u.context, + &UploadPartRequest{ + Bucket: u.request.Bucket, + Key: u.request.Key, + UploadId: Ptr(uploadId), + PartNumber: data.partNum, + Body: data.body, + CSEMultiPartContext: uploadIdInfo.cseContext, + RequestPayer: u.request.RequestPayer, + }, + u.options.ClientOptions...) 
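+				// Results are collected under the mutex below; once any part
+				// fails, the error is recorded and remaining chunks are drained
+				// without being uploaded.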
+ //fmt.Printf("UploadPart result: %#v, %#v\n", upResult, err) + + if err == nil { + mu.Lock() + parts = append(parts, UploadPart{ETag: upResult.ETag, PartNumber: data.partNum}) + if enableCRC { + crcParts = append(crcParts, + uploadPartCRC{partNumber: data.partNum, hashCRC64: upResult.HashCRC64, size: data.size}) + } + if u.request.ProgressFn != nil { + u.transferred += int64(data.size) + u.request.ProgressFn(int64(data.size), u.transferred, u.totalSize) + } + mu.Unlock() + } else { + saveErrFn(err) + } + } + data.cleanup() + } + } + + ch := make(chan uploaderChunk, u.options.ParallelNum) + for i := 0; i < u.options.ParallelNum; i++ { + wg.Add(1) + go readChunkFn(ch) + } + + // Read and queue the parts + var ( + qnum int32 = startPartNum + qerr error = nil + ) + + // consume uploaded parts + if u.readerPos > 0 { + for _, p := range u.uploadedParts { + parts = append(parts, UploadPart{PartNumber: p.PartNumber, ETag: p.ETag}) + } + if u.request.ProgressFn != nil { + u.transferred = u.readerPos + u.request.ProgressFn(u.readerPos, u.transferred, u.totalSize) + } + } + + for getErrFn() == nil && qerr == nil { + var ( + reader io.ReadSeeker + nextChunkLen int + cleanup func() + ) + + reader, nextChunkLen, cleanup, qerr = u.nextReader() + // check err + if qerr != nil && qerr != io.EOF { + cleanup() + saveErrFn(qerr) + break + } + + // No need to upload empty part + if nextChunkLen == 0 { + cleanup() + break + } + + qnum++ + //fmt.Printf("send chunk: %d\n", qnum) + ch <- uploaderChunk{body: reader, partNum: qnum, cleanup: cleanup, size: nextChunkLen} + } + + // Close the channel, wait for workers + close(ch) + wg.Wait() + + // Complete upload + var cmResult *CompleteMultipartUploadResult + if err = getErrFn(); err == nil { + sort.Sort(parts) + cmRequest := &CompleteMultipartUploadRequest{} + copyRequest(cmRequest, u.request) + cmRequest.UploadId = Ptr(uploadId) + cmRequest.CompleteMultipartUpload = &CompleteMultipartUpload{Parts: parts} + cmResult, err = u.client.CompleteMultipartUpload(u.context, cmRequest, u.options.ClientOptions...) + } + //fmt.Printf("CompleteMultipartUpload cmResult: %#v, %#v\n", cmResult, err) + + if err != nil { + //Abort + if !u.options.LeavePartsOnError { + abortRequest := &AbortMultipartUploadRequest{} + copyRequest(abortRequest, u.request) + abortRequest.UploadId = Ptr(uploadId) + _, _ = u.client.AbortMultipartUpload(u.context, abortRequest, u.options.ClientOptions...) + } + return nil, u.wrapErr(uploadId, err) + } + + if enableCRC { + caclCRC := fmt.Sprint(u.combineCRC(crcParts)) + if err = checkResponseHeaderCRC64(caclCRC, cmResult.Headers); err != nil { + return nil, u.wrapErr(uploadId, err) + } + } + + return &UploadResult{ + UploadId: Ptr(uploadId), + ETag: cmResult.ETag, + VersionId: cmResult.VersionId, + HashCRC64: cmResult.HashCRC64, + ResultCommon: cmResult.ResultCommon, + }, nil +} + +func (u *uploaderDelegate) getUploadId() (info uploadIdInfo, err error) { + if u.uploadId != "" { + return uploadIdInfo{ + uploadId: u.uploadId, + startNum: u.partNumber, + cseContext: u.cseContext, + }, nil + } + + // if not exist or fail, create a new upload id + request := &InitiateMultipartUploadRequest{} + copyRequest(request, u.request) + if request.ContentType == nil { + request.ContentType = u.getContentType() + } + + if u.base.isEncryptionClient { + request.CSEPartSize = &u.options.PartSize + request.CSEDataSize = &u.totalSize + } + + result, err := u.client.InitiateMultipartUpload(u.context, request, u.options.ClientOptions...) 
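+	// A fresh upload starts at part number 0; the caller increments before
+	// sending, so the first part uploaded is number 1. Resumed uploads return
+	// the saved id, offset and encryption context above.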
+ if err != nil { + return info, err + } + + return uploadIdInfo{ + uploadId: *result.UploadId, + startNum: 0, + cseContext: result.CSEMultiPartContext, + }, nil +} + +func (u *uploaderDelegate) getContentType() *string { + if u.filePath != "" { + if contentType := TypeByExtension(u.filePath); contentType != "" { + return Ptr(contentType) + } + } + return nil +} + +func (u *uploaderDelegate) wrapErr(uploadId string, err error) error { + return &UploadError{ + UploadId: uploadId, + Path: fmt.Sprintf("oss://%s/%s", *u.request.Bucket, *u.request.Key), + Err: err} +} + +func (u *uploaderDelegate) combineCRC(crcs uploadPartCRCs) uint64 { + if len(crcs) == 0 { + return 0 + } + sort.Sort(crcs) + crc := u.hashCRC64 + for _, c := range crcs { + if c.hashCRC64 == nil { + return 0 + } + if value, err := strconv.ParseUint(*c.hashCRC64, 10, 64); err == nil { + crc = CRC64Combine(crc, value, uint64(c.size)) + } else { + break + } + } + return crc +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/utils.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/utils.go new file mode 100644 index 000000000..98c11840a --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/utils.go @@ -0,0 +1,405 @@ +package oss + +import ( + "bytes" + "context" + "encoding" + "errors" + "fmt" + "io" + "net/http" + "os" + "reflect" + "runtime" + "strconv" + "strings" + "time" +) + +func init() { + for i := 0; i < len(noEscape); i++ { + noEscape[i] = (i >= 'A' && i <= 'Z') || + (i >= 'a' && i <= 'z') || + (i >= '0' && i <= '9') || + i == '-' || + i == '.' || + i == '_' || + i == '~' + } + + defaultUserAgent = fmt.Sprintf("%s/%s (%s/%s/%s;%s)", SdkName, Version(), runtime.GOOS, + "-", runtime.GOARCH, runtime.Version()) +} + +var defaultUserAgent string +var noEscape [256]bool + +func sleepWithContext(ctx context.Context, dur time.Duration) error { + t := time.NewTimer(dur) + defer t.Stop() + + select { + case <-t.C: + break + case <-ctx.Done(): + return ctx.Err() + } + + return nil +} + +// getNowSec returns Unix time, the number of seconds elapsed since January 1, 1970 UTC. +// gets the current time in Unix time, in seconds. +func getNowSec() int64 { + return time.Now().Unix() +} + +// getNowGMT gets the current time in GMT format. 
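+// The layout follows http.TimeFormat, e.g. "Mon, 02 Jan 2006 15:04:05 GMT".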
+func getNowGMT() string { + return time.Now().UTC().Format(http.TimeFormat) +} + +func escapePath(path string, encodeSep bool) string { + var buf bytes.Buffer + for i := 0; i < len(path); i++ { + c := path[i] + if noEscape[c] || (c == '/' && !encodeSep) { + buf.WriteByte(c) + } else { + fmt.Fprintf(&buf, "%%%02X", c) + } + } + return buf.String() +} + +func isEmptyValue(v reflect.Value) bool { + switch v.Kind() { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Pointer: + return v.IsNil() + } + return false +} + +func setTimeReflectValue(dst reflect.Value, value time.Time) (err error) { + dst0 := dst + if dst.Kind() == reflect.Pointer { + if dst.IsNil() { + dst.Set(reflect.New(dst.Type().Elem())) + } + dst = dst.Elem() + } + if dst.CanAddr() { + pv := dst.Addr() + if pv.CanInterface() { + if val, ok := pv.Interface().(encoding.TextUnmarshaler); ok { + return val.UnmarshalText([]byte(value.Format(time.RFC3339))) + } + } + } + return errors.New("cannot unmarshal into " + dst0.Type().String()) +} + +func setReflectValue(dst reflect.Value, data string) (err error) { + dst0 := dst + src := []byte(data) + + if dst.Kind() == reflect.Pointer { + if dst.IsNil() { + dst.Set(reflect.New(dst.Type().Elem())) + } + dst = dst.Elem() + } + + switch dst.Kind() { + case reflect.Invalid: + default: + return errors.New("cannot unmarshal into " + dst0.Type().String()) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + if len(src) == 0 { + dst.SetInt(0) + return nil + } + itmp, err := strconv.ParseInt(strings.TrimSpace(string(src)), 10, dst.Type().Bits()) + if err != nil { + return err + } + dst.SetInt(itmp) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + if len(src) == 0 { + dst.SetUint(0) + return nil + } + utmp, err := strconv.ParseUint(strings.TrimSpace(string(src)), 10, dst.Type().Bits()) + if err != nil { + return err + } + dst.SetUint(utmp) + case reflect.Bool: + if len(src) == 0 { + dst.SetBool(false) + return nil + } + value, err := strconv.ParseBool(strings.TrimSpace(string(src))) + if err != nil { + return err + } + dst.SetBool(value) + case reflect.String: + dst.SetString(string(src)) + } + return nil +} + +func setMapStringReflectValue(dst reflect.Value, key any, data any) (err error) { + dst0 := dst + + if dst.Kind() == reflect.Pointer { + if dst.IsNil() { + dst.Set(reflect.New(dst.Type().Elem())) + } + dst = dst.Elem() + } + + switch dst.Kind() { + case reflect.Invalid: + default: + return errors.New("cannot unmarshal into " + dst0.Type().String()) + case reflect.Map: + if dst.IsNil() { + dst.Set(reflect.MakeMap(dst.Type())) + } + mapValue := reflect.ValueOf(data) + mapKey := reflect.ValueOf(key) + dst.SetMapIndex(mapKey, mapValue) + } + return nil +} + +func isContextError(ctx context.Context, perr *error) bool { + if ctxErr := ctx.Err(); ctxErr != nil { + if *perr == nil { + *perr = ctxErr + } + return true + } + return false +} + +func copySeekableBody(dst io.Writer, src io.ReadSeeker) (int64, error) { + curPos, err := src.Seek(0, io.SeekCurrent) + if err != nil { + return 0, err + } + + n, err := io.Copy(dst, 
src) + if err != nil { + return n, err + } + + _, err = src.Seek(curPos, io.SeekStart) + if err != nil { + return n, err + } + + return n, nil +} + +func ParseOffsetAndSizeFromHeaders(headers http.Header) (offset, size int64) { + return parseOffsetAndSizeFromHeaders(headers) +} + +func parseOffsetAndSizeFromHeaders(headers http.Header) (offset, size int64) { + size = -1 + var contentLength = headers.Get("Content-Length") + if len(contentLength) != 0 { + var err error + if size, err = strconv.ParseInt(contentLength, 10, 64); err != nil { + return 0, -1 + } + } + + var contentRange = headers.Get("Content-Range") + if len(contentRange) == 0 { + return 0, size + } + + if !strings.HasPrefix(contentRange, "bytes ") { + return 0, -1 + } + + // start offset + dash := strings.IndexRune(contentRange, '-') + if dash < 0 { + return 0, -1 + } + ret, err := strconv.ParseInt(contentRange[6:dash], 10, 64) + if err != nil { + return 0, -1 + } + offset = ret + + // total size + slash := strings.IndexRune(contentRange, '/') + if slash < 0 { + return 0, -1 + } + tsize := contentRange[slash+1:] + if tsize != "*" { + ret, err = strconv.ParseInt(contentRange[slash+1:], 10, 64) + if err != nil { + return 0, -1 + } + size = ret + } + + return offset, size +} + +func minInt64(a, b int64) int64 { + if a < b { + return a + } else { + return b + } +} + +func maxInt64(a, b int64) int64 { + if a > b { + return a + } else { + return b + } +} + +func minInt(a, b int) int { + if a < b { + return a + } else { + return b + } +} + +func maxInt(a, b int) int { + if a > b { + return a + } else { + return b + } +} + +// ParseRange parses a ContentRange from a ContentRange: header. +// It only accepts bytes 22-33/42 and bytes 22-33/* format. +func ParseContentRange(s string) (from int64, to int64, total int64, err error) { + if !strings.HasPrefix(s, "bytes ") { + return from, to, total, errors.New("invalid content range") + } + + slash := strings.IndexRune(s, '/') + if slash < 0 { + return from, to, total, errors.New("invalid content range") + } + + dash := strings.IndexRune(s, '-') + if dash < 0 { + return from, to, total, errors.New("invalid content range") + } + + if slash < dash { + return from, to, total, errors.New("invalid content range") + } + + // from + ret, err := strconv.ParseInt(s[6:dash], 10, 64) + if err != nil { + return from, to, total, errors.New("invalid content range") + } + from = ret + + // to + ret, err = strconv.ParseInt(s[dash+1:slash], 10, 64) + if err != nil { + return from, to, total, errors.New("invalid content range") + } + to = ret + + // total + last := s[slash+1:] + if last == "*" { + total = -1 + } else { + ret, err = strconv.ParseInt(s[slash+1:], 10, 64) + if err != nil { + return from, to, total, errors.New("invalid content range") + } + total = ret + } + + return from, to, total, nil +} + +// ParseRange parses a HTTPRange from a Range: header. +// It only accepts single ranges. 
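+// For example, "bytes=0-1023" yields Offset 0 and Count 1024, while an
+// open-ended "bytes=100-" yields Offset 100 and Count 0.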
+func ParseRange(s string) (r *HTTPRange, err error) { + const preamble = "bytes=" + if !strings.HasPrefix(s, preamble) { + return nil, errors.New("range: header invalid: doesn't start with " + preamble) + } + s = s[len(preamble):] + if strings.ContainsRune(s, ',') { + return nil, errors.New("range: header invalid: contains multiple ranges which isn't supported") + } + dash := strings.IndexRune(s, '-') + if dash < 0 { + return nil, errors.New("range: header invalid: contains no '-'") + } + start, end := strings.TrimSpace(s[:dash]), strings.TrimSpace(s[dash+1:]) + o := HTTPRange{Offset: 0, Count: 0} + if start != "" { + o.Offset, err = strconv.ParseInt(start, 10, 64) + if err != nil || o.Offset < 0 { + return nil, errors.New("range: header invalid: bad start") + } + } + if end != "" { + e, err := strconv.ParseInt(end, 10, 64) + if err != nil || e < 0 { + return nil, errors.New("range: header invalid: bad end") + } + o.Count = e - o.Offset + 1 + } + return &o, nil +} + +// FileExists returns whether the given file exists or not +func FileExists(filename string) bool { + info, err := os.Stat(filename) + if os.IsNotExist(err) { + return false + } + return (info != nil && !info.IsDir()) +} + +// DirExists returns whether the given directory exists or not +func DirExists(dir string) bool { + info, err := os.Stat(dir) + if os.IsNotExist(err) { + return false + } + return (info != nil && info.IsDir()) +} + +// EmptyFile changes the size of the named file to zero. +func EmptyFile(filename string) bool { + err := os.Truncate(filename, 0) + return err == nil +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/utils_copy.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/utils_copy.go new file mode 100644 index 000000000..c65feaf85 --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/utils_copy.go @@ -0,0 +1,95 @@ +package oss + +import ( + "io" + "reflect" + "time" +) + +func copyRequest(dst, src interface{}) { + dstval := reflect.ValueOf(dst) + if !dstval.IsValid() { + panic("Copy dst cannot be nil") + } + + rcopy(dstval, reflect.ValueOf(src), true) +} + +func copyOfRequest(src interface{}) (dst interface{}) { + dsti := reflect.New(reflect.TypeOf(src).Elem()) + dst = dsti.Interface() + rcopy(dsti, reflect.ValueOf(src), true) + return +} + +func rcopy(dst, src reflect.Value, root bool) { + if !src.IsValid() { + return + } + + switch src.Kind() { + case reflect.Ptr: + if _, ok := src.Interface().(io.Reader); ok { + if dst.Kind() == reflect.Ptr && dst.Elem().CanSet() { + dst.Elem().Set(src) + } else if dst.CanSet() { + dst.Set(src) + } + } else { + e := src.Type().Elem() + if dst.CanSet() && !src.IsNil() { + if _, ok := src.Interface().(*time.Time); !ok { + if dst.Kind() == reflect.String { + dst.SetString(e.String()) + } else { + dst.Set(reflect.New(e)) + } + } else { + tempValue := reflect.New(e) + tempValue.Elem().Set(src.Elem()) + dst.Set(tempValue) + } + } + if dst.Kind() != reflect.String && src.Elem().IsValid() { + rcopy(dst.Elem(), src.Elem(), root) + } + } + case reflect.Struct: + t := dst.Type() + for i := 0; i < t.NumField(); i++ { + name := t.Field(i).Name + srcVal := src.FieldByName(name) + dstVal := dst.FieldByName(name) + if srcVal.IsValid() && dstVal.CanSet() { + rcopy(dstVal, srcVal, false) + } + } + case reflect.Slice: + if src.IsNil() { + break + } + + s := reflect.MakeSlice(src.Type(), src.Len(), src.Cap()) + dst.Set(s) + for i := 0; i < src.Len(); i++ { + rcopy(dst.Index(i), src.Index(i), false) + } + case reflect.Map: + if 
src.IsNil() { + break + } + + s := reflect.MakeMap(src.Type()) + dst.Set(s) + for _, k := range src.MapKeys() { + v := src.MapIndex(k) + v2 := reflect.New(v.Type()).Elem() + rcopy(v2, v, false) + dst.SetMapIndex(k, v2) + } + default: + if src.Type().AssignableTo(dst.Type()) { + dst.Set(src) + } + } +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/utils_crc.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/utils_crc.go new file mode 100644 index 000000000..68aee396d --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/utils_crc.go @@ -0,0 +1,140 @@ +package oss + +import ( + "hash" + "hash/crc64" +) + +// hashCRC64 represents the partial evaluation of a checksum. +type hashCRC64 struct { + init uint64 + crc uint64 + tab *crc64.Table +} + +// NewCRC64 NewCRC creates a new hash.Hash64 computing the CRC64 checksum +// using the polynomial represented by the Table. +func NewCRC64(init uint64) hash.Hash64 { + return &hashCRC64{ + init: init, + crc: init, + tab: crc64.MakeTable(crc64.ECMA), + } +} + +// Size returns the number of bytes sum will return. +func (d *hashCRC64) Size() int { + return crc64.Size +} + +// BlockSize returns the hash's underlying block size. +// The Write method must be able to accept any amount +// of data, but it may operate more efficiently if all writes +// are a multiple of the block size. +func (d *hashCRC64) BlockSize() int { + return 1 +} + +// Reset resets the hash to its initial state. +func (d *hashCRC64) Reset() { + d.crc = d.init +} + +// Write (via the embedded io.Writer interface) adds more data to the running hash. +// It never returns an error. +func (d *hashCRC64) Write(p []byte) (n int, err error) { + d.crc = crc64.Update(d.crc, d.tab, p) + return len(p), nil +} + +// Sum64 returns CRC64 value. +func (d *hashCRC64) Sum64() uint64 { + return d.crc +} + +// Sum returns hash value. 
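+// The CRC64 value is appended to in as 8 big-endian bytes.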
+func (d *hashCRC64) Sum(in []byte) []byte { + s := d.Sum64() + return append(in, byte(s>>56), byte(s>>48), byte(s>>40), byte(s>>32), byte(s>>24), byte(s>>16), byte(s>>8), byte(s)) +} + +// gf2Dim dimension of GF(2) vectors (length of CRC) +const gf2Dim int = 64 + +func gf2MatrixTimes(mat []uint64, vec uint64) uint64 { + var sum uint64 + for i := 0; vec != 0; i++ { + if vec&1 != 0 { + sum ^= mat[i] + } + + vec >>= 1 + } + return sum +} + +func gf2MatrixSquare(square []uint64, mat []uint64) { + for n := 0; n < gf2Dim; n++ { + square[n] = gf2MatrixTimes(mat, mat[n]) + } +} + +// CRC64Combine combines CRC64 +func CRC64Combine(crc1 uint64, crc2 uint64, len2 uint64) uint64 { + var even [gf2Dim]uint64 // Even-power-of-two zeros operator + var odd [gf2Dim]uint64 // Odd-power-of-two zeros operator + + // Degenerate case + if len2 == 0 { + return crc1 + } + + // Put operator for one zero bit in odd + odd[0] = crc64.ECMA // CRC64 polynomial + var row uint64 = 1 + for n := 1; n < gf2Dim; n++ { + odd[n] = row + row <<= 1 + } + + // Put operator for two zero bits in even + gf2MatrixSquare(even[:], odd[:]) + + // Put operator for four zero bits in odd + gf2MatrixSquare(odd[:], even[:]) + + // Apply len2 zeros to crc1, first square will put the operator for one zero byte, eight zero bits, in even + for { + // Apply zeros operator for this bit of len2 + gf2MatrixSquare(even[:], odd[:]) + + if len2&1 != 0 { + crc1 = gf2MatrixTimes(even[:], crc1) + } + + len2 >>= 1 + + // If no more bits set, then done + if len2 == 0 { + break + } + + // Another iteration of the loop with odd and even swapped + gf2MatrixSquare(odd[:], even[:]) + if len2&1 != 0 { + crc1 = gf2MatrixTimes(odd[:], crc1) + } + len2 >>= 1 + + // If no more bits set, then done + if len2 == 0 { + break + } + } + + // Return combined CRC + crc1 ^= crc2 + return crc1 +} + +var _ RequestBodyTracker = (*hashCRC64)(nil) diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/utils_mime.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/utils_mime.go new file mode 100644 index 000000000..450bc01d8 --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/utils_mime.go @@ -0,0 +1,595 @@ +package oss + +import ( + "mime" + "path" + "strings" +) + +var extToMimeType = map[string]string{ + ".xlsx": "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet", + ".xltx": "application/vnd.openxmlformats-officedocument.spreadsheetml.template", + ".potx": "application/vnd.openxmlformats-officedocument.presentationml.template", + ".ppsx": "application/vnd.openxmlformats-officedocument.presentationml.slideshow", + ".pptx": "application/vnd.openxmlformats-officedocument.presentationml.presentation", + ".sldx": "application/vnd.openxmlformats-officedocument.presentationml.slide", + ".docx": "application/vnd.openxmlformats-officedocument.wordprocessingml.document", + ".dotx": "application/vnd.openxmlformats-officedocument.wordprocessingml.template", + ".xlam": "application/vnd.ms-excel.addin.macroEnabled.12", + ".xlsb": "application/vnd.ms-excel.sheet.binary.macroEnabled.12", + ".apk": "application/vnd.android.package-archive", + ".hqx": "application/mac-binhex40", + ".cpt": "application/mac-compactpro", + ".doc": "application/msword", + ".ogg": "application/ogg", + ".pdf": "application/pdf", + ".rtf": "text/rtf", + ".mif": "application/vnd.mif", + ".xls": "application/vnd.ms-excel", + ".ppt": "application/vnd.ms-powerpoint", + ".odc": "application/vnd.oasis.opendocument.chart", + ".odb": 
"application/vnd.oasis.opendocument.database", + ".odf": "application/vnd.oasis.opendocument.formula", + ".odg": "application/vnd.oasis.opendocument.graphics", + ".otg": "application/vnd.oasis.opendocument.graphics-template", + ".odi": "application/vnd.oasis.opendocument.image", + ".odp": "application/vnd.oasis.opendocument.presentation", + ".otp": "application/vnd.oasis.opendocument.presentation-template", + ".ods": "application/vnd.oasis.opendocument.spreadsheet", + ".ots": "application/vnd.oasis.opendocument.spreadsheet-template", + ".odt": "application/vnd.oasis.opendocument.text", + ".odm": "application/vnd.oasis.opendocument.text-master", + ".ott": "application/vnd.oasis.opendocument.text-template", + ".oth": "application/vnd.oasis.opendocument.text-web", + ".sxw": "application/vnd.sun.xml.writer", + ".stw": "application/vnd.sun.xml.writer.template", + ".sxc": "application/vnd.sun.xml.calc", + ".stc": "application/vnd.sun.xml.calc.template", + ".sxd": "application/vnd.sun.xml.draw", + ".std": "application/vnd.sun.xml.draw.template", + ".sxi": "application/vnd.sun.xml.impress", + ".sti": "application/vnd.sun.xml.impress.template", + ".sxg": "application/vnd.sun.xml.writer.global", + ".sxm": "application/vnd.sun.xml.math", + ".sis": "application/vnd.symbian.install", + ".wbxml": "application/vnd.wap.wbxml", + ".wmlc": "application/vnd.wap.wmlc", + ".wmlsc": "application/vnd.wap.wmlscriptc", + ".bcpio": "application/x-bcpio", + ".torrent": "application/x-bittorrent", + ".bz2": "application/x-bzip2", + ".vcd": "application/x-cdlink", + ".pgn": "application/x-chess-pgn", + ".cpio": "application/x-cpio", + ".csh": "application/x-csh", + ".dvi": "application/x-dvi", + ".spl": "application/x-futuresplash", + ".gtar": "application/x-gtar", + ".hdf": "application/x-hdf", + ".jar": "application/x-java-archive", + ".jnlp": "application/x-java-jnlp-file", + ".js": "application/x-javascript", + ".ksp": "application/x-kspread", + ".chrt": "application/x-kchart", + ".kil": "application/x-killustrator", + ".latex": "application/x-latex", + ".rpm": "application/x-rpm", + ".sh": "application/x-sh", + ".shar": "application/x-shar", + ".swf": "application/x-shockwave-flash", + ".sit": "application/x-stuffit", + ".sv4cpio": "application/x-sv4cpio", + ".sv4crc": "application/x-sv4crc", + ".tar": "application/x-tar", + ".tcl": "application/x-tcl", + ".tex": "application/x-tex", + ".man": "application/x-troff-man", + ".me": "application/x-troff-me", + ".ms": "application/x-troff-ms", + ".ustar": "application/x-ustar", + ".src": "application/x-wais-source", + ".zip": "application/zip", + ".m3u": "audio/x-mpegurl", + ".ra": "audio/x-pn-realaudio", + ".wav": "audio/x-wav", + ".wma": "audio/x-ms-wma", + ".wax": "audio/x-ms-wax", + ".pdb": "chemical/x-pdb", + ".xyz": "chemical/x-xyz", + ".bmp": "image/bmp", + ".gif": "image/gif", + ".ief": "image/ief", + ".png": "image/png", + ".wbmp": "image/vnd.wap.wbmp", + ".ras": "image/x-cmu-raster", + ".pnm": "image/x-portable-anymap", + ".pbm": "image/x-portable-bitmap", + ".pgm": "image/x-portable-graymap", + ".ppm": "image/x-portable-pixmap", + ".rgb": "image/x-rgb", + ".xbm": "image/x-xbitmap", + ".xpm": "image/x-xpixmap", + ".xwd": "image/x-xwindowdump", + ".css": "text/css", + ".rtx": "text/richtext", + ".tsv": "text/tab-separated-values", + ".jad": "text/vnd.sun.j2me.app-descriptor", + ".wml": "text/vnd.wap.wml", + ".wmls": "text/vnd.wap.wmlscript", + ".etx": "text/x-setext", + ".mxu": "video/vnd.mpegurl", + ".flv": "video/x-flv", + ".wm": "video/x-ms-wm", + ".wmv": 
"video/x-ms-wmv", + ".wmx": "video/x-ms-wmx", + ".wvx": "video/x-ms-wvx", + ".avi": "video/x-msvideo", + ".movie": "video/x-sgi-movie", + ".ice": "x-conference/x-cooltalk", + ".3gp": "video/3gpp", + ".ai": "application/postscript", + ".aif": "audio/x-aiff", + ".aifc": "audio/x-aiff", + ".aiff": "audio/x-aiff", + ".asc": "text/plain", + ".atom": "application/atom+xml", + ".au": "audio/basic", + ".bin": "application/octet-stream", + ".cdf": "application/x-netcdf", + ".cgm": "image/cgm", + ".class": "application/octet-stream", + ".dcr": "application/x-director", + ".dif": "video/x-dv", + ".dir": "application/x-director", + ".djv": "image/vnd.djvu", + ".djvu": "image/vnd.djvu", + ".dll": "application/octet-stream", + ".dmg": "application/octet-stream", + ".dms": "application/octet-stream", + ".dtd": "application/xml-dtd", + ".dv": "video/x-dv", + ".dxr": "application/x-director", + ".eps": "application/postscript", + ".exe": "application/octet-stream", + ".ez": "application/andrew-inset", + ".gram": "application/srgs", + ".grxml": "application/srgs+xml", + ".gz": "application/x-gzip", + ".htm": "text/html", + ".html": "text/html", + ".ico": "image/x-icon", + ".ics": "text/calendar", + ".ifb": "text/calendar", + ".iges": "model/iges", + ".igs": "model/iges", + ".jp2": "image/jp2", + ".jpe": "image/jpeg", + ".jpeg": "image/jpeg", + ".jpg": "image/jpeg", + ".kar": "audio/midi", + ".lha": "application/octet-stream", + ".lzh": "application/octet-stream", + ".m4a": "audio/mp4a-latm", + ".m4p": "audio/mp4a-latm", + ".m4u": "video/vnd.mpegurl", + ".m4v": "video/x-m4v", + ".mac": "image/x-macpaint", + ".mathml": "application/mathml+xml", + ".mesh": "model/mesh", + ".mid": "audio/midi", + ".midi": "audio/midi", + ".mov": "video/quicktime", + ".mp2": "audio/mpeg", + ".mp3": "audio/mpeg", + ".mp4": "video/mp4", + ".mpe": "video/mpeg", + ".mpeg": "video/mpeg", + ".mpg": "video/mpeg", + ".mpga": "audio/mpeg", + ".msh": "model/mesh", + ".nc": "application/x-netcdf", + ".oda": "application/oda", + ".ogv": "video/ogv", + ".pct": "image/pict", + ".pic": "image/pict", + ".pict": "image/pict", + ".pnt": "image/x-macpaint", + ".pntg": "image/x-macpaint", + ".ps": "application/postscript", + ".qt": "video/quicktime", + ".qti": "image/x-quicktime", + ".qtif": "image/x-quicktime", + ".ram": "audio/x-pn-realaudio", + ".rdf": "application/rdf+xml", + ".rm": "application/vnd.rn-realmedia", + ".roff": "application/x-troff", + ".sgm": "text/sgml", + ".sgml": "text/sgml", + ".silo": "model/mesh", + ".skd": "application/x-koan", + ".skm": "application/x-koan", + ".skp": "application/x-koan", + ".skt": "application/x-koan", + ".smi": "application/smil", + ".smil": "application/smil", + ".snd": "audio/basic", + ".so": "application/octet-stream", + ".svg": "image/svg+xml", + ".t": "application/x-troff", + ".texi": "application/x-texinfo", + ".texinfo": "application/x-texinfo", + ".tif": "image/tiff", + ".tiff": "image/tiff", + ".tr": "application/x-troff", + ".txt": "text/plain", + ".vrml": "model/vrml", + ".vxml": "application/voicexml+xml", + ".webm": "video/webm", + ".wrl": "model/vrml", + ".xht": "application/xhtml+xml", + ".xhtml": "application/xhtml+xml", + ".xml": "application/xml", + ".xsl": "application/xml", + ".xslt": "application/xslt+xml", + ".xul": "application/vnd.mozilla.xul+xml", + ".webp": "image/webp", + ".323": "text/h323", + ".aab": "application/x-authoware-bin", + ".aam": "application/x-authoware-map", + ".aas": "application/x-authoware-seg", + ".acx": "application/internet-property-stream", + ".als": 
"audio/X-Alpha5", + ".amc": "application/x-mpeg", + ".ani": "application/octet-stream", + ".asd": "application/astound", + ".asf": "video/x-ms-asf", + ".asn": "application/astound", + ".asp": "application/x-asap", + ".asr": "video/x-ms-asf", + ".asx": "video/x-ms-asf", + ".avb": "application/octet-stream", + ".awb": "audio/amr-wb", + ".axs": "application/olescript", + ".bas": "text/plain", + ".bin ": "application/octet-stream", + ".bld": "application/bld", + ".bld2": "application/bld2", + ".bpk": "application/octet-stream", + ".c": "text/plain", + ".cal": "image/x-cals", + ".cat": "application/vnd.ms-pkiseccat", + ".ccn": "application/x-cnc", + ".cco": "application/x-cocoa", + ".cer": "application/x-x509-ca-cert", + ".cgi": "magnus-internal/cgi", + ".chat": "application/x-chat", + ".clp": "application/x-msclip", + ".cmx": "image/x-cmx", + ".co": "application/x-cult3d-object", + ".cod": "image/cis-cod", + ".conf": "text/plain", + ".cpp": "text/plain", + ".crd": "application/x-mscardfile", + ".crl": "application/pkix-crl", + ".crt": "application/x-x509-ca-cert", + ".csm": "chemical/x-csml", + ".csml": "chemical/x-csml", + ".cur": "application/octet-stream", + ".dcm": "x-lml/x-evm", + ".dcx": "image/x-dcx", + ".der": "application/x-x509-ca-cert", + ".dhtml": "text/html", + ".dot": "application/msword", + ".dwf": "drawing/x-dwf", + ".dwg": "application/x-autocad", + ".dxf": "application/x-autocad", + ".ebk": "application/x-expandedbook", + ".emb": "chemical/x-embl-dl-nucleotide", + ".embl": "chemical/x-embl-dl-nucleotide", + ".epub": "application/epub+zip", + ".eri": "image/x-eri", + ".es": "audio/echospeech", + ".esl": "audio/echospeech", + ".etc": "application/x-earthtime", + ".evm": "x-lml/x-evm", + ".evy": "application/envoy", + ".fh4": "image/x-freehand", + ".fh5": "image/x-freehand", + ".fhc": "image/x-freehand", + ".fif": "application/fractals", + ".flr": "x-world/x-vrml", + ".fm": "application/x-maker", + ".fpx": "image/x-fpx", + ".fvi": "video/isivideo", + ".gau": "chemical/x-gaussian-input", + ".gca": "application/x-gca-compressed", + ".gdb": "x-lml/x-gdb", + ".gps": "application/x-gps", + ".h": "text/plain", + ".hdm": "text/x-hdml", + ".hdml": "text/x-hdml", + ".hlp": "application/winhlp", + ".hta": "application/hta", + ".htc": "text/x-component", + ".hts": "text/html", + ".htt": "text/webviewhtml", + ".ifm": "image/gif", + ".ifs": "image/ifs", + ".iii": "application/x-iphone", + ".imy": "audio/melody", + ".ins": "application/x-internet-signup", + ".ips": "application/x-ipscript", + ".ipx": "application/x-ipix", + ".isp": "application/x-internet-signup", + ".it": "audio/x-mod", + ".itz": "audio/x-mod", + ".ivr": "i-world/i-vrml", + ".j2k": "image/j2k", + ".jam": "application/x-jam", + ".java": "text/plain", + ".jfif": "image/pipeg", + ".jpz": "image/jpeg", + ".jwc": "application/jwc", + ".kjx": "application/x-kjx", + ".lak": "x-lml/x-lak", + ".lcc": "application/fastman", + ".lcl": "application/x-digitalloca", + ".lcr": "application/x-digitalloca", + ".lgh": "application/lgh", + ".lml": "x-lml/x-lml", + ".lmlpack": "x-lml/x-lmlpack", + ".log": "text/plain", + ".lsf": "video/x-la-asf", + ".lsx": "video/x-la-asf", + ".m13": "application/x-msmediaview", + ".m14": "application/x-msmediaview", + ".m15": "audio/x-mod", + ".m3url": "audio/x-mpegurl", + ".m4b": "audio/mp4a-latm", + ".ma1": "audio/ma1", + ".ma2": "audio/ma2", + ".ma3": "audio/ma3", + ".ma5": "audio/ma5", + ".map": "magnus-internal/imagemap", + ".mbd": "application/mbedlet", + ".mct": "application/x-mascot", + ".mdb": 
"application/x-msaccess", + ".mdz": "audio/x-mod", + ".mel": "text/x-vmel", + ".mht": "message/rfc822", + ".mhtml": "message/rfc822", + ".mi": "application/x-mif", + ".mil": "image/x-cals", + ".mio": "audio/x-mio", + ".mmf": "application/x-skt-lbs", + ".mng": "video/x-mng", + ".mny": "application/x-msmoney", + ".moc": "application/x-mocha", + ".mocha": "application/x-mocha", + ".mod": "audio/x-mod", + ".mof": "application/x-yumekara", + ".mol": "chemical/x-mdl-molfile", + ".mop": "chemical/x-mopac-input", + ".mpa": "video/mpeg", + ".mpc": "application/vnd.mpohun.certificate", + ".mpg4": "video/mp4", + ".mpn": "application/vnd.mophun.application", + ".mpp": "application/vnd.ms-project", + ".mps": "application/x-mapserver", + ".mpv2": "video/mpeg", + ".mrl": "text/x-mrml", + ".mrm": "application/x-mrm", + ".msg": "application/vnd.ms-outlook", + ".mts": "application/metastream", + ".mtx": "application/metastream", + ".mtz": "application/metastream", + ".mvb": "application/x-msmediaview", + ".mzv": "application/metastream", + ".nar": "application/zip", + ".nbmp": "image/nbmp", + ".ndb": "x-lml/x-ndb", + ".ndwn": "application/ndwn", + ".nif": "application/x-nif", + ".nmz": "application/x-scream", + ".nokia-op-logo": "image/vnd.nok-oplogo-color", + ".npx": "application/x-netfpx", + ".nsnd": "audio/nsnd", + ".nva": "application/x-neva1", + ".nws": "message/rfc822", + ".oom": "application/x-AtlasMate-Plugin", + ".p10": "application/pkcs10", + ".p12": "application/x-pkcs12", + ".p7b": "application/x-pkcs7-certificates", + ".p7c": "application/x-pkcs7-mime", + ".p7m": "application/x-pkcs7-mime", + ".p7r": "application/x-pkcs7-certreqresp", + ".p7s": "application/x-pkcs7-signature", + ".pac": "audio/x-pac", + ".pae": "audio/x-epac", + ".pan": "application/x-pan", + ".pcx": "image/x-pcx", + ".pda": "image/x-pda", + ".pfr": "application/font-tdpfr", + ".pfx": "application/x-pkcs12", + ".pko": "application/ynd.ms-pkipko", + ".pm": "application/x-perl", + ".pma": "application/x-perfmon", + ".pmc": "application/x-perfmon", + ".pmd": "application/x-pmd", + ".pml": "application/x-perfmon", + ".pmr": "application/x-perfmon", + ".pmw": "application/x-perfmon", + ".pnz": "image/png", + ".pot,": "application/vnd.ms-powerpoint", + ".pps": "application/vnd.ms-powerpoint", + ".pqf": "application/x-cprplayer", + ".pqi": "application/cprplayer", + ".prc": "application/x-prc", + ".prf": "application/pics-rules", + ".prop": "text/plain", + ".proxy": "application/x-ns-proxy-autoconfig", + ".ptlk": "application/listenup", + ".pub": "application/x-mspublisher", + ".pvx": "video/x-pv-pvx", + ".qcp": "audio/vnd.qcelp", + ".r3t": "text/vnd.rn-realtext3d", + ".rar": "application/octet-stream", + ".rc": "text/plain", + ".rf": "image/vnd.rn-realflash", + ".rlf": "application/x-richlink", + ".rmf": "audio/x-rmf", + ".rmi": "audio/mid", + ".rmm": "audio/x-pn-realaudio", + ".rmvb": "audio/x-pn-realaudio", + ".rnx": "application/vnd.rn-realplayer", + ".rp": "image/vnd.rn-realpix", + ".rt": "text/vnd.rn-realtext", + ".rte": "x-lml/x-gps", + ".rtg": "application/metastream", + ".rv": "video/vnd.rn-realvideo", + ".rwc": "application/x-rogerwilco", + ".s3m": "audio/x-mod", + ".s3z": "audio/x-mod", + ".sca": "application/x-supercard", + ".scd": "application/x-msschedule", + ".sct": "text/scriptlet", + ".sdf": "application/e-score", + ".sea": "application/x-stuffit", + ".setpay": "application/set-payment-initiation", + ".setreg": "application/set-registration-initiation", + ".shtml": "text/html", + ".shtm": "text/html", + ".shw": 
"application/presentations", + ".si6": "image/si6", + ".si7": "image/vnd.stiwap.sis", + ".si9": "image/vnd.lgtwap.sis", + ".slc": "application/x-salsa", + ".smd": "audio/x-smd", + ".smp": "application/studiom", + ".smz": "audio/x-smd", + ".spc": "application/x-pkcs7-certificates", + ".spr": "application/x-sprite", + ".sprite": "application/x-sprite", + ".sdp": "application/sdp", + ".spt": "application/x-spt", + ".sst": "application/vnd.ms-pkicertstore", + ".stk": "application/hyperstudio", + ".stl": "application/vnd.ms-pkistl", + ".stm": "text/html", + ".svf": "image/vnd", + ".svh": "image/svh", + ".svr": "x-world/x-svr", + ".swfl": "application/x-shockwave-flash", + ".tad": "application/octet-stream", + ".talk": "text/x-speech", + ".taz": "application/x-tar", + ".tbp": "application/x-timbuktu", + ".tbt": "application/x-timbuktu", + ".tgz": "application/x-compressed", + ".thm": "application/vnd.eri.thm", + ".tki": "application/x-tkined", + ".tkined": "application/x-tkined", + ".toc": "application/toc", + ".toy": "image/toy", + ".trk": "x-lml/x-gps", + ".trm": "application/x-msterminal", + ".tsi": "audio/tsplayer", + ".tsp": "application/dsptype", + ".ttf": "application/octet-stream", + ".ttz": "application/t-time", + ".uls": "text/iuls", + ".ult": "audio/x-mod", + ".uu": "application/x-uuencode", + ".uue": "application/x-uuencode", + ".vcf": "text/x-vcard", + ".vdo": "video/vdo", + ".vib": "audio/vib", + ".viv": "video/vivo", + ".vivo": "video/vivo", + ".vmd": "application/vocaltec-media-desc", + ".vmf": "application/vocaltec-media-file", + ".vmi": "application/x-dreamcast-vms-info", + ".vms": "application/x-dreamcast-vms", + ".vox": "audio/voxware", + ".vqe": "audio/x-twinvq-plugin", + ".vqf": "audio/x-twinvq", + ".vql": "audio/x-twinvq", + ".vre": "x-world/x-vream", + ".vrt": "x-world/x-vrt", + ".vrw": "x-world/x-vream", + ".vts": "workbook/formulaone", + ".wcm": "application/vnd.ms-works", + ".wdb": "application/vnd.ms-works", + ".web": "application/vnd.xara", + ".wi": "image/wavelet", + ".wis": "application/x-InstallShield", + ".wks": "application/vnd.ms-works", + ".wmd": "application/x-ms-wmd", + ".wmf": "application/x-msmetafile", + ".wmlscript": "text/vnd.wap.wmlscript", + ".wmz": "application/x-ms-wmz", + ".wpng": "image/x-up-wpng", + ".wps": "application/vnd.ms-works", + ".wpt": "x-lml/x-gps", + ".wri": "application/x-mswrite", + ".wrz": "x-world/x-vrml", + ".ws": "text/vnd.wap.wmlscript", + ".wsc": "application/vnd.wap.wmlscriptc", + ".wv": "video/wavelet", + ".wxl": "application/x-wxl", + ".x-gzip": "application/x-gzip", + ".xaf": "x-world/x-vrml", + ".xar": "application/vnd.xara", + ".xdm": "application/x-xdma", + ".xdma": "application/x-xdma", + ".xdw": "application/vnd.fujixerox.docuworks", + ".xhtm": "application/xhtml+xml", + ".xla": "application/vnd.ms-excel", + ".xlc": "application/vnd.ms-excel", + ".xll": "application/x-excel", + ".xlm": "application/vnd.ms-excel", + ".xlt": "application/vnd.ms-excel", + ".xlw": "application/vnd.ms-excel", + ".xm": "audio/x-mod", + ".xmz": "audio/x-mod", + ".xof": "x-world/x-vrml", + ".xpi": "application/x-xpinstall", + ".xsit": "text/xml", + ".yz1": "application/x-yz1", + ".z": "application/x-compress", + ".zac": "application/x-zaurus-zac", + ".json": "application/json", +} + +// TypeByExtension returns the MIME type associated with the file extension ext. 
+// gets the file's MIME type for HTTP header Content-Type +func TypeByExtension(filePath string) string { + ext := path.Ext(filePath) + typ := mime.TypeByExtension(ext) + if typ == "" { + typ = extToMimeType[strings.ToLower(ext)] + } else { + if strings.HasPrefix(typ, "text/") && strings.Contains(typ, "charset=") { + typ = removeCharsetInMimeType(typ) + } + } + return typ +} + +// Remove charset from mime type +func removeCharsetInMimeType(typ string) (str string) { + temArr := strings.Split(typ, ";") + var builder strings.Builder + for i, s := range temArr { + tmpStr := strings.Trim(s, " ") + if strings.Contains(tmpStr, "charset=") { + continue + } + if i == 0 { + builder.WriteString(s) + } else { + builder.WriteString("; " + s) + } + } + return builder.String() +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/utils_pool.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/utils_pool.go new file mode 100644 index 000000000..55ea6bb64 --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/utils_pool.go @@ -0,0 +1,248 @@ +package oss + +import ( + "context" + "fmt" + "sync" +) + +type byteSlicePool interface { + Get(context.Context) (*[]byte, error) + Put(*[]byte) + ModifyCapacity(int) + SliceSize() int64 + Close() +} + +type maxSlicePool struct { + allocator sliceAllocator + + slices chan *[]byte + allocations chan struct{} + capacityChange chan struct{} + + max int + sliceSize int64 + + mtx sync.RWMutex +} + +func newMaxSlicePool(sliceSize int64) *maxSlicePool { + p := &maxSlicePool{sliceSize: sliceSize} + p.allocator = p.newSlice + + return p +} + +var errZeroCapacity = fmt.Errorf("get called on zero capacity pool") + +func (p *maxSlicePool) Get(ctx context.Context) (*[]byte, error) { + // check if context is canceled before attempting to get a slice + // this ensures priority is given to the cancel case first + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + } + + p.mtx.RLock() + + for { + select { + case bs, ok := <-p.slices: + p.mtx.RUnlock() + if !ok { + // attempt to get on a zero capacity pool + return nil, errZeroCapacity + } + return bs, nil + case <-ctx.Done(): + p.mtx.RUnlock() + return nil, ctx.Err() + default: + // pass + } + + select { + case _, ok := <-p.allocations: + p.mtx.RUnlock() + if !ok { + // attempt to get on a zero capacity pool + return nil, errZeroCapacity + } + return p.allocator(), nil + case <-ctx.Done(): + p.mtx.RUnlock() + return nil, ctx.Err() + default: + // In the event that there are no slices or allocations available + // This prevents some deadlock situations that can occur around sync.RWMutex + // When a lock request occurs on ModifyCapacity, no new readers are allowed to acquire a read lock. + // By releasing the read lock here and waiting for a notification, we prevent a deadlock situation where + // Get could hold the read lock indefinitely waiting for capacity, ModifyCapacity is waiting for a write lock, + // and a Put is blocked trying to get a read-lock which is blocked by ModifyCapacity. + + // Short-circuit if the pool capacity is zero. + if p.max == 0 { + p.mtx.RUnlock() + return nil, errZeroCapacity + } + + // Since we will be releasing the read-lock we need to take the reference to the channel. + // Since channels are references we will still get notified if slices are added, or if + // the channel is closed due to a capacity modification. 
This specifically avoids a data race condition
+			// where ModifyCapacity both closes a channel and initializes a new one while we don't have a read-lock.
+			c := p.capacityChange
+
+			p.mtx.RUnlock()
+
+			select {
+			case _ = <-c:
+				p.mtx.RLock()
+			case <-ctx.Done():
+				return nil, ctx.Err()
+			}
+		}
+	}
+}
+
+func (p *maxSlicePool) Put(bs *[]byte) {
+	p.mtx.RLock()
+	defer p.mtx.RUnlock()
+
+	if p.max == 0 {
+		return
+	}
+
+	select {
+	case p.slices <- bs:
+		p.notifyCapacity()
+	default:
+		// If the channel blocks when attempting to add the slice then we drop the slice.
+		// The logic here is to prevent a deadlock situation if the channel is already at max capacity.
+		// Allows us to reap allocations that are returned and are no longer needed.
+	}
+}
+
+func (p *maxSlicePool) ModifyCapacity(delta int) {
+	if delta == 0 {
+		return
+	}
+
+	p.mtx.Lock()
+	defer p.mtx.Unlock()
+
+	p.max += delta
+
+	if p.max == 0 {
+		p.empty()
+		return
+	}
+
+	if p.capacityChange != nil {
+		close(p.capacityChange)
+	}
+	p.capacityChange = make(chan struct{}, p.max)
+
+	origAllocations := p.allocations
+	p.allocations = make(chan struct{}, p.max)
+
+	newAllocs := len(origAllocations) + delta
+	for i := 0; i < newAllocs; i++ {
+		p.allocations <- struct{}{}
+	}
+
+	if origAllocations != nil {
+		close(origAllocations)
+	}
+
+	origSlices := p.slices
+	p.slices = make(chan *[]byte, p.max)
+	if origSlices == nil {
+		return
+	}
+
+	close(origSlices)
+	for bs := range origSlices {
+		select {
+		case p.slices <- bs:
+		default:
+			// If the new channel blocks while adding slices from the old channel
+			// then we drop the slice. The logic here is to prevent a deadlock situation
+			// if the new channel has a smaller capacity than the old.
+		}
+	}
+}
+
+func (p *maxSlicePool) notifyCapacity() {
+	select {
+	case p.capacityChange <- struct{}{}:
+	default:
+		// This *shouldn't* happen as the channel is both buffered to the max pool capacity size and is resized
+		// on capacity modifications. This is just a safety to ensure that a blocking situation can't occur.
+ } +} + +func (p *maxSlicePool) SliceSize() int64 { + return p.sliceSize +} + +func (p *maxSlicePool) Close() { + p.mtx.Lock() + defer p.mtx.Unlock() + p.empty() +} + +func (p *maxSlicePool) empty() { + p.max = 0 + + if p.capacityChange != nil { + close(p.capacityChange) + p.capacityChange = nil + } + + if p.allocations != nil { + close(p.allocations) + for range p.allocations { + // drain channel + } + p.allocations = nil + } + + if p.slices != nil { + close(p.slices) + for range p.slices { + // drain channel + } + p.slices = nil + } +} + +func (p *maxSlicePool) newSlice() *[]byte { + bs := make([]byte, p.sliceSize) + return &bs +} + +type returnCapacityPoolCloser struct { + byteSlicePool + returnCapacity int +} + +func (n *returnCapacityPoolCloser) ModifyCapacity(delta int) { + if delta > 0 { + n.returnCapacity = -1 * delta + } + n.byteSlicePool.ModifyCapacity(delta) +} + +func (n *returnCapacityPoolCloser) Close() { + if n.returnCapacity < 0 { + n.byteSlicePool.ModifyCapacity(n.returnCapacity) + } +} + +type sliceAllocator func() *[]byte + +var newByteSlicePool = func(sliceSize int64) byteSlicePool { + return newMaxSlicePool(sliceSize) +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/validation.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/validation.go new file mode 100644 index 000000000..e91f1c6be --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/validation.go @@ -0,0 +1,84 @@ +package oss + +import ( + "net/url" + "strings" +) + +func isValidRegion(region string) bool { + for _, v := range region { + if !(('a' <= v && v <= 'z') || ('0' <= v && v <= '9') || v == '-') { + return false + } + } + return region != "" +} + +func isValidEndpoint(endpoint *url.URL) bool { + return (endpoint != nil) +} + +func isValidBucketName(bucketName *string) bool { + if bucketName == nil { + return false + } + + nameLen := len(*bucketName) + if nameLen < 3 || nameLen > 63 { + return false + } + + if (*bucketName)[0] == '-' || (*bucketName)[nameLen-1] == '-' { + return false + } + + for _, v := range *bucketName { + if !(('a' <= v && v <= 'z') || ('0' <= v && v <= '9') || v == '-') { + return false + } + } + return true +} + +func isValidObjectName(objectName *string) bool { + if objectName == nil || len(*objectName) == 0 { + return false + } + return true +} + +func isValidRange(r *string) bool { + if _, err := ParseRange(*r); err != nil { + return false + } + return true +} + +var supportedMethod = map[string]struct{}{ + "GET": {}, + "PUT": {}, + "HEAD": {}, + "POST": {}, + "DELETE": {}, + "OPTIONS": {}, +} + +func isValidMethod(method string) bool { + if _, ok := supportedMethod[method]; ok { + return true + } + return false +} + +var supportedCopyDirective = map[string]struct{}{ + "COPY": {}, + "REPLACE": {}, +} + +func isValidCopyDirective(value string) bool { + upper := strings.ToUpper(value) + if _, ok := supportedCopyDirective[upper]; ok { + return true + } + return false +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/version.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/version.go new file mode 100644 index 000000000..0f2dd3057 --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/version.go @@ -0,0 +1,34 @@ +package oss + +import ( + "bytes" + "fmt" + "sync" +) + +const ( + major = "1" + minor = "2" + patch = "3" + tag = "" + + SdkName = "alibabacloud-go-sdk-v2" +) + +var once sync.Once +var version string + +func Version() string { + once.Do(func() { + ver := 
fmt.Sprintf("%s.%s.%s", major, minor, patch) + verBuilder := bytes.NewBufferString(ver) + if tag != "" && tag != "-" { + _, err := verBuilder.WriteString(tag) + if err != nil { + verBuilder = bytes.NewBufferString(ver) + } + } + version = verBuilder.String() + }) + return version +} diff --git a/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/xml_utils.go b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/xml_utils.go new file mode 100644 index 000000000..34aaa48f8 --- /dev/null +++ b/vendor/github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/xml_utils.go @@ -0,0 +1,246 @@ +package oss + +import ( + "encoding/xml" + "errors" + "fmt" + "io" + "strings" + "unicode" + "unicode/utf8" +) + +type XmlDecoderLite struct { + reader io.Reader + attributePrefix string + useRawToken bool +} + +func NewXmlDecoderLite(r io.Reader) *XmlDecoderLite { + return &XmlDecoderLite{ + reader: r, + attributePrefix: "+@", + useRawToken: true, + } +} + +func (dec *XmlDecoderLite) Decode(root *XmlNode) error { + return dec.decodeXML(root) +} + +type XmlNode struct { + Children []*XmlChildren + Data []string +} + +type XmlChildren struct { + K string + V []*XmlNode +} + +func (n *XmlNode) addChild(s string, c *XmlNode) { + if n.Children == nil { + n.Children = make([]*XmlChildren, 0) + } + for _, childEntry := range n.Children { + if childEntry.K == s { + childEntry.V = append(childEntry.V, c) + return + } + } + n.Children = append(n.Children, &XmlChildren{K: s, V: []*XmlNode{c}}) +} + +func (n *XmlNode) value() any { + if len(n.Children) > 0 { + return n.GetMap() + } + if n.Data != nil { + return n.Data[0] + } + return nil +} + +func (n *XmlNode) GetMap() map[string]any { + node := map[string]any{} + for _, kv := range n.Children { + label := kv.K + children := kv.V + if len(children) > 1 { + vals := make([]any, 0) + for _, child := range children { + vals = append(vals, child.value()) + } + node[label] = vals + } else { + node[label] = children[0].value() + } + } + return node +} + +type element struct { + parent *element + n *XmlNode + label string +} + +func (dec *XmlDecoderLite) decodeXML(root *XmlNode) error { + xmlDec := xml.NewDecoder(dec.reader) + + started := false + + // Create first element from the root node + elem := &element{ + parent: nil, + n: root, + } + + getToken := func() (xml.Token, error) { + if dec.useRawToken { + return xmlDec.RawToken() + } + return xmlDec.Token() + } + + for { + t, e := getToken() + if e != nil && !errors.Is(e, io.EOF) { + return e + } + if t == nil { + break + } + + switch se := t.(type) { + case xml.StartElement: + elem = &element{ + parent: elem, + n: &XmlNode{}, + label: se.Name.Local, + } + + for _, a := range se.Attr { + elem.n.addChild(dec.attributePrefix+a.Name.Local, &XmlNode{Data: []string{a.Value}}) + } + case xml.CharData: + newBit := trimNonGraphic(string(se)) + if !started && len(newBit) > 0 { + return fmt.Errorf("invalid XML: Encountered chardata [%v] outside of XML node", newBit) + } + + if len(newBit) > 0 { + elem.n.Data = append(elem.n.Data, newBit) + } + case xml.EndElement: + if elem.parent != nil { + elem.parent.n.addChild(elem.label, elem.n) + } + elem = elem.parent + } + started = true + } + + return nil +} + +func trimNonGraphic(s string) string { + if s == "" { + return s + } + + var first *int + var last int + for i, r := range []rune(s) { + if !unicode.IsGraphic(r) || unicode.IsSpace(r) { + continue + } + + if first == nil { + f := i + first = &f + last = i + } else { + last = i + } + } + + if first == nil { + return "" + } + + return 
string([]rune(s)[*first : last+1])
+}
+
+var (
+	escQuot = []byte("&#34;") // shorter than "&quot;"
+	escApos = []byte("&#39;") // shorter than "&apos;"
+	escAmp  = []byte("&amp;")
+	escLT   = []byte("&lt;")
+	escGT   = []byte("&gt;")
+	escTab  = []byte("&#x9;")
+	escNL   = []byte("&#xA;")
+	escCR   = []byte("&#xD;")
+	escFFFD = []byte("\uFFFD") // Unicode replacement character
+)
+
+// escapeXml returns the properly escaped XML equivalent
+// of the plain text data s.
+func escapeXml(s string) string {
+	var p strings.Builder
+	var esc []byte
+	hextable := "0123456789ABCDEF"
+	escPattern := []byte("&#x00;")
+	last := 0
+	for i := 0; i < len(s); {
+		r, width := utf8.DecodeRuneInString(s[i:])
+		i += width
+		switch r {
+		case '"':
+			esc = escQuot
+		case '\'':
+			esc = escApos
+		case '&':
+			esc = escAmp
+		case '<':
+			esc = escLT
+		case '>':
+			esc = escGT
+		case '\t':
+			esc = escTab
+		case '\n':
+			esc = escNL
+		case '\r':
+			esc = escCR
+		default:
+			if !isInCharacterRange(r) || (r == 0xFFFD && width == 1) {
+				if r >= 0x00 && r < 0x20 {
+					escPattern[3] = hextable[r>>4]
+					escPattern[4] = hextable[r&0x0f]
+					esc = escPattern
+				} else {
+					esc = escFFFD
+				}
+				break
+			}
+			continue
+		}
+		p.WriteString(s[last : i-width])
+		p.Write(esc)
+		last = i
+	}
+	p.WriteString(s[last:])
+	return p.String()
+}
+
+// Decide whether the given rune is in the XML Character Range, per
+// the Char production of https://www.xml.com/axml/testaxml.htm,
+// Section 2.2 Characters.
+func isInCharacterRange(r rune) (inrange bool) {
+	return r == 0x09 ||
+		r == 0x0A ||
+		r == 0x0D ||
+		r >= 0x20 && r <= 0xD7FF ||
+		r >= 0xE000 && r <= 0xFFFD ||
+		r >= 0x10000 && r <= 0x10FFFF
+}
diff --git a/vendor/github.com/aliyun/credentials-go/LICENSE b/vendor/github.com/aliyun/credentials-go/LICENSE
new file mode 100644
index 000000000..0c44dcefe
--- /dev/null
+++ b/vendor/github.com/aliyun/credentials-go/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright (c) 2009-present, Alibaba Cloud All rights reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
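The credentials-go providers vendored below all share one builder-style API: construct a concrete provider through its *Builder type, then call GetCredentials() to resolve an AccessKeyId/AccessKeySecret/SecurityToken triple. The following is a minimal, illustrative usage sketch, not part of this patch; it assumes only the types and methods defined in the vendored provider files that follow.

package main

import (
	"fmt"
	"log"

	"github.com/aliyun/credentials-go/credentials/providers"
)

func main() {
	// Reads ~/.aliyun/config.json (or the file named by ALIBABA_CLOUD_CONFIG_FILE);
	// omit WithProfileName to fall back to the CLI's current profile.
	provider, err := providers.NewCLIProfileCredentialsProviderBuilder().
		WithProfileName("default").
		Build()
	if err != nil {
		log.Fatal(err)
	}

	// GetCredentials lazily builds the inner provider matching the profile's
	// "mode" (AK, StsToken, RamRoleArn, OIDC, CloudSSO, ...) and delegates to it.
	cred, err := provider.GetCredentials()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(cred.AccessKeyId, cred.ProviderName) // ProviderName is "cli_profile/<inner provider>"
}

Because the CLI-profile provider delegates to whichever inner provider matches the profile's mode, the ProviderName it reports is the "cli_profile/<inner>" pair built in GetCredentials below.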
diff --git a/vendor/github.com/aliyun/credentials-go/credentials/internal/http/http.go b/vendor/github.com/aliyun/credentials-go/credentials/internal/http/http.go new file mode 100644 index 000000000..3d5ed0f2d --- /dev/null +++ b/vendor/github.com/aliyun/credentials-go/credentials/internal/http/http.go @@ -0,0 +1,145 @@ +package http + +import ( + "bytes" + "context" + "fmt" + "io" + "io/ioutil" + "net" + "net/http" + "net/url" + "strings" + "time" + + "github.com/alibabacloud-go/debug/debug" + "github.com/aliyun/credentials-go/credentials/internal/utils" +) + +type Request struct { + Method string // http request method + URL string // http url + Protocol string // http or https + Host string // http host + ReadTimeout time.Duration + ConnectTimeout time.Duration + Proxy string // http proxy + Form map[string]string // http form + Body []byte // request body for JSON or stream + Path string + Queries map[string]string + Headers map[string]string +} + +func (req *Request) BuildRequestURL() string { + httpUrl := fmt.Sprintf("%s://%s%s", req.Protocol, req.Host, req.Path) + if req.URL != "" { + httpUrl = req.URL + } + + querystring := utils.GetURLFormedMap(req.Queries) + if querystring != "" { + httpUrl = httpUrl + "?" + querystring + } + + return fmt.Sprintf("%s %s", req.Method, httpUrl) +} + +type Response struct { + StatusCode int + Headers map[string]string + Body []byte +} + +var newRequest = http.NewRequest + +type do func(req *http.Request) (*http.Response, error) + +var hookDo = func(fn do) do { + return fn +} + +var debuglog = debug.Init("credential") + +func Do(req *Request) (res *Response, err error) { + querystring := utils.GetURLFormedMap(req.Queries) + // do request + httpUrl := fmt.Sprintf("%s://%s%s?%s", req.Protocol, req.Host, req.Path, querystring) + if req.URL != "" { + httpUrl = req.URL + } + + var body io.Reader + if req.Method == "GET" { + body = strings.NewReader("") + } else if req.Body != nil { + body = bytes.NewReader(req.Body) + } else { + body = strings.NewReader(utils.GetURLFormedMap(req.Form)) + } + + httpRequest, err := newRequest(req.Method, httpUrl, body) + if err != nil { + return + } + + if req.Form != nil { + httpRequest.Header["Content-Type"] = []string{"application/x-www-form-urlencoded"} + } + + for key, value := range req.Headers { + if value != "" { + debuglog("> %s: %s", key, value) + httpRequest.Header.Set(key, value) + } + } + + httpClient := &http.Client{} + + if req.ReadTimeout != 0 { + httpClient.Timeout = req.ReadTimeout + req.ConnectTimeout + } + + transport := http.DefaultTransport.(*http.Transport).Clone() + if req.Proxy != "" { + var proxy *url.URL + proxy, err = url.Parse(req.Proxy) + if err != nil { + return + } + transport.Proxy = http.ProxyURL(proxy) + } + + if req.ConnectTimeout != 0 { + transport.DialContext = func(ctx context.Context, network, address string) (net.Conn, error) { + return (&net.Dialer{ + Timeout: req.ConnectTimeout, + DualStack: true, + }).DialContext(ctx, network, address) + } + } + + httpClient.Transport = transport + + httpResponse, err := hookDo(httpClient.Do)(httpRequest) + if err != nil { + return + } + + defer httpResponse.Body.Close() + + responseBody, err := ioutil.ReadAll(httpResponse.Body) + if err != nil { + return + } + res = &Response{ + StatusCode: httpResponse.StatusCode, + Headers: make(map[string]string), + Body: responseBody, + } + for key, v := range httpResponse.Header { + res.Headers[key] = v[0] + } + + return +} diff --git 
a/vendor/github.com/aliyun/credentials-go/credentials/internal/utils/path.go b/vendor/github.com/aliyun/credentials-go/credentials/internal/utils/path.go new file mode 100644 index 000000000..a94088c6b --- /dev/null +++ b/vendor/github.com/aliyun/credentials-go/credentials/internal/utils/path.go @@ -0,0 +1,18 @@ +package utils + +import ( + "os" + "runtime" +) + +var getOS = func() string { + return runtime.GOOS +} + +func GetHomePath() string { + if getOS() == "windows" { + return os.Getenv("USERPROFILE") + } + + return os.Getenv("HOME") +} diff --git a/vendor/github.com/aliyun/credentials-go/credentials/internal/utils/runtime.go b/vendor/github.com/aliyun/credentials-go/credentials/internal/utils/runtime.go new file mode 100644 index 000000000..432395cf4 --- /dev/null +++ b/vendor/github.com/aliyun/credentials-go/credentials/internal/utils/runtime.go @@ -0,0 +1,36 @@ +package utils + +import ( + "context" + "net" + "time" +) + +// Runtime is for setting timeout, proxy and host +type Runtime struct { + ReadTimeout int + ConnectTimeout int + Proxy string + Host string + STSEndpoint string +} + +// NewRuntime returns a Runtime +func NewRuntime(readTimeout, connectTimeout int, proxy string, host string) *Runtime { + return &Runtime{ + ReadTimeout: readTimeout, + ConnectTimeout: connectTimeout, + Proxy: proxy, + Host: host, + } +} + +// Timeout is for connect Timeout +func Timeout(connectTimeout time.Duration) func(cxt context.Context, net, addr string) (c net.Conn, err error) { + return func(ctx context.Context, network, address string) (net.Conn, error) { + return (&net.Dialer{ + Timeout: connectTimeout, + DualStack: true, + }).DialContext(ctx, network, address) + } +} diff --git a/vendor/github.com/aliyun/credentials-go/credentials/internal/utils/utils.go b/vendor/github.com/aliyun/credentials-go/credentials/internal/utils/utils.go new file mode 100644 index 000000000..fffee1eda --- /dev/null +++ b/vendor/github.com/aliyun/credentials-go/credentials/internal/utils/utils.go @@ -0,0 +1,204 @@ +package utils + +import ( + "bytes" + "crypto" + "crypto/hmac" + "crypto/md5" + "crypto/rand" + "crypto/rsa" + "crypto/sha1" + "crypto/x509" + "encoding/base64" + "encoding/hex" + "fmt" + "hash" + "io" + mathrand "math/rand" + "net/url" + "os" + "runtime" + "strconv" + "sync/atomic" + "time" +) + +type uuid [16]byte + +const letterBytes = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" + +var hookRead = func(fn func(p []byte) (n int, err error)) func(p []byte) (n int, err error) { + return fn +} + +var hookRSA = func(fn func(rand io.Reader, priv *rsa.PrivateKey, hash crypto.Hash, hashed []byte) ([]byte, error)) func(rand io.Reader, priv *rsa.PrivateKey, hash crypto.Hash, hashed []byte) ([]byte, error) { + return fn +} + +// GetUUID returns a uuid +func GetUUID() (uuidHex string) { + uuid := newUUID() + uuidHex = hex.EncodeToString(uuid[:]) + return +} + +// RandStringBytes returns a rand string +func RandStringBytes(n int) string { + b := make([]byte, n) + for i := range b { + b[i] = letterBytes[mathrand.Intn(len(letterBytes))] + } + return string(b) +} + +// ShaHmac1 return a string which has been hashed +func ShaHmac1(source, secret string) string { + key := []byte(secret) + hmac := hmac.New(sha1.New, key) + hmac.Write([]byte(source)) + signedBytes := hmac.Sum(nil) + signedString := base64.StdEncoding.EncodeToString(signedBytes) + return signedString +} + +// Sha256WithRsa return a string which has been hashed with Rsa +func Sha256WithRsa(source, secret string) string { + decodeString, err := 
base64.StdEncoding.DecodeString(secret)
+	if err != nil {
+		panic(err)
+	}
+	private, err := x509.ParsePKCS8PrivateKey(decodeString)
+	if err != nil {
+		panic(err)
+	}
+
+	h := crypto.Hash.New(crypto.SHA256)
+	h.Write([]byte(source))
+	hashed := h.Sum(nil)
+	signature, err := hookRSA(rsa.SignPKCS1v15)(rand.Reader, private.(*rsa.PrivateKey),
+		crypto.SHA256, hashed)
+	if err != nil {
+		panic(err)
+	}
+
+	return base64.StdEncoding.EncodeToString(signature)
+}
+
+// GetMD5Base64 returns a string which has been base64 encoded
+func GetMD5Base64(bytes []byte) (base64Value string) {
+	md5Ctx := md5.New()
+	md5Ctx.Write(bytes)
+	md5Value := md5Ctx.Sum(nil)
+	base64Value = base64.StdEncoding.EncodeToString(md5Value)
+	return
+}
+
+// GetTimeInFormatISO8601 returns a time string
+func GetTimeInFormatISO8601() (timeStr string) {
+	gmt := time.FixedZone("GMT", 0)
+
+	return time.Now().In(gmt).Format("2006-01-02T15:04:05Z")
+}
+
+// GetURLFormedMap returns a url encoded string
+func GetURLFormedMap(source map[string]string) (urlEncoded string) {
+	urlEncoder := url.Values{}
+	for key, value := range source {
+		urlEncoder.Add(key, value)
+	}
+	urlEncoded = urlEncoder.Encode()
+	return
+}
+
+func newUUID() uuid {
+	ns := uuid{}
+	safeRandom(ns[:])
+	u := newFromHash(md5.New(), ns, RandStringBytes(16))
+	u[6] = (u[6] & 0x0f) | (byte(2) << 4)
+	u[8] = (u[8]&(0xff>>2) | (0x02 << 6))
+
+	return u
+}
+
+func newFromHash(h hash.Hash, ns uuid, name string) uuid {
+	u := uuid{}
+	h.Write(ns[:])
+	h.Write([]byte(name))
+	copy(u[:], h.Sum(nil))
+
+	return u
+}
+
+func safeRandom(dest []byte) {
+	if _, err := hookRead(rand.Read)(dest); err != nil {
+		panic(err)
+	}
+}
+
+func (u uuid) String() string {
+	buf := make([]byte, 36)
+
+	hex.Encode(buf[0:8], u[0:4])
+	buf[8] = '-'
+	hex.Encode(buf[9:13], u[4:6])
+	buf[13] = '-'
+	hex.Encode(buf[14:18], u[6:8])
+	buf[18] = '-'
+	hex.Encode(buf[19:23], u[8:10])
+	buf[23] = '-'
+	hex.Encode(buf[24:], u[10:])
+
+	return string(buf)
+}
+
+var processStartTime int64 = time.Now().UnixNano() / 1e6
+var seqId int64 = 0
+
+func getGID() uint64 {
+	// https://blog.sgmansfield.com/2015/12/goroutine-ids/
+	b := make([]byte, 64)
+	b = b[:runtime.Stack(b, false)]
+	b = bytes.TrimPrefix(b, []byte("goroutine "))
+	b = b[:bytes.IndexByte(b, ' ')]
+	n, _ := strconv.ParseUint(string(b), 10, 64)
+	return n
+}
+
+func GetNonce() (uuidHex string) {
+	routineId := getGID()
+	currentTime := time.Now().UnixNano() / 1e6
+	seq := atomic.AddInt64(&seqId, 1)
+	randNum := mathrand.Int63()
+	msg := fmt.Sprintf("%d-%d-%d-%d-%d", processStartTime, routineId, currentTime, seq, randNum)
+	h := md5.New()
+	h.Write([]byte(msg))
+	return hex.EncodeToString(h.Sum(nil))
+}
+
+// Get first non-empty value
+func GetDefaultString(values ...string) string {
+	for _, v := range values {
+		if v != "" {
+			return v
+		}
+	}
+
+	return ""
+}
+
+// set back the memorized environment variables
+type Rollback func()
+
+func Memory(keys ...string) Rollback {
+	// remember environment variables
+	m := make(map[string]string)
+	for _, key := range keys {
+		m[key] = os.Getenv(key)
+	}
+
+	return func() {
+		for _, key := range keys {
+			os.Setenv(key, m[key])
+		}
+	}
+}
diff --git a/vendor/github.com/aliyun/credentials-go/credentials/providers/cli_profile.go b/vendor/github.com/aliyun/credentials-go/credentials/providers/cli_profile.go
new file mode 100644
index 000000000..facac3181
--- /dev/null
+++ b/vendor/github.com/aliyun/credentials-go/credentials/providers/cli_profile.go
@@ -0,0 +1,266 @@
+package providers
+
+import (
"encoding/json" + "errors" + "fmt" + "io/ioutil" + "os" + "path" + "strings" + + "github.com/aliyun/credentials-go/credentials/internal/utils" +) + +type CLIProfileCredentialsProvider struct { + profileFile string + profileName string + innerProvider CredentialsProvider +} + +type CLIProfileCredentialsProviderBuilder struct { + provider *CLIProfileCredentialsProvider +} + +func (b *CLIProfileCredentialsProviderBuilder) WithProfileFile(profileFile string) *CLIProfileCredentialsProviderBuilder { + b.provider.profileFile = profileFile + return b +} + +func (b *CLIProfileCredentialsProviderBuilder) WithProfileName(profileName string) *CLIProfileCredentialsProviderBuilder { + b.provider.profileName = profileName + return b +} + +func (b *CLIProfileCredentialsProviderBuilder) Build() (provider *CLIProfileCredentialsProvider, err error) { + // 优先级: + // 1. 使用显示指定的 profileFile + // 2. 使用环境变量(ALIBABA_CLOUD_CONFIG_FILE)指定的 profileFile + // 3. 兜底使用 path.Join(homeDir, ".aliyun/config") 作为 profileFile + if b.provider.profileFile == "" { + b.provider.profileFile = os.Getenv("ALIBABA_CLOUD_CONFIG_FILE") + } + // 优先级: + // 1. 使用显示指定的 profileName + // 2. 使用环境变量(ALIBABA_CLOUD_PROFILE)制定的 profileName + // 3. 使用 CLI 配置中的当前 profileName + if b.provider.profileName == "" { + b.provider.profileName = os.Getenv("ALIBABA_CLOUD_PROFILE") + } + + if strings.ToLower(os.Getenv("ALIBABA_CLOUD_CLI_PROFILE_DISABLED")) == "true" { + err = errors.New("the CLI profile is disabled") + return + } + + provider = b.provider + return +} + +func NewCLIProfileCredentialsProviderBuilder() *CLIProfileCredentialsProviderBuilder { + return &CLIProfileCredentialsProviderBuilder{ + provider: &CLIProfileCredentialsProvider{}, + } +} + +type profile struct { + Name string `json:"name"` + Mode string `json:"mode"` + AccessKeyID string `json:"access_key_id"` + AccessKeySecret string `json:"access_key_secret"` + SecurityToken string `json:"sts_token"` + RegionID string `json:"region_id"` + RoleArn string `json:"ram_role_arn"` + RoleSessionName string `json:"ram_session_name"` + DurationSeconds int `json:"expired_seconds"` + StsRegion string `json:"sts_region"` + EnableVpc bool `json:"enable_vpc"` + SourceProfile string `json:"source_profile"` + RoleName string `json:"ram_role_name"` + OIDCTokenFile string `json:"oidc_token_file"` + OIDCProviderARN string `json:"oidc_provider_arn"` + Policy string `json:"policy"` + ExternalId string `json:"external_id"` + SignInUrl string `json:"cloud_sso_sign_in_url"` + AccountId string `json:"cloud_sso_account_id"` + AccessConfig string `json:"cloud_sso_access_config"` + AccessToken string `json:"access_token"` + AccessTokenExpire int64 `json:"cloud_sso_access_token_expire"` +} + +type configuration struct { + Current string `json:"current"` + Profiles []*profile `json:"profiles"` +} + +func newConfigurationFromPath(cfgPath string) (conf *configuration, err error) { + bytes, err := ioutil.ReadFile(cfgPath) + if err != nil { + err = fmt.Errorf("reading aliyun cli config from '%s' failed %v", cfgPath, err) + return + } + + conf = &configuration{} + + err = json.Unmarshal(bytes, conf) + if err != nil { + err = fmt.Errorf("unmarshal aliyun cli config from '%s' failed: %s", cfgPath, string(bytes)) + return + } + + if conf.Profiles == nil || len(conf.Profiles) == 0 { + err = fmt.Errorf("no any configured profiles in '%s'", cfgPath) + return + } + + return +} + +func (conf *configuration) getProfile(name string) (profile *profile, err error) { + for _, p := range conf.Profiles { + if p.Name == name { + profile = p 
+ return + } + } + + err = fmt.Errorf("unable to get profile with '%s'", name) + return +} + +func (provider *CLIProfileCredentialsProvider) getCredentialsProvider(conf *configuration, profileName string) (credentialsProvider CredentialsProvider, err error) { + p, err := conf.getProfile(profileName) + if err != nil { + return + } + + switch p.Mode { + case "AK": + credentialsProvider, err = NewStaticAKCredentialsProviderBuilder(). + WithAccessKeyId(p.AccessKeyID). + WithAccessKeySecret(p.AccessKeySecret). + Build() + case "StsToken": + credentialsProvider, err = NewStaticSTSCredentialsProviderBuilder(). + WithAccessKeyId(p.AccessKeyID). + WithAccessKeySecret(p.AccessKeySecret). + WithSecurityToken(p.SecurityToken). + Build() + case "RamRoleArn": + previousProvider, err1 := NewStaticAKCredentialsProviderBuilder(). + WithAccessKeyId(p.AccessKeyID). + WithAccessKeySecret(p.AccessKeySecret). + Build() + if err1 != nil { + return nil, err1 + } + + credentialsProvider, err = NewRAMRoleARNCredentialsProviderBuilder(). + WithCredentialsProvider(previousProvider). + WithRoleArn(p.RoleArn). + WithRoleSessionName(p.RoleSessionName). + WithDurationSeconds(p.DurationSeconds). + WithStsRegionId(p.StsRegion). + WithEnableVpc(p.EnableVpc). + WithPolicy(p.Policy). + WithExternalId(p.ExternalId). + Build() + case "EcsRamRole": + credentialsProvider, err = NewECSRAMRoleCredentialsProviderBuilder().WithRoleName(p.RoleName).Build() + case "OIDC": + credentialsProvider, err = NewOIDCCredentialsProviderBuilder(). + WithOIDCTokenFilePath(p.OIDCTokenFile). + WithOIDCProviderARN(p.OIDCProviderARN). + WithRoleArn(p.RoleArn). + WithStsRegionId(p.StsRegion). + WithEnableVpc(p.EnableVpc). + WithDurationSeconds(p.DurationSeconds). + WithRoleSessionName(p.RoleSessionName). + WithPolicy(p.Policy). + Build() + case "ChainableRamRoleArn": + previousProvider, err1 := provider.getCredentialsProvider(conf, p.SourceProfile) + if err1 != nil { + err = fmt.Errorf("get source profile failed: %s", err1.Error()) + return + } + credentialsProvider, err = NewRAMRoleARNCredentialsProviderBuilder(). + WithCredentialsProvider(previousProvider). + WithRoleArn(p.RoleArn). + WithRoleSessionName(p.RoleSessionName). + WithDurationSeconds(p.DurationSeconds). + WithStsRegionId(p.StsRegion). + WithEnableVpc(p.EnableVpc). + WithPolicy(p.Policy). + WithExternalId(p.ExternalId). + Build() + case "CloudSSO": + credentialsProvider, err = NewCloudSSOCredentialsProviderBuilder(). + WithSignInUrl(p.SignInUrl). + WithAccountId(p.AccountId). + WithAccessConfig(p.AccessConfig). + WithAccessToken(p.AccessToken). + WithAccessTokenExpire(p.AccessTokenExpire). 
+			Build()
+	default:
+		err = fmt.Errorf("unsupported profile mode '%s'", p.Mode)
+	}
+
+	return
+}
+
+// Defaults to GetHomePath; this indirection makes it easy to mock in tests.
+var getHomePath = utils.GetHomePath
+
+func (provider *CLIProfileCredentialsProvider) GetCredentials() (cc *Credentials, err error) {
+	if provider.innerProvider == nil {
+		cfgPath := provider.profileFile
+		if cfgPath == "" {
+			homeDir := getHomePath()
+			if homeDir == "" {
+				err = fmt.Errorf("cannot found home dir")
+				return
+			}
+
+			cfgPath = path.Join(homeDir, ".aliyun/config.json")
+		}
+
+		conf, err1 := newConfigurationFromPath(cfgPath)
+		if err1 != nil {
+			err = err1
+			return
+		}
+
+		if provider.profileName == "" {
+			provider.profileName = conf.Current
+		}
+
+		provider.innerProvider, err = provider.getCredentialsProvider(conf, provider.profileName)
+		if err != nil {
+			return
+		}
+	}
+
+	innerCC, err := provider.innerProvider.GetCredentials()
+	if err != nil {
+		return
+	}
+
+	providerName := innerCC.ProviderName
+	if providerName == "" {
+		providerName = provider.innerProvider.GetProviderName()
+	}
+
+	cc = &Credentials{
+		AccessKeyId:     innerCC.AccessKeyId,
+		AccessKeySecret: innerCC.AccessKeySecret,
+		SecurityToken:   innerCC.SecurityToken,
+		ProviderName:    fmt.Sprintf("%s/%s", provider.GetProviderName(), providerName),
+	}
+
+	return
+}
+
+func (provider *CLIProfileCredentialsProvider) GetProviderName() string {
+	return "cli_profile"
+}
diff --git a/vendor/github.com/aliyun/credentials-go/credentials/providers/cloud_sso.go b/vendor/github.com/aliyun/credentials-go/credentials/providers/cloud_sso.go
new file mode 100644
index 000000000..7bc29b243
--- /dev/null
+++ b/vendor/github.com/aliyun/credentials-go/credentials/providers/cloud_sso.go
@@ -0,0 +1,216 @@
+package providers
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"net/http"
+	"net/url"
+	"time"
+
+	httputil "github.com/aliyun/credentials-go/credentials/internal/http"
+)
+
+type CloudSSOCredentialsProvider struct {
+	signInUrl         string
+	accountId         string
+	accessConfig      string
+	accessToken       string
+	accessTokenExpire int64
+
+	lastUpdateTimestamp int64
+	expirationTimestamp int64
+	sessionCredentials  *sessionCredentials
+	// for http options
+	httpOptions *HttpOptions
+}
+
+type CloudSSOCredentialsProviderBuilder struct {
+	provider *CloudSSOCredentialsProvider
+}
+
+type cloudCredentialOptions struct {
+	AccountId             string `json:"AccountId"`
+	AccessConfigurationId string `json:"AccessConfigurationId"`
+}
+
+type cloudCredentials struct {
+	AccessKeyId     string `json:"AccessKeyId"`
+	AccessKeySecret string `json:"AccessKeySecret"`
+	SecurityToken   string `json:"SecurityToken"`
+	Expiration      string `json:"Expiration"`
+}
+
+type cloudCredentialResponse struct {
+	CloudCredential *cloudCredentials `json:"CloudCredential"`
+	RequestId       string            `json:"RequestId"`
+}
+
+func NewCloudSSOCredentialsProviderBuilder() *CloudSSOCredentialsProviderBuilder {
+	return &CloudSSOCredentialsProviderBuilder{
+		provider: &CloudSSOCredentialsProvider{},
+	}
+}
+
+func (b *CloudSSOCredentialsProviderBuilder) WithSignInUrl(signInUrl string) *CloudSSOCredentialsProviderBuilder {
+	b.provider.signInUrl = signInUrl
+	return b
+}
+
+func (b *CloudSSOCredentialsProviderBuilder) WithAccountId(accountId string) *CloudSSOCredentialsProviderBuilder {
+	b.provider.accountId = accountId
+	return b
+}
+
+func (b *CloudSSOCredentialsProviderBuilder) WithAccessConfig(accessConfig string) *CloudSSOCredentialsProviderBuilder {
+	b.provider.accessConfig = accessConfig
+	return b
+}
+
+func (b *CloudSSOCredentialsProviderBuilder)
WithAccessToken(accessToken string) *CloudSSOCredentialsProviderBuilder { + b.provider.accessToken = accessToken + return b +} + +func (b *CloudSSOCredentialsProviderBuilder) WithAccessTokenExpire(accessTokenExpire int64) *CloudSSOCredentialsProviderBuilder { + b.provider.accessTokenExpire = accessTokenExpire + return b +} + +func (b *CloudSSOCredentialsProviderBuilder) WithHttpOptions(httpOptions *HttpOptions) *CloudSSOCredentialsProviderBuilder { + b.provider.httpOptions = httpOptions + return b +} + +func (b *CloudSSOCredentialsProviderBuilder) Build() (provider *CloudSSOCredentialsProvider, err error) { + if b.provider.accessToken == "" || b.provider.accessTokenExpire == 0 || b.provider.accessTokenExpire-time.Now().Unix() <= 0 { + err = errors.New("CloudSSO access token is empty or expired, please re-login with cli") + return + } + + if b.provider.signInUrl == "" || b.provider.accountId == "" || b.provider.accessConfig == "" { + err = errors.New("CloudSSO sign in url or account id or access config is empty") + return + } + + provider = b.provider + return +} + +func (provider *CloudSSOCredentialsProvider) getCredentials() (session *sessionCredentials, err error) { + url, err := url.Parse(provider.signInUrl) + if err != nil { + return nil, err + } + + req := &httputil.Request{ + Method: "POST", + Protocol: url.Scheme, + Host: url.Host, + Path: "/cloud-credentials", + Headers: map[string]string{}, + } + + connectTimeout := 5 * time.Second + readTimeout := 10 * time.Second + + if provider.httpOptions != nil && provider.httpOptions.ConnectTimeout > 0 { + connectTimeout = time.Duration(provider.httpOptions.ConnectTimeout) * time.Millisecond + } + if provider.httpOptions != nil && provider.httpOptions.ReadTimeout > 0 { + readTimeout = time.Duration(provider.httpOptions.ReadTimeout) * time.Millisecond + } + if provider.httpOptions != nil && provider.httpOptions.Proxy != "" { + req.Proxy = provider.httpOptions.Proxy + } + req.ConnectTimeout = connectTimeout + req.ReadTimeout = readTimeout + + body := cloudCredentialOptions{ + AccountId: provider.accountId, + AccessConfigurationId: provider.accessConfig, + } + + bodyBytes, err := json.Marshal(body) + if err != nil { + return nil, fmt.Errorf("failed to marshal options: %w", err) + } + + req.Body = bodyBytes + + // set headers + req.Headers["Accept"] = "application/json" + req.Headers["Content-Type"] = "application/json" + req.Headers["Authorization"] = fmt.Sprintf("Bearer %s", provider.accessToken) + res, err := httpDo(req) + if err != nil { + return + } + + if res.StatusCode != http.StatusOK { + message := "get session token from sso failed: " + err = errors.New(message + string(res.Body)) + return + } + var data cloudCredentialResponse + err = json.Unmarshal(res.Body, &data) + if err != nil { + err = fmt.Errorf("get session token from sso failed, json.Unmarshal fail: %s", err.Error()) + return + } + if data.CloudCredential == nil { + err = fmt.Errorf("get session token from sso failed, fail to get credentials") + return + } + + if data.CloudCredential.AccessKeyId == "" || data.CloudCredential.AccessKeySecret == "" || data.CloudCredential.SecurityToken == "" { + err = fmt.Errorf("refresh session token err, fail to get credentials") + return + } + + session = &sessionCredentials{ + AccessKeyId: data.CloudCredential.AccessKeyId, + AccessKeySecret: data.CloudCredential.AccessKeySecret, + SecurityToken: data.CloudCredential.SecurityToken, + Expiration: data.CloudCredential.Expiration, + } + return +} + +func (provider *CloudSSOCredentialsProvider) 
needUpdateCredential() (result bool) { + if provider.expirationTimestamp == 0 { + return true + } + + return provider.expirationTimestamp-time.Now().Unix() <= 180 +} + +func (provider *CloudSSOCredentialsProvider) GetCredentials() (cc *Credentials, err error) { + if provider.sessionCredentials == nil || provider.needUpdateCredential() { + sessionCredentials, err1 := provider.getCredentials() + if err1 != nil { + return nil, err1 + } + + provider.sessionCredentials = sessionCredentials + expirationTime, err2 := time.Parse("2006-01-02T15:04:05Z", sessionCredentials.Expiration) + if err2 != nil { + return nil, err2 + } + + provider.lastUpdateTimestamp = time.Now().Unix() + provider.expirationTimestamp = expirationTime.Unix() + } + + cc = &Credentials{ + AccessKeyId: provider.sessionCredentials.AccessKeyId, + AccessKeySecret: provider.sessionCredentials.AccessKeySecret, + SecurityToken: provider.sessionCredentials.SecurityToken, + ProviderName: provider.GetProviderName(), + } + return +} + +func (provider *CloudSSOCredentialsProvider) GetProviderName() string { + return "cloud_sso" +} diff --git a/vendor/github.com/aliyun/credentials-go/credentials/providers/credentials.go b/vendor/github.com/aliyun/credentials-go/credentials/providers/credentials.go new file mode 100644 index 000000000..26592fd22 --- /dev/null +++ b/vendor/github.com/aliyun/credentials-go/credentials/providers/credentials.go @@ -0,0 +1,22 @@ +package providers + +// 下一版本 Credentials 包 +// - 分离 bearer token +// - 从 config 传递迁移到真正的 credentials provider 模式 +// - 删除 GetAccessKeyId()/GetAccessKeySecret()/GetSecurityToken() 方法,只保留 GetCredentials() + +// The credentials struct +type Credentials struct { + AccessKeyId string + AccessKeySecret string + SecurityToken string + ProviderName string +} + +// The credentials provider interface, return credentials and provider name +type CredentialsProvider interface { + // Get credentials + GetCredentials() (*Credentials, error) + // Get credentials provider name + GetProviderName() string +} diff --git a/vendor/github.com/aliyun/credentials-go/credentials/providers/default.go b/vendor/github.com/aliyun/credentials-go/credentials/providers/default.go new file mode 100644 index 000000000..597625f6f --- /dev/null +++ b/vendor/github.com/aliyun/credentials-go/credentials/providers/default.go @@ -0,0 +1,113 @@ +package providers + +import ( + "fmt" + "os" + "strings" +) + +type DefaultCredentialsProvider struct { + providerChain []CredentialsProvider + lastUsedProvider CredentialsProvider +} + +func NewDefaultCredentialsProvider() (provider *DefaultCredentialsProvider) { + providers := []CredentialsProvider{} + + // Add static ak or sts credentials provider + envProvider, err := NewEnvironmentVariableCredentialsProviderBuilder().Build() + if err == nil { + providers = append(providers, envProvider) + } + + // oidc check + oidcProvider, err := NewOIDCCredentialsProviderBuilder().Build() + if err == nil { + providers = append(providers, oidcProvider) + } + + // cli credentials provider + cliProfileProvider, err := NewCLIProfileCredentialsProviderBuilder().Build() + if err == nil { + providers = append(providers, cliProfileProvider) + } + + // profile credentials provider + profileProvider, err := NewProfileCredentialsProviderBuilder().Build() + if err == nil { + providers = append(providers, profileProvider) + } + + // Add IMDS + ecsRamRoleProvider, err := NewECSRAMRoleCredentialsProviderBuilder().Build() + if err == nil { + providers = append(providers, ecsRamRoleProvider) + } + + // credentials 
uri + if os.Getenv("ALIBABA_CLOUD_CREDENTIALS_URI") != "" { + credentialsUriProvider, err := NewURLCredentialsProviderBuilder().Build() + if err == nil { + providers = append(providers, credentialsUriProvider) + } + } + + return &DefaultCredentialsProvider{ + providerChain: providers, + } +} + +func (provider *DefaultCredentialsProvider) GetCredentials() (cc *Credentials, err error) { + if provider.lastUsedProvider != nil { + inner, err1 := provider.lastUsedProvider.GetCredentials() + if err1 != nil { + err = err1 + return + } + + providerName := inner.ProviderName + if providerName == "" { + providerName = provider.lastUsedProvider.GetProviderName() + } + + cc = &Credentials{ + AccessKeyId: inner.AccessKeyId, + AccessKeySecret: inner.AccessKeySecret, + SecurityToken: inner.SecurityToken, + ProviderName: fmt.Sprintf("%s/%s", provider.GetProviderName(), providerName), + } + return + } + + errors := []string{} + for _, p := range provider.providerChain { + provider.lastUsedProvider = p + inner, errInLoop := p.GetCredentials() + if errInLoop != nil { + errors = append(errors, errInLoop.Error()) + // 如果有错误,进入下一个获取过程 + continue + } + + if inner != nil { + providerName := inner.ProviderName + if providerName == "" { + providerName = p.GetProviderName() + } + cc = &Credentials{ + AccessKeyId: inner.AccessKeyId, + AccessKeySecret: inner.AccessKeySecret, + SecurityToken: inner.SecurityToken, + ProviderName: fmt.Sprintf("%s/%s", provider.GetProviderName(), providerName), + } + return + } + } + + err = fmt.Errorf("unable to get credentials from any of the providers in the chain: %s", strings.Join(errors, ", ")) + return +} + +func (provider *DefaultCredentialsProvider) GetProviderName() string { + return "default" +} diff --git a/vendor/github.com/aliyun/credentials-go/credentials/providers/ecs_ram_role.go b/vendor/github.com/aliyun/credentials-go/credentials/providers/ecs_ram_role.go new file mode 100644 index 000000000..9a917b2bf --- /dev/null +++ b/vendor/github.com/aliyun/credentials-go/credentials/providers/ecs_ram_role.go @@ -0,0 +1,283 @@ +package providers + +import ( + "encoding/json" + "errors" + "fmt" + "os" + "strconv" + "strings" + "time" + + httputil "github.com/aliyun/credentials-go/credentials/internal/http" +) + +type ECSRAMRoleCredentialsProvider struct { + roleName string + disableIMDSv1 bool + // for sts + session *sessionCredentials + expirationTimestamp int64 + // for http options + httpOptions *HttpOptions +} + +type ECSRAMRoleCredentialsProviderBuilder struct { + provider *ECSRAMRoleCredentialsProvider +} + +func NewECSRAMRoleCredentialsProviderBuilder() *ECSRAMRoleCredentialsProviderBuilder { + return &ECSRAMRoleCredentialsProviderBuilder{ + provider: &ECSRAMRoleCredentialsProvider{}, + } +} + +func (builder *ECSRAMRoleCredentialsProviderBuilder) WithRoleName(roleName string) *ECSRAMRoleCredentialsProviderBuilder { + builder.provider.roleName = roleName + return builder +} + +func (builder *ECSRAMRoleCredentialsProviderBuilder) WithDisableIMDSv1(disableIMDSv1 bool) *ECSRAMRoleCredentialsProviderBuilder { + builder.provider.disableIMDSv1 = disableIMDSv1 + return builder +} + +func (builder *ECSRAMRoleCredentialsProviderBuilder) WithHttpOptions(httpOptions *HttpOptions) *ECSRAMRoleCredentialsProviderBuilder { + builder.provider.httpOptions = httpOptions + return builder +} + +const defaultMetadataTokenDuration = 21600 // 6 hours + +func (builder *ECSRAMRoleCredentialsProviderBuilder) Build() (provider *ECSRAMRoleCredentialsProvider, err error) { + + if 
strings.ToLower(os.Getenv("ALIBABA_CLOUD_ECS_METADATA_DISABLED")) == "true" { + err = errors.New("IMDS credentials is disabled") + return + } + + // 设置 roleName 默认值 + if builder.provider.roleName == "" { + builder.provider.roleName = os.Getenv("ALIBABA_CLOUD_ECS_METADATA") + } + + if !builder.provider.disableIMDSv1 { + builder.provider.disableIMDSv1 = strings.ToLower(os.Getenv("ALIBABA_CLOUD_IMDSV1_DISABLED")) == "true" + } + + provider = builder.provider + return +} + +type ecsRAMRoleResponse struct { + Code *string `json:"Code"` + AccessKeyId *string `json:"AccessKeyId"` + AccessKeySecret *string `json:"AccessKeySecret"` + SecurityToken *string `json:"SecurityToken"` + LastUpdated *string `json:"LastUpdated"` + Expiration *string `json:"Expiration"` +} + +func (provider *ECSRAMRoleCredentialsProvider) needUpdateCredential() bool { + if provider.expirationTimestamp == 0 { + return true + } + + return provider.expirationTimestamp-time.Now().Unix() <= 180 +} + +func (provider *ECSRAMRoleCredentialsProvider) getRoleName() (roleName string, err error) { + req := &httputil.Request{ + Method: "GET", + Protocol: "http", + Host: "100.100.100.200", + Path: "/latest/meta-data/ram/security-credentials/", + Headers: map[string]string{}, + } + + connectTimeout := 1 * time.Second + readTimeout := 1 * time.Second + + if provider.httpOptions != nil && provider.httpOptions.ConnectTimeout > 0 { + connectTimeout = time.Duration(provider.httpOptions.ConnectTimeout) * time.Millisecond + } + if provider.httpOptions != nil && provider.httpOptions.ReadTimeout > 0 { + readTimeout = time.Duration(provider.httpOptions.ReadTimeout) * time.Millisecond + } + if provider.httpOptions != nil && provider.httpOptions.Proxy != "" { + req.Proxy = provider.httpOptions.Proxy + } + req.ConnectTimeout = connectTimeout + req.ReadTimeout = readTimeout + + metadataToken, err := provider.getMetadataToken() + if err != nil { + return "", err + } + if metadataToken != "" { + req.Headers["x-aliyun-ecs-metadata-token"] = metadataToken + } + + res, err := httpDo(req) + if err != nil { + err = fmt.Errorf("get role name failed: %s", err.Error()) + return + } + + if res.StatusCode != 200 { + err = fmt.Errorf("get role name failed: %s %d", req.BuildRequestURL(), res.StatusCode) + return + } + + roleName = strings.TrimSpace(string(res.Body)) + return +} + +func (provider *ECSRAMRoleCredentialsProvider) getCredentials() (session *sessionCredentials, err error) { + roleName := provider.roleName + if roleName == "" { + roleName, err = provider.getRoleName() + if err != nil { + return + } + } + + req := &httputil.Request{ + Method: "GET", + Protocol: "http", + Host: "100.100.100.200", + Path: "/latest/meta-data/ram/security-credentials/" + roleName, + Headers: map[string]string{}, + } + + connectTimeout := 1 * time.Second + readTimeout := 1 * time.Second + + if provider.httpOptions != nil && provider.httpOptions.ConnectTimeout > 0 { + connectTimeout = time.Duration(provider.httpOptions.ConnectTimeout) * time.Millisecond + } + if provider.httpOptions != nil && provider.httpOptions.ReadTimeout > 0 { + readTimeout = time.Duration(provider.httpOptions.ReadTimeout) * time.Millisecond + } + if provider.httpOptions != nil && provider.httpOptions.Proxy != "" { + req.Proxy = provider.httpOptions.Proxy + } + req.ConnectTimeout = connectTimeout + req.ReadTimeout = readTimeout + + metadataToken, err := provider.getMetadataToken() + if err != nil { + return nil, err + } + if metadataToken != "" { + req.Headers["x-aliyun-ecs-metadata-token"] = metadataToken + 
} + + res, err := httpDo(req) + if err != nil { + err = fmt.Errorf("refresh Ecs sts token err: %s", err.Error()) + return + } + + if res.StatusCode != 200 { + err = fmt.Errorf("refresh Ecs sts token err, httpStatus: %d, message = %s", res.StatusCode, string(res.Body)) + return + } + + var data ecsRAMRoleResponse + err = json.Unmarshal(res.Body, &data) + if err != nil { + err = fmt.Errorf("refresh Ecs sts token err, json.Unmarshal fail: %s", err.Error()) + return + } + + if data.AccessKeyId == nil || data.AccessKeySecret == nil || data.SecurityToken == nil { + err = fmt.Errorf("refresh Ecs sts token err, fail to get credentials") + return + } + + if *data.Code != "Success" { + err = fmt.Errorf("refresh Ecs sts token err, Code is not Success") + return + } + + session = &sessionCredentials{ + AccessKeyId: *data.AccessKeyId, + AccessKeySecret: *data.AccessKeySecret, + SecurityToken: *data.SecurityToken, + Expiration: *data.Expiration, + } + return +} + +func (provider *ECSRAMRoleCredentialsProvider) GetCredentials() (cc *Credentials, err error) { + if provider.session == nil || provider.needUpdateCredential() { + session, err1 := provider.getCredentials() + if err1 != nil { + return nil, err1 + } + + provider.session = session + expirationTime, err2 := time.Parse("2006-01-02T15:04:05Z", session.Expiration) + if err2 != nil { + return nil, err2 + } + provider.expirationTimestamp = expirationTime.Unix() + } + + cc = &Credentials{ + AccessKeyId: provider.session.AccessKeyId, + AccessKeySecret: provider.session.AccessKeySecret, + SecurityToken: provider.session.SecurityToken, + ProviderName: provider.GetProviderName(), + } + return +} + +func (provider *ECSRAMRoleCredentialsProvider) GetProviderName() string { + return "ecs_ram_role" +} + +func (provider *ECSRAMRoleCredentialsProvider) getMetadataToken() (metadataToken string, err error) { + // PUT http://100.100.100.200/latest/api/token + req := &httputil.Request{ + Method: "PUT", + Protocol: "http", + Host: "100.100.100.200", + Path: "/latest/api/token", + Headers: map[string]string{ + "X-aliyun-ecs-metadata-token-ttl-seconds": strconv.Itoa(defaultMetadataTokenDuration), + }, + } + + connectTimeout := 1 * time.Second + readTimeout := 1 * time.Second + + if provider.httpOptions != nil && provider.httpOptions.ConnectTimeout > 0 { + connectTimeout = time.Duration(provider.httpOptions.ConnectTimeout) * time.Millisecond + } + if provider.httpOptions != nil && provider.httpOptions.ReadTimeout > 0 { + readTimeout = time.Duration(provider.httpOptions.ReadTimeout) * time.Millisecond + } + if provider.httpOptions != nil && provider.httpOptions.Proxy != "" { + req.Proxy = provider.httpOptions.Proxy + } + req.ConnectTimeout = connectTimeout + req.ReadTimeout = readTimeout + + res, _err := httpDo(req) + if _err != nil { + if provider.disableIMDSv1 { + err = fmt.Errorf("get metadata token failed: %s", _err.Error()) + } + return + } + if res.StatusCode != 200 { + if provider.disableIMDSv1 { + err = fmt.Errorf("refresh Ecs sts token err, httpStatus: %d, message = %s", res.StatusCode, string(res.Body)) + } + return + } + metadataToken = string(res.Body) + return +} diff --git a/vendor/github.com/aliyun/credentials-go/credentials/providers/env.go b/vendor/github.com/aliyun/credentials-go/credentials/providers/env.go new file mode 100644 index 000000000..27fe33b9e --- /dev/null +++ b/vendor/github.com/aliyun/credentials-go/credentials/providers/env.go @@ -0,0 +1,55 @@ +package providers + +import ( + "fmt" + "os" +) + +type EnvironmentVariableCredentialsProvider 
struct { +} + +type EnvironmentVariableCredentialsProviderBuilder struct { + provider *EnvironmentVariableCredentialsProvider +} + +func NewEnvironmentVariableCredentialsProviderBuilder() *EnvironmentVariableCredentialsProviderBuilder { + return &EnvironmentVariableCredentialsProviderBuilder{ + provider: &EnvironmentVariableCredentialsProvider{}, + } +} + +func (builder *EnvironmentVariableCredentialsProviderBuilder) Build() (provider *EnvironmentVariableCredentialsProvider, err error) { + provider = builder.provider + return +} + +func (provider *EnvironmentVariableCredentialsProvider) GetCredentials() (cc *Credentials, err error) { + accessKeyId := os.Getenv("ALIBABA_CLOUD_ACCESS_KEY_ID") + + if accessKeyId == "" { + err = fmt.Errorf("unable to get credentials from enviroment variables, Access key ID must be specified via environment variable (ALIBABA_CLOUD_ACCESS_KEY_ID)") + return + } + + accessKeySecret := os.Getenv("ALIBABA_CLOUD_ACCESS_KEY_SECRET") + + if accessKeySecret == "" { + err = fmt.Errorf("unable to get credentials from enviroment variables, Access key secret must be specified via environment variable (ALIBABA_CLOUD_ACCESS_KEY_SECRET)") + return + } + + securityToken := os.Getenv("ALIBABA_CLOUD_SECURITY_TOKEN") + + cc = &Credentials{ + AccessKeyId: accessKeyId, + AccessKeySecret: accessKeySecret, + SecurityToken: securityToken, + ProviderName: provider.GetProviderName(), + } + + return +} + +func (provider *EnvironmentVariableCredentialsProvider) GetProviderName() string { + return "env" +} diff --git a/vendor/github.com/aliyun/credentials-go/credentials/providers/hook.go b/vendor/github.com/aliyun/credentials-go/credentials/providers/hook.go new file mode 100644 index 000000000..6839abd3e --- /dev/null +++ b/vendor/github.com/aliyun/credentials-go/credentials/providers/hook.go @@ -0,0 +1,7 @@ +package providers + +import ( + httputil "github.com/aliyun/credentials-go/credentials/internal/http" +) + +var httpDo = httputil.Do diff --git a/vendor/github.com/aliyun/credentials-go/credentials/providers/oidc.go b/vendor/github.com/aliyun/credentials-go/credentials/providers/oidc.go new file mode 100644 index 000000000..ae7194c24 --- /dev/null +++ b/vendor/github.com/aliyun/credentials-go/credentials/providers/oidc.go @@ -0,0 +1,278 @@ +package providers + +import ( + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "net/http" + "os" + "strconv" + "strings" + "time" + + httputil "github.com/aliyun/credentials-go/credentials/internal/http" + "github.com/aliyun/credentials-go/credentials/internal/utils" +) + +type OIDCCredentialsProvider struct { + oidcProviderARN string + oidcTokenFilePath string + roleArn string + roleSessionName string + durationSeconds int + policy string + // for sts endpoint + stsRegionId string + enableVpc bool + stsEndpoint string + + lastUpdateTimestamp int64 + expirationTimestamp int64 + sessionCredentials *sessionCredentials + // for http options + httpOptions *HttpOptions +} + +type OIDCCredentialsProviderBuilder struct { + provider *OIDCCredentialsProvider +} + +func NewOIDCCredentialsProviderBuilder() *OIDCCredentialsProviderBuilder { + return &OIDCCredentialsProviderBuilder{ + provider: &OIDCCredentialsProvider{}, + } +} + +func (b *OIDCCredentialsProviderBuilder) WithOIDCProviderARN(oidcProviderArn string) *OIDCCredentialsProviderBuilder { + b.provider.oidcProviderARN = oidcProviderArn + return b +} + +func (b *OIDCCredentialsProviderBuilder) WithOIDCTokenFilePath(oidcTokenFilePath string) *OIDCCredentialsProviderBuilder { + 
b.provider.oidcTokenFilePath = oidcTokenFilePath + return b +} + +func (b *OIDCCredentialsProviderBuilder) WithRoleArn(roleArn string) *OIDCCredentialsProviderBuilder { + b.provider.roleArn = roleArn + return b +} + +func (b *OIDCCredentialsProviderBuilder) WithRoleSessionName(roleSessionName string) *OIDCCredentialsProviderBuilder { + b.provider.roleSessionName = roleSessionName + return b +} + +func (b *OIDCCredentialsProviderBuilder) WithDurationSeconds(durationSeconds int) *OIDCCredentialsProviderBuilder { + b.provider.durationSeconds = durationSeconds + return b +} + +func (b *OIDCCredentialsProviderBuilder) WithStsRegionId(regionId string) *OIDCCredentialsProviderBuilder { + b.provider.stsRegionId = regionId + return b +} + +func (b *OIDCCredentialsProviderBuilder) WithEnableVpc(enableVpc bool) *OIDCCredentialsProviderBuilder { + b.provider.enableVpc = enableVpc + return b +} + +func (b *OIDCCredentialsProviderBuilder) WithPolicy(policy string) *OIDCCredentialsProviderBuilder { + b.provider.policy = policy + return b +} + +func (b *OIDCCredentialsProviderBuilder) WithSTSEndpoint(stsEndpoint string) *OIDCCredentialsProviderBuilder { + b.provider.stsEndpoint = stsEndpoint + return b +} + +func (b *OIDCCredentialsProviderBuilder) WithHttpOptions(httpOptions *HttpOptions) *OIDCCredentialsProviderBuilder { + b.provider.httpOptions = httpOptions + return b +} + +func (b *OIDCCredentialsProviderBuilder) Build() (provider *OIDCCredentialsProvider, err error) { + if b.provider.roleSessionName == "" { + b.provider.roleSessionName = "credentials-go-" + strconv.FormatInt(time.Now().UnixNano()/1000, 10) + } + + if b.provider.oidcTokenFilePath == "" { + b.provider.oidcTokenFilePath = os.Getenv("ALIBABA_CLOUD_OIDC_TOKEN_FILE") + } + + if b.provider.oidcTokenFilePath == "" { + err = errors.New("the OIDCTokenFilePath is empty") + return + } + + if b.provider.oidcProviderARN == "" { + b.provider.oidcProviderARN = os.Getenv("ALIBABA_CLOUD_OIDC_PROVIDER_ARN") + } + + if b.provider.oidcProviderARN == "" { + err = errors.New("the OIDCProviderARN is empty") + return + } + + if b.provider.roleArn == "" { + b.provider.roleArn = os.Getenv("ALIBABA_CLOUD_ROLE_ARN") + } + + if b.provider.roleArn == "" { + err = errors.New("the RoleArn is empty") + return + } + + if b.provider.durationSeconds == 0 { + b.provider.durationSeconds = 3600 + } + + if b.provider.durationSeconds < 900 { + err = errors.New("the Assume Role session duration should be in the range of 15min - max duration seconds") + } + + if b.provider.stsEndpoint == "" { + if !b.provider.enableVpc { + b.provider.enableVpc = strings.ToLower(os.Getenv("ALIBABA_CLOUD_VPC_ENDPOINT_ENABLED")) == "true" + } + prefix := "sts" + if b.provider.enableVpc { + prefix = "sts-vpc" + } + if b.provider.stsRegionId != "" { + b.provider.stsEndpoint = fmt.Sprintf("%s.%s.aliyuncs.com", prefix, b.provider.stsRegionId) + } else if region := os.Getenv("ALIBABA_CLOUD_STS_REGION"); region != "" { + b.provider.stsEndpoint = fmt.Sprintf("%s.%s.aliyuncs.com", prefix, region) + } else { + b.provider.stsEndpoint = "sts.aliyuncs.com" + } + } + + provider = b.provider + return +} + +func (provider *OIDCCredentialsProvider) getCredentials() (session *sessionCredentials, err error) { + req := &httputil.Request{ + Method: "POST", + Protocol: "https", + Host: provider.stsEndpoint, + Headers: map[string]string{}, + } + + connectTimeout := 5 * time.Second + readTimeout := 10 * time.Second + + if provider.httpOptions != nil && provider.httpOptions.ConnectTimeout > 0 { + connectTimeout = 
time.Duration(provider.httpOptions.ConnectTimeout) * time.Millisecond + } + if provider.httpOptions != nil && provider.httpOptions.ReadTimeout > 0 { + readTimeout = time.Duration(provider.httpOptions.ReadTimeout) * time.Millisecond + } + if provider.httpOptions != nil && provider.httpOptions.Proxy != "" { + req.Proxy = provider.httpOptions.Proxy + } + req.ConnectTimeout = connectTimeout + req.ReadTimeout = readTimeout + + queries := make(map[string]string) + queries["Version"] = "2015-04-01" + queries["Action"] = "AssumeRoleWithOIDC" + queries["Format"] = "JSON" + queries["Timestamp"] = utils.GetTimeInFormatISO8601() + req.Queries = queries + + bodyForm := make(map[string]string) + bodyForm["RoleArn"] = provider.roleArn + bodyForm["OIDCProviderArn"] = provider.oidcProviderARN + token, err := ioutil.ReadFile(provider.oidcTokenFilePath) + if err != nil { + return + } + + bodyForm["OIDCToken"] = string(token) + if provider.policy != "" { + bodyForm["Policy"] = provider.policy + } + + bodyForm["RoleSessionName"] = provider.roleSessionName + bodyForm["DurationSeconds"] = strconv.Itoa(provider.durationSeconds) + req.Form = bodyForm + + // set headers + req.Headers["Accept-Encoding"] = "identity" + res, err := httpDo(req) + if err != nil { + return + } + + if res.StatusCode != http.StatusOK { + message := "get session token failed: " + err = errors.New(message + string(res.Body)) + return + } + var data assumeRoleResponse + err = json.Unmarshal(res.Body, &data) + if err != nil { + err = fmt.Errorf("get oidc sts token err, json.Unmarshal fail: %s", err.Error()) + return + } + if data.Credentials == nil { + err = fmt.Errorf("get oidc sts token err, fail to get credentials") + return + } + + if data.Credentials.AccessKeyId == nil || data.Credentials.AccessKeySecret == nil || data.Credentials.SecurityToken == nil { + err = fmt.Errorf("refresh RoleArn sts token err, fail to get credentials") + return + } + + session = &sessionCredentials{ + AccessKeyId: *data.Credentials.AccessKeyId, + AccessKeySecret: *data.Credentials.AccessKeySecret, + SecurityToken: *data.Credentials.SecurityToken, + Expiration: *data.Credentials.Expiration, + } + return +} + +func (provider *OIDCCredentialsProvider) needUpdateCredential() (result bool) { + if provider.expirationTimestamp == 0 { + return true + } + + return provider.expirationTimestamp-time.Now().Unix() <= 180 +} + +func (provider *OIDCCredentialsProvider) GetCredentials() (cc *Credentials, err error) { + if provider.sessionCredentials == nil || provider.needUpdateCredential() { + sessionCredentials, err1 := provider.getCredentials() + if err1 != nil { + return nil, err1 + } + + provider.sessionCredentials = sessionCredentials + expirationTime, err2 := time.Parse("2006-01-02T15:04:05Z", sessionCredentials.Expiration) + if err2 != nil { + return nil, err2 + } + + provider.lastUpdateTimestamp = time.Now().Unix() + provider.expirationTimestamp = expirationTime.Unix() + } + + cc = &Credentials{ + AccessKeyId: provider.sessionCredentials.AccessKeyId, + AccessKeySecret: provider.sessionCredentials.AccessKeySecret, + SecurityToken: provider.sessionCredentials.SecurityToken, + ProviderName: provider.GetProviderName(), + } + return +} + +func (provider *OIDCCredentialsProvider) GetProviderName() string { + return "oidc_role_arn" +} diff --git a/vendor/github.com/aliyun/credentials-go/credentials/providers/profile.go b/vendor/github.com/aliyun/credentials-go/credentials/providers/profile.go new file mode 100644 index 000000000..c26548e3e --- /dev/null +++ 
b/vendor/github.com/aliyun/credentials-go/credentials/providers/profile.go @@ -0,0 +1,169 @@ +package providers + +import ( + "errors" + "fmt" + "os" + "path" + + "github.com/aliyun/credentials-go/credentials/internal/utils" + "gopkg.in/ini.v1" +) + +type ProfileCredentialsProvider struct { + profileName string + innerProvider CredentialsProvider +} + +type ProfileCredentialsProviderBuilder struct { + provider *ProfileCredentialsProvider +} + +func NewProfileCredentialsProviderBuilder() (builder *ProfileCredentialsProviderBuilder) { + return &ProfileCredentialsProviderBuilder{ + provider: &ProfileCredentialsProvider{}, + } +} + +func (b *ProfileCredentialsProviderBuilder) WithProfileName(profileName string) *ProfileCredentialsProviderBuilder { + b.provider.profileName = profileName + return b +} + +func (b *ProfileCredentialsProviderBuilder) Build() (provider *ProfileCredentialsProvider, err error) { + // 优先级: + // 1. 使用显示指定的 profileName + // 2. 使用环境变量(ALIBABA_CLOUD_PROFILE)指定的 profileName + // 3. 兜底使用 default 作为 profileName + b.provider.profileName = utils.GetDefaultString(b.provider.profileName, os.Getenv("ALIBABA_CLOUD_PROFILE"), "default") + + provider = b.provider + return +} + +func (provider *ProfileCredentialsProvider) getCredentialsProvider(ini *ini.File) (credentialsProvider CredentialsProvider, err error) { + section, err := ini.GetSection(provider.profileName) + if err != nil { + err = errors.New("ERROR: Can not load section" + err.Error()) + return + } + + value, err := section.GetKey("type") + if err != nil { + err = errors.New("ERROR: Can not find credential type" + err.Error()) + return + } + + switch value.String() { + case "access_key": + value1, err1 := section.GetKey("access_key_id") + value2, err2 := section.GetKey("access_key_secret") + if err1 != nil || err2 != nil { + err = errors.New("ERROR: Failed to get value") + return + } + + if value1.String() == "" || value2.String() == "" { + err = errors.New("ERROR: Value can't be empty") + return + } + + credentialsProvider, err = NewStaticAKCredentialsProviderBuilder(). + WithAccessKeyId(value1.String()). + WithAccessKeySecret(value2.String()). + Build() + case "ecs_ram_role": + value1, err1 := section.GetKey("role_name") + if err1 != nil { + err = errors.New("ERROR: Failed to get value") + return + } + credentialsProvider, err = NewECSRAMRoleCredentialsProviderBuilder().WithRoleName(value1.String()).Build() + case "ram_role_arn": + value1, err1 := section.GetKey("access_key_id") + value2, err2 := section.GetKey("access_key_secret") + value3, err3 := section.GetKey("role_arn") + value4, err4 := section.GetKey("role_session_name") + if err1 != nil || err2 != nil || err3 != nil || err4 != nil { + err = errors.New("ERROR: Failed to get value") + return + } + if value1.String() == "" || value2.String() == "" || value3.String() == "" || value4.String() == "" { + err = errors.New("ERROR: Value can't be empty") + return + } + previous, err5 := NewStaticAKCredentialsProviderBuilder(). + WithAccessKeyId(value1.String()). + WithAccessKeySecret(value2.String()). + Build() + if err5 != nil { + err = errors.New("get previous credentials provider failed") + return + } + rawPolicy, _ := section.GetKey("policy") + policy := "" + if rawPolicy != nil { + policy = rawPolicy.String() + } + + credentialsProvider, err = NewRAMRoleARNCredentialsProviderBuilder(). + WithCredentialsProvider(previous). + WithRoleArn(value3.String()). + WithRoleSessionName(value4.String()). + WithPolicy(policy). + WithDurationSeconds(3600). 
+ Build() + default: + err = errors.New("ERROR: Failed to get credential") + } + return +} + +func (provider *ProfileCredentialsProvider) GetCredentials() (cc *Credentials, err error) { + if provider.innerProvider == nil { + sharedCfgPath := os.Getenv("ALIBABA_CLOUD_CREDENTIALS_FILE") + if sharedCfgPath == "" { + homeDir := getHomePath() + if homeDir == "" { + err = fmt.Errorf("cannot found home dir") + return + } + + sharedCfgPath = path.Join(homeDir, ".alibabacloud/credentials") + } + + ini, err1 := ini.Load(sharedCfgPath) + if err1 != nil { + err = errors.New("ERROR: Can not open file" + err1.Error()) + return + } + + provider.innerProvider, err = provider.getCredentialsProvider(ini) + if err != nil { + return + } + } + + innerCC, err := provider.innerProvider.GetCredentials() + if err != nil { + return + } + + providerName := innerCC.ProviderName + if providerName == "" { + providerName = provider.innerProvider.GetProviderName() + } + + cc = &Credentials{ + AccessKeyId: innerCC.AccessKeyId, + AccessKeySecret: innerCC.AccessKeySecret, + SecurityToken: innerCC.SecurityToken, + ProviderName: fmt.Sprintf("%s/%s", provider.GetProviderName(), providerName), + } + + return +} + +func (provider *ProfileCredentialsProvider) GetProviderName() string { + return "profile" +} diff --git a/vendor/github.com/aliyun/credentials-go/credentials/providers/ram_role_arn.go b/vendor/github.com/aliyun/credentials-go/credentials/providers/ram_role_arn.go new file mode 100644 index 000000000..969e271ec --- /dev/null +++ b/vendor/github.com/aliyun/credentials-go/credentials/providers/ram_role_arn.go @@ -0,0 +1,375 @@ +package providers + +import ( + "encoding/json" + "errors" + "fmt" + "net/http" + "net/url" + "os" + "strconv" + "strings" + "time" + + httputil "github.com/aliyun/credentials-go/credentials/internal/http" + "github.com/aliyun/credentials-go/credentials/internal/utils" +) + +type assumedRoleUser struct { +} + +type credentials struct { + SecurityToken *string `json:"SecurityToken"` + Expiration *string `json:"Expiration"` + AccessKeySecret *string `json:"AccessKeySecret"` + AccessKeyId *string `json:"AccessKeyId"` +} + +type assumeRoleResponse struct { + RequestID *string `json:"RequestId"` + AssumedRoleUser *assumedRoleUser `json:"AssumedRoleUser"` + Credentials *credentials `json:"Credentials"` +} + +type sessionCredentials struct { + AccessKeyId string + AccessKeySecret string + SecurityToken string + Expiration string +} + +type HttpOptions struct { + Proxy string + // Connection timeout, in milliseconds. + ConnectTimeout int + // Read timeout, in milliseconds. 
+ ReadTimeout int +} + +type RAMRoleARNCredentialsProvider struct { + // for previous credentials + accessKeyId string + accessKeySecret string + securityToken string + credentialsProvider CredentialsProvider + + roleArn string + roleSessionName string + durationSeconds int + policy string + externalId string + // for sts endpoint + stsRegionId string + enableVpc bool + stsEndpoint string + // for http options + httpOptions *HttpOptions + // inner + expirationTimestamp int64 + lastUpdateTimestamp int64 + previousProviderName string + sessionCredentials *sessionCredentials +} + +type RAMRoleARNCredentialsProviderBuilder struct { + provider *RAMRoleARNCredentialsProvider +} + +func NewRAMRoleARNCredentialsProviderBuilder() *RAMRoleARNCredentialsProviderBuilder { + return &RAMRoleARNCredentialsProviderBuilder{ + provider: &RAMRoleARNCredentialsProvider{}, + } +} + +func (builder *RAMRoleARNCredentialsProviderBuilder) WithAccessKeyId(accessKeyId string) *RAMRoleARNCredentialsProviderBuilder { + builder.provider.accessKeyId = accessKeyId + return builder +} + +func (builder *RAMRoleARNCredentialsProviderBuilder) WithAccessKeySecret(accessKeySecret string) *RAMRoleARNCredentialsProviderBuilder { + builder.provider.accessKeySecret = accessKeySecret + return builder +} + +func (builder *RAMRoleARNCredentialsProviderBuilder) WithSecurityToken(securityToken string) *RAMRoleARNCredentialsProviderBuilder { + builder.provider.securityToken = securityToken + return builder +} + +func (builder *RAMRoleARNCredentialsProviderBuilder) WithCredentialsProvider(credentialsProvider CredentialsProvider) *RAMRoleARNCredentialsProviderBuilder { + builder.provider.credentialsProvider = credentialsProvider + return builder +} + +func (builder *RAMRoleARNCredentialsProviderBuilder) WithRoleArn(roleArn string) *RAMRoleARNCredentialsProviderBuilder { + builder.provider.roleArn = roleArn + return builder +} + +func (builder *RAMRoleARNCredentialsProviderBuilder) WithStsRegionId(regionId string) *RAMRoleARNCredentialsProviderBuilder { + builder.provider.stsRegionId = regionId + return builder +} + +func (builder *RAMRoleARNCredentialsProviderBuilder) WithEnableVpc(enableVpc bool) *RAMRoleARNCredentialsProviderBuilder { + builder.provider.enableVpc = enableVpc + return builder +} + +func (builder *RAMRoleARNCredentialsProviderBuilder) WithStsEndpoint(endpoint string) *RAMRoleARNCredentialsProviderBuilder { + builder.provider.stsEndpoint = endpoint + return builder +} + +func (builder *RAMRoleARNCredentialsProviderBuilder) WithRoleSessionName(roleSessionName string) *RAMRoleARNCredentialsProviderBuilder { + builder.provider.roleSessionName = roleSessionName + return builder +} + +func (builder *RAMRoleARNCredentialsProviderBuilder) WithPolicy(policy string) *RAMRoleARNCredentialsProviderBuilder { + builder.provider.policy = policy + return builder +} + +func (builder *RAMRoleARNCredentialsProviderBuilder) WithExternalId(externalId string) *RAMRoleARNCredentialsProviderBuilder { + builder.provider.externalId = externalId + return builder +} + +func (builder *RAMRoleARNCredentialsProviderBuilder) WithDurationSeconds(durationSeconds int) *RAMRoleARNCredentialsProviderBuilder { + builder.provider.durationSeconds = durationSeconds + return builder +} + +func (builder *RAMRoleARNCredentialsProviderBuilder) WithHttpOptions(httpOptions *HttpOptions) *RAMRoleARNCredentialsProviderBuilder { + builder.provider.httpOptions = httpOptions + return builder +} + +func (builder *RAMRoleARNCredentialsProviderBuilder) Build() (provider 
*RAMRoleARNCredentialsProvider, err error) { + if builder.provider.credentialsProvider == nil { + if builder.provider.accessKeyId != "" && builder.provider.accessKeySecret != "" && builder.provider.securityToken != "" { + builder.provider.credentialsProvider, err = NewStaticSTSCredentialsProviderBuilder(). + WithAccessKeyId(builder.provider.accessKeyId). + WithAccessKeySecret(builder.provider.accessKeySecret). + WithSecurityToken(builder.provider.securityToken). + Build() + if err != nil { + return + } + } else if builder.provider.accessKeyId != "" && builder.provider.accessKeySecret != "" { + builder.provider.credentialsProvider, err = NewStaticAKCredentialsProviderBuilder(). + WithAccessKeyId(builder.provider.accessKeyId). + WithAccessKeySecret(builder.provider.accessKeySecret). + Build() + if err != nil { + return + } + } else { + err = errors.New("must specify a previous credentials provider to assume role") + return + } + } + + if builder.provider.roleArn == "" { + if roleArn := os.Getenv("ALIBABA_CLOUD_ROLE_ARN"); roleArn != "" { + builder.provider.roleArn = roleArn + } else { + err = errors.New("the RoleArn is empty") + return + } + } + + if builder.provider.roleSessionName == "" { + if roleSessionName := os.Getenv("ALIBABA_CLOUD_ROLE_SESSION_NAME"); roleSessionName != "" { + builder.provider.roleSessionName = roleSessionName + } else { + builder.provider.roleSessionName = "credentials-go-" + strconv.FormatInt(time.Now().UnixNano()/1000, 10) + } + } + + // duration seconds + if builder.provider.durationSeconds == 0 { + // default to 3600 + builder.provider.durationSeconds = 3600 + } + + if builder.provider.durationSeconds < 900 { + err = errors.New("session duration should be in the range of 900s - max session duration") + return + } + + // sts endpoint + if builder.provider.stsEndpoint == "" { + if !builder.provider.enableVpc { + builder.provider.enableVpc = strings.ToLower(os.Getenv("ALIBABA_CLOUD_VPC_ENDPOINT_ENABLED")) == "true" + } + prefix := "sts" + if builder.provider.enableVpc { + prefix = "sts-vpc" + } + if builder.provider.stsRegionId != "" { + builder.provider.stsEndpoint = fmt.Sprintf("%s.%s.aliyuncs.com", prefix, builder.provider.stsRegionId) + } else if region := os.Getenv("ALIBABA_CLOUD_STS_REGION"); region != "" { + builder.provider.stsEndpoint = fmt.Sprintf("%s.%s.aliyuncs.com", prefix, region) + } else { + builder.provider.stsEndpoint = "sts.aliyuncs.com" + } + } + + provider = builder.provider + return +} + +func (provider *RAMRoleARNCredentialsProvider) getCredentials(cc *Credentials) (session *sessionCredentials, err error) { + method := "POST" + req := &httputil.Request{ + Method: method, + Protocol: "https", + Host: provider.stsEndpoint, + Headers: map[string]string{}, + } + + queries := make(map[string]string) + queries["Version"] = "2015-04-01" + queries["Action"] = "AssumeRole" + queries["Format"] = "JSON" + queries["Timestamp"] = utils.GetTimeInFormatISO8601() + queries["SignatureMethod"] = "HMAC-SHA1" + queries["SignatureVersion"] = "1.0" + queries["SignatureNonce"] = utils.GetNonce() + queries["AccessKeyId"] = cc.AccessKeyId + + if cc.SecurityToken != "" { + queries["SecurityToken"] = cc.SecurityToken + } + + bodyForm := make(map[string]string) + bodyForm["RoleArn"] = provider.roleArn + if provider.policy != "" { + bodyForm["Policy"] = provider.policy + } + if provider.externalId != "" { + bodyForm["ExternalId"] = provider.externalId + } + bodyForm["RoleSessionName"] = provider.roleSessionName + bodyForm["DurationSeconds"] = 
strconv.Itoa(provider.durationSeconds) + req.Form = bodyForm + + // caculate signature + signParams := make(map[string]string) + for key, value := range queries { + signParams[key] = value + } + for key, value := range bodyForm { + signParams[key] = value + } + + stringToSign := utils.GetURLFormedMap(signParams) + stringToSign = strings.Replace(stringToSign, "+", "%20", -1) + stringToSign = strings.Replace(stringToSign, "*", "%2A", -1) + stringToSign = strings.Replace(stringToSign, "%7E", "~", -1) + stringToSign = url.QueryEscape(stringToSign) + stringToSign = method + "&%2F&" + stringToSign + secret := cc.AccessKeySecret + "&" + queries["Signature"] = utils.ShaHmac1(stringToSign, secret) + + req.Queries = queries + + // set headers + req.Headers["Accept-Encoding"] = "identity" + req.Headers["Content-Type"] = "application/x-www-form-urlencoded" + req.Headers["x-acs-credentials-provider"] = cc.ProviderName + + connectTimeout := 5 * time.Second + readTimeout := 10 * time.Second + + if provider.httpOptions != nil && provider.httpOptions.ConnectTimeout > 0 { + connectTimeout = time.Duration(provider.httpOptions.ConnectTimeout) * time.Millisecond + } + if provider.httpOptions != nil && provider.httpOptions.ReadTimeout > 0 { + readTimeout = time.Duration(provider.httpOptions.ReadTimeout) * time.Millisecond + } + if provider.httpOptions != nil && provider.httpOptions.Proxy != "" { + req.Proxy = provider.httpOptions.Proxy + } + req.ConnectTimeout = connectTimeout + req.ReadTimeout = readTimeout + + res, err := httpDo(req) + if err != nil { + return + } + + if res.StatusCode != http.StatusOK { + err = errors.New("refresh session token failed: " + string(res.Body)) + return + } + var data assumeRoleResponse + err = json.Unmarshal(res.Body, &data) + if err != nil { + err = fmt.Errorf("refresh RoleArn sts token err, json.Unmarshal fail: %s", err.Error()) + return + } + if data.Credentials == nil { + err = fmt.Errorf("refresh RoleArn sts token err, fail to get credentials") + return + } + + if data.Credentials.AccessKeyId == nil || data.Credentials.AccessKeySecret == nil || data.Credentials.SecurityToken == nil { + err = fmt.Errorf("refresh RoleArn sts token err, fail to get credentials") + return + } + + session = &sessionCredentials{ + AccessKeyId: *data.Credentials.AccessKeyId, + AccessKeySecret: *data.Credentials.AccessKeySecret, + SecurityToken: *data.Credentials.SecurityToken, + Expiration: *data.Credentials.Expiration, + } + return +} + +func (provider *RAMRoleARNCredentialsProvider) needUpdateCredential() (result bool) { + if provider.expirationTimestamp == 0 { + return true + } + + return provider.expirationTimestamp-time.Now().Unix() <= 180 +} + +func (provider *RAMRoleARNCredentialsProvider) GetCredentials() (cc *Credentials, err error) { + if provider.sessionCredentials == nil || provider.needUpdateCredential() { + // 获取前置凭证 + previousCredentials, err1 := provider.credentialsProvider.GetCredentials() + if err1 != nil { + return nil, err1 + } + sessionCredentials, err2 := provider.getCredentials(previousCredentials) + if err2 != nil { + return nil, err2 + } + + expirationTime, err := time.Parse("2006-01-02T15:04:05Z", sessionCredentials.Expiration) + if err != nil { + return nil, err + } + + provider.expirationTimestamp = expirationTime.Unix() + provider.lastUpdateTimestamp = time.Now().Unix() + provider.previousProviderName = previousCredentials.ProviderName + provider.sessionCredentials = sessionCredentials + } + + cc = &Credentials{ + AccessKeyId: provider.sessionCredentials.AccessKeyId, 
+ AccessKeySecret: provider.sessionCredentials.AccessKeySecret, + SecurityToken: provider.sessionCredentials.SecurityToken, + ProviderName: fmt.Sprintf("%s/%s", provider.GetProviderName(), provider.previousProviderName), + } + return +} + +func (provider *RAMRoleARNCredentialsProvider) GetProviderName() string { + return "ram_role_arn" +} diff --git a/vendor/github.com/aliyun/credentials-go/credentials/providers/static_ak.go b/vendor/github.com/aliyun/credentials-go/credentials/providers/static_ak.go new file mode 100644 index 000000000..bd3660ccc --- /dev/null +++ b/vendor/github.com/aliyun/credentials-go/credentials/providers/static_ak.go @@ -0,0 +1,67 @@ +package providers + +import ( + "errors" + "os" +) + +type StaticAKCredentialsProvider struct { + accessKeyId string + accessKeySecret string +} + +type StaticAKCredentialsProviderBuilder struct { + provider *StaticAKCredentialsProvider +} + +func NewStaticAKCredentialsProviderBuilder() *StaticAKCredentialsProviderBuilder { + return &StaticAKCredentialsProviderBuilder{ + provider: &StaticAKCredentialsProvider{}, + } +} + +func (builder *StaticAKCredentialsProviderBuilder) WithAccessKeyId(accessKeyId string) *StaticAKCredentialsProviderBuilder { + builder.provider.accessKeyId = accessKeyId + return builder +} + +func (builder *StaticAKCredentialsProviderBuilder) WithAccessKeySecret(accessKeySecret string) *StaticAKCredentialsProviderBuilder { + builder.provider.accessKeySecret = accessKeySecret + return builder +} + +func (builder *StaticAKCredentialsProviderBuilder) Build() (provider *StaticAKCredentialsProvider, err error) { + if builder.provider.accessKeyId == "" { + builder.provider.accessKeyId = os.Getenv("ALIBABA_CLOUD_ACCESS_KEY_ID") + } + + if builder.provider.accessKeyId == "" { + err = errors.New("the access key id is empty") + return + } + + if builder.provider.accessKeySecret == "" { + builder.provider.accessKeySecret = os.Getenv("ALIBABA_CLOUD_ACCESS_KEY_SECRET") + } + + if builder.provider.accessKeySecret == "" { + err = errors.New("the access key secret is empty") + return + } + + provider = builder.provider + return +} + +func (provider *StaticAKCredentialsProvider) GetCredentials() (cc *Credentials, err error) { + cc = &Credentials{ + AccessKeyId: provider.accessKeyId, + AccessKeySecret: provider.accessKeySecret, + ProviderName: provider.GetProviderName(), + } + return +} + +func (provider *StaticAKCredentialsProvider) GetProviderName() string { + return "static_ak" +} diff --git a/vendor/github.com/aliyun/credentials-go/credentials/providers/static_sts.go b/vendor/github.com/aliyun/credentials-go/credentials/providers/static_sts.go new file mode 100644 index 000000000..ad5715187 --- /dev/null +++ b/vendor/github.com/aliyun/credentials-go/credentials/providers/static_sts.go @@ -0,0 +1,83 @@ +package providers + +import ( + "errors" + "os" +) + +type StaticSTSCredentialsProvider struct { + accessKeyId string + accessKeySecret string + securityToken string +} + +type StaticSTSCredentialsProviderBuilder struct { + provider *StaticSTSCredentialsProvider +} + +func NewStaticSTSCredentialsProviderBuilder() *StaticSTSCredentialsProviderBuilder { + return &StaticSTSCredentialsProviderBuilder{ + provider: &StaticSTSCredentialsProvider{}, + } +} + +func (builder *StaticSTSCredentialsProviderBuilder) WithAccessKeyId(accessKeyId string) *StaticSTSCredentialsProviderBuilder { + builder.provider.accessKeyId = accessKeyId + return builder +} + +func (builder *StaticSTSCredentialsProviderBuilder) WithAccessKeySecret(accessKeySecret 
string) *StaticSTSCredentialsProviderBuilder { + builder.provider.accessKeySecret = accessKeySecret + return builder +} + +func (builder *StaticSTSCredentialsProviderBuilder) WithSecurityToken(securityToken string) *StaticSTSCredentialsProviderBuilder { + builder.provider.securityToken = securityToken + return builder +} + +func (builder *StaticSTSCredentialsProviderBuilder) Build() (provider *StaticSTSCredentialsProvider, err error) { + if builder.provider.accessKeyId == "" { + builder.provider.accessKeyId = os.Getenv("ALIBABA_CLOUD_ACCESS_KEY_ID") + } + + if builder.provider.accessKeyId == "" { + err = errors.New("the access key id is empty") + return + } + + if builder.provider.accessKeySecret == "" { + builder.provider.accessKeySecret = os.Getenv("ALIBABA_CLOUD_ACCESS_KEY_SECRET") + } + + if builder.provider.accessKeySecret == "" { + err = errors.New("the access key secret is empty") + return + } + + if builder.provider.securityToken == "" { + builder.provider.securityToken = os.Getenv("ALIBABA_CLOUD_SECURITY_TOKEN") + } + + if builder.provider.securityToken == "" { + err = errors.New("the security token is empty") + return + } + + provider = builder.provider + return +} + +func (provider *StaticSTSCredentialsProvider) GetCredentials() (cc *Credentials, err error) { + cc = &Credentials{ + AccessKeyId: provider.accessKeyId, + AccessKeySecret: provider.accessKeySecret, + SecurityToken: provider.securityToken, + ProviderName: provider.GetProviderName(), + } + return +} + +func (provider *StaticSTSCredentialsProvider) GetProviderName() string { + return "static_sts" +} diff --git a/vendor/github.com/aliyun/credentials-go/credentials/providers/uri.go b/vendor/github.com/aliyun/credentials-go/credentials/providers/uri.go new file mode 100644 index 000000000..ccd877d16 --- /dev/null +++ b/vendor/github.com/aliyun/credentials-go/credentials/providers/uri.go @@ -0,0 +1,152 @@ +package providers + +import ( + "encoding/json" + "errors" + "fmt" + "net/http" + "os" + "time" + + httputil "github.com/aliyun/credentials-go/credentials/internal/http" +) + +type URLCredentialsProvider struct { + url string + // for sts + sessionCredentials *sessionCredentials + // for http options + httpOptions *HttpOptions + // inner + expirationTimestamp int64 +} + +type URLCredentialsProviderBuilder struct { + provider *URLCredentialsProvider +} + +func NewURLCredentialsProviderBuilder() *URLCredentialsProviderBuilder { + return &URLCredentialsProviderBuilder{ + provider: &URLCredentialsProvider{}, + } +} + +func (builder *URLCredentialsProviderBuilder) WithUrl(url string) *URLCredentialsProviderBuilder { + builder.provider.url = url + return builder +} + +func (builder *URLCredentialsProviderBuilder) WithHttpOptions(httpOptions *HttpOptions) *URLCredentialsProviderBuilder { + builder.provider.httpOptions = httpOptions + return builder +} + +func (builder *URLCredentialsProviderBuilder) Build() (provider *URLCredentialsProvider, err error) { + + if builder.provider.url == "" { + builder.provider.url = os.Getenv("ALIBABA_CLOUD_CREDENTIALS_URI") + } + + if builder.provider.url == "" { + err = errors.New("the url is empty") + return + } + + provider = builder.provider + return +} + +type urlResponse struct { + AccessKeyId *string `json:"AccessKeyId"` + AccessKeySecret *string `json:"AccessKeySecret"` + SecurityToken *string `json:"SecurityToken"` + Expiration *string `json:"Expiration"` +} + +func (provider *URLCredentialsProvider) getCredentials() (session *sessionCredentials, err error) { + req := &httputil.Request{ 
+ Method: "GET", + URL: provider.url, + } + + connectTimeout := 5 * time.Second + readTimeout := 10 * time.Second + + if provider.httpOptions != nil && provider.httpOptions.ConnectTimeout > 0 { + connectTimeout = time.Duration(provider.httpOptions.ConnectTimeout) * time.Millisecond + } + if provider.httpOptions != nil && provider.httpOptions.ReadTimeout > 0 { + readTimeout = time.Duration(provider.httpOptions.ReadTimeout) * time.Millisecond + } + if provider.httpOptions != nil && provider.httpOptions.Proxy != "" { + req.Proxy = provider.httpOptions.Proxy + } + req.ConnectTimeout = connectTimeout + req.ReadTimeout = readTimeout + + res, err := httpDo(req) + if err != nil { + return + } + + if res.StatusCode != http.StatusOK { + err = fmt.Errorf("get credentials from %s failed: %s", req.BuildRequestURL(), string(res.Body)) + return + } + + var resp urlResponse + err = json.Unmarshal(res.Body, &resp) + if err != nil { + err = fmt.Errorf("get credentials from %s failed with error, json unmarshal fail: %s", req.BuildRequestURL(), err.Error()) + return + } + + if resp.AccessKeyId == nil || resp.AccessKeySecret == nil || resp.SecurityToken == nil || resp.Expiration == nil { + err = fmt.Errorf("refresh credentials from %s failed: %s", req.BuildRequestURL(), string(res.Body)) + return + } + + session = &sessionCredentials{ + AccessKeyId: *resp.AccessKeyId, + AccessKeySecret: *resp.AccessKeySecret, + SecurityToken: *resp.SecurityToken, + Expiration: *resp.Expiration, + } + return +} + +func (provider *URLCredentialsProvider) needUpdateCredential() (result bool) { + if provider.expirationTimestamp == 0 { + return true + } + + return provider.expirationTimestamp-time.Now().Unix() <= 180 +} + +func (provider *URLCredentialsProvider) GetCredentials() (cc *Credentials, err error) { + if provider.sessionCredentials == nil || provider.needUpdateCredential() { + sessionCredentials, err1 := provider.getCredentials() + if err1 != nil { + return nil, err1 + } + + provider.sessionCredentials = sessionCredentials + expirationTime, err2 := time.Parse("2006-01-02T15:04:05Z", sessionCredentials.Expiration) + if err2 != nil { + return nil, err2 + } + provider.expirationTimestamp = expirationTime.Unix() + } + + cc = &Credentials{ + AccessKeyId: provider.sessionCredentials.AccessKeyId, + AccessKeySecret: provider.sessionCredentials.AccessKeySecret, + SecurityToken: provider.sessionCredentials.SecurityToken, + ProviderName: provider.GetProviderName(), + } + return +} + +func (provider *URLCredentialsProvider) GetProviderName() string { + return "credential_uri" +} diff --git a/vendor/modules.txt b/vendor/modules.txt index f18a1706a..8f2287c0e 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -112,6 +112,22 @@ github.com/Microsoft/go-winio/internal/fs github.com/Microsoft/go-winio/internal/socket github.com/Microsoft/go-winio/internal/stringbuffer github.com/Microsoft/go-winio/pkg/guid +# github.com/alibabacloud-go/debug v1.0.1 +## explicit; go 1.18 +github.com/alibabacloud-go/debug/debug +# github.com/aliyun/alibabacloud-oss-go-sdk-v2 v1.2.3 +## explicit; go 1.18 +github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss +github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/credentials +github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/crypto +github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/retry +github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/signer +github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/transport +# github.com/aliyun/credentials-go v1.4.7 +## explicit; go 1.14 
+github.com/aliyun/credentials-go/credentials/internal/http
+github.com/aliyun/credentials-go/credentials/internal/utils
+github.com/aliyun/credentials-go/credentials/providers
 # github.com/aws/aws-sdk-go-v2 v1.33.0
 ## explicit; go 1.21
 github.com/aws/aws-sdk-go-v2/aws

From bd120997b104316cf11dfbe4cdf1f1dd6b854f02 Mon Sep 17 00:00:00 2001
From: Imre Nagi
Date: Sun, 3 Aug 2025 10:38:27 +0700
Subject: [PATCH 04/95] feat: implement FileStat method for OSS storage

---
 pbm/storage/oss/oss.go | 25 ++++++++++++++++++++++++-
 1 file changed, 24 insertions(+), 1 deletion(-)

diff --git a/pbm/storage/oss/oss.go b/pbm/storage/oss/oss.go
index 71a99f4a8..e8c57251a 100644
--- a/pbm/storage/oss/oss.go
+++ b/pbm/storage/oss/oss.go
@@ -1,11 +1,14 @@
 package oss
 
 import (
+	"context"
 	"fmt"
 	"io"
+	"path"
 
 	"github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss"
 
+	"github.com/percona/percona-backup-mongodb/pbm/errors"
 	"github.com/percona/percona-backup-mongodb/pbm/log"
 	"github.com/percona/percona-backup-mongodb/pbm/storage"
 )
@@ -52,7 +55,27 @@ func (o *OSS) SourceReader(name string) (io.ReadCloser, error) {
 }
 
 func (o *OSS) FileStat(name string) (storage.FileInfo, error) {
-	return storage.FileInfo{}, nil
+	inf := storage.FileInfo{}
+
+	res, err := o.ossCli.HeadObject(context.Background(), &oss.HeadObjectRequest{
+		Bucket: oss.Ptr(o.cfg.Bucket),
+		Key:    oss.Ptr(path.Join(o.cfg.Prefix, name)),
+	})
+	if err != nil {
+		var serr *oss.ServiceError
+		if errors.As(err, &serr) && serr.Code == "NoSuchKey" {
+			return inf, storage.ErrNotExist
+		}
+		return inf, errors.Wrap(err, "get OSS object header")
+	}
+
+	inf.Name = name
+	inf.Size = res.ContentLength
+	if inf.Size == 0 {
+		return inf, storage.ErrEmpty
+	}
+
+	return inf, nil
 }
 
 func (o *OSS) List(prefix, suffix string) ([]storage.FileInfo, error) {

From 6249fdd02bf9e97850b42df5f14d2e8999cf06d9 Mon Sep 17 00:00:00 2001
From: Imre Nagi
Date: Sun, 3 Aug 2025 10:52:09 +0700
Subject: [PATCH 05/95] feat: implement Delete method for OSS storage

---
 pbm/storage/oss/oss.go | 14 ++++++++++++++
 1 file changed, 14 insertions(+)

diff --git a/pbm/storage/oss/oss.go b/pbm/storage/oss/oss.go
index e8c57251a..74a1d63e1 100644
--- a/pbm/storage/oss/oss.go
+++ b/pbm/storage/oss/oss.go
@@ -54,6 +54,7 @@
 	return nil, nil
 }
 
+// FileStat returns file info. It returns an error if the file is empty or does not exist.
 func (o *OSS) FileStat(name string) (storage.FileInfo, error) {
 	inf := storage.FileInfo{}
 
@@ -78,14 +79,27 @@
 	return inf, nil
 }
 
+// List scans path with prefix and returns all files with given suffix.
+// Both prefix and suffix can be omitted.
 func (o *OSS) List(prefix, suffix string) ([]storage.FileInfo, error) {
 	return nil, nil
 }
 
+// Delete deletes given file.
+// It returns storage.ErrNotExist if a file doesn't exist.
func (o *OSS) Delete(name string) error { + key := path.Join(o.cfg.Prefix, name) + path.Join(o.cfg.Prefix, name) + _, err := o.ossCli.DeleteObject(context.Background(), &oss.DeleteObjectRequest{ + Bucket: oss.Ptr(o.cfg.Bucket), + Key: oss.Ptr(key), + }) + if err != nil { + return errors.Wrapf(err, "delete %s/%s file from OSS", o.cfg.Bucket, key) + } return nil } +// Copy makes a copy of the src objec/file under dst name func (o *OSS) Copy(src, dst string) error { return nil } From 3a23edda8825a6a40165c70e0a6268a3cfcccf13 Mon Sep 17 00:00:00 2001 From: Imre Nagi Date: Sun, 3 Aug 2025 10:52:24 +0700 Subject: [PATCH 06/95] feat: add support for OSS storage type in ParseType function --- pbm/storage/storage.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pbm/storage/storage.go b/pbm/storage/storage.go index 117f0c51f..335f412ac 100644 --- a/pbm/storage/storage.go +++ b/pbm/storage/storage.go @@ -65,6 +65,8 @@ func ParseType(s string) Type { return Blackhole case string(GCS): return GCS + case string(OSS): + return OSS default: return Undefined } From 2261920de78e8e92148981e7c20d9d2c651f9737 Mon Sep 17 00:00:00 2001 From: heryheming Date: Fri, 15 Aug 2025 17:15:46 +0700 Subject: [PATCH 07/95] feat: add function List --- pbm/storage/oss/oss.go | 46 +++++++++++++++++++++++++++++++++++++++++- 1 file changed, 45 insertions(+), 1 deletion(-) diff --git a/pbm/storage/oss/oss.go b/pbm/storage/oss/oss.go index 74a1d63e1..fd4f7c194 100644 --- a/pbm/storage/oss/oss.go +++ b/pbm/storage/oss/oss.go @@ -5,6 +5,7 @@ import ( "fmt" "io" "path" + "strings" "github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss" @@ -81,10 +82,53 @@ func (o *OSS) FileStat(name string) (storage.FileInfo, error) { // List scans path with prefix and returns all files with given suffix. // Both prefix and suffix can be omitted. + func (o *OSS) List(prefix, suffix string) ([]storage.FileInfo, error) { - return nil, nil + + prfx := path.Join(o.cfg.Prefix, prefix) + if prfx != "" && !strings.HasSuffix(prfx, "/") { + prfx += "/" + } + + var files []storage.FileInfo + var marker *string + for { + res, err := o.ossCli.ListObjects(context.Background(), &oss.ListObjectsRequest{ + Bucket: oss.Ptr(o.cfg.Bucket), + Prefix: oss.Ptr(prfx), + Marker: marker, + }) + if err != nil { + return nil, errors.Wrap(err, "list OSS objects") + } + for _, obj := range res.Contents { + var key string + if obj.Key != nil { + key = *obj.Key + } + if suffix != "" && !strings.HasSuffix(key, suffix) { + continue + } + if key == "" || strings.HasSuffix(key, "/") { + continue + } + name := strings.TrimPrefix(key, o.cfg.Prefix) + name = strings.TrimPrefix(name, "/") + files = append(files, storage.FileInfo{ + Name: name, + Size: obj.Size, + }) + } + if res.IsTruncated { + marker = res.NextMarker + continue + } + break + } + return files, nil } + // Delete deletes given file. // It returns storage.ErrNotExist if a file doesn't exists. 
func (o *OSS) Delete(name string) error { From 7301e2c5bbcccacea64f5b9ebdaed91ff38edd93 Mon Sep 17 00:00:00 2001 From: heryheming Date: Mon, 18 Aug 2025 15:45:53 +0700 Subject: [PATCH 08/95] fix: Update configureClient --- pbm/storage/oss/client.go | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/pbm/storage/oss/client.go b/pbm/storage/oss/client.go index 2da86d864..02b2bb65d 100644 --- a/pbm/storage/oss/client.go +++ b/pbm/storage/oss/client.go @@ -128,8 +128,14 @@ func (c *cred) GetCredentials(ctx context.Context) (osscred.Credentials, error) } func configureClient(config *Config) (*oss.Client, error) { - if config.Region == "" { - return nil, fmt.Errorf("oss region is required") + if config == nil { + return nil, fmt.Errorf("config is nil") + } + if config.Retryer == nil { + config.Retryer = &Retryer{MaxAttempts: 3, MaxBackoff: defaultRetryerMaxBackoff, BaseDelay: defaultRetryBaseDelay} + } + if config.Region == "" || config.Bucket == "" || config.Credentials.AccessKeyID == "" || config.Credentials.AccessKeySecret == "" { + return nil, fmt.Errorf("Missing required OSS config: %+v", config) } cred, err := newCred(config) From 296a2d3e6d14b2201ae9ea2d0d785f0e5d479da9 Mon Sep 17 00:00:00 2001 From: Imre Nagi Date: Mon, 18 Aug 2025 17:46:10 +0700 Subject: [PATCH 09/95] feat: implement the remaining storage function for oss Signed-off-by: Imre Nagi --- pbm/storage/oss/client.go | 17 +++-- pbm/storage/oss/oss.go | 155 ++++++++++++++++++++++++++++++++------ 2 files changed, 143 insertions(+), 29 deletions(-) diff --git a/pbm/storage/oss/client.go b/pbm/storage/oss/client.go index 02b2bb65d..ee2bed042 100644 --- a/pbm/storage/oss/client.go +++ b/pbm/storage/oss/client.go @@ -7,7 +7,6 @@ import ( "github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss" osscred "github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/credentials" - "github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/retry" "github.com/aliyun/credentials-go/credentials/providers" ) @@ -61,11 +60,15 @@ func (cfg *Config) Cast() error { cfg.Retryer.MaxBackoff = defaultRetryerMaxBackoff } } + if cfg.MaxUploadParts <= 0 { + cfg.MaxUploadParts = maxPart + } return nil } const ( defaultSessionExpiration = 3600 + maxPart = int32(10000) ) func newCred(config *Config) (*cred, error) { @@ -147,12 +150,12 @@ func configureClient(config *Config) (*oss.Client, error) { WithRegion(config.Region). WithCredentialsProvider(cred). WithSignatureVersion(oss.SignatureVersionV4). - WithRetryMaxAttempts(config.Retryer.MaxAttempts). - WithRetryer(retry.NewStandard(func(ro *retry.RetryOptions) { - ro.MaxAttempts = config.Retryer.MaxAttempts - ro.MaxBackoff = config.Retryer.MaxBackoff - ro.BaseDelay = config.Retryer.BaseDelay - })). + // WithRetryMaxAttempts(config.Retryer.MaxAttempts). + // WithRetryer(retry.NewStandard(func(ro *retry.RetryOptions) { + // ro.MaxAttempts = config.Retryer.MaxAttempts + // ro.MaxBackoff = config.Retryer.MaxBackoff + // ro.BaseDelay = config.Retryer.BaseDelay + // })). 
WithConnectTimeout(time.Duration(config.ConnectTimeout) * time.Second) if config.EndpointURL != "" { diff --git a/pbm/storage/oss/oss.go b/pbm/storage/oss/oss.go index fd4f7c194..010bdffd9 100644 --- a/pbm/storage/oss/oss.go +++ b/pbm/storage/oss/oss.go @@ -1,6 +1,7 @@ package oss import ( + "bytes" "context" "fmt" "io" @@ -48,11 +49,114 @@ func (o *OSS) Type() storage.Type { } func (o *OSS) Save(name string, data io.Reader, options ...storage.Option) error { - return nil + opts := storage.GetDefaultOpts() + for _, opt := range options { + if err := opt(opts); err != nil { + return errors.Wrap(err, "processing options for save") + } + } + + // TODO move it somewhere + defaultPartSize := int64(10 * 1024 * 1024) // 10MB + minPartSize := int64(5 * 1024 * 1024) // 5MB + + partSize := storage.ComputePartSize( + opts.Size, + defaultPartSize, + minPartSize, + int64(o.cfg.MaxUploadParts), + int64(o.cfg.UploadPartSize), + ) + + if o.log != nil && opts.UseLogger { + o.log.Debug("uploading %q [size hint: %v (%v); part size: %v (%v)]", + name, + opts.Size, + storage.PrettySize(opts.Size), + partSize, + storage.PrettySize(partSize)) + } + + key := path.Join(o.cfg.Prefix, name) + + // Use multipart upload + initResult, err := o.ossCli.InitiateMultipartUpload(context.Background(), &oss.InitiateMultipartUploadRequest{ + Bucket: oss.Ptr(o.cfg.Bucket), + Key: oss.Ptr(key), + }) + if err != nil { + return errors.Wrap(err, "initiate multipart upload") + } + uploadID := initResult.UploadId + + var completeParts []oss.UploadPart + partNumber := int32(1) + buf := make([]byte, partSize) + + for { + n, err := data.Read(buf) + if n > 0 { + uploadPartResult, uerr := o.ossCli.UploadPart(context.Background(), &oss.UploadPartRequest{ + Bucket: oss.Ptr(o.cfg.Bucket), + Key: oss.Ptr(key), + UploadId: uploadID, + PartNumber: partNumber, + Body: bytes.NewReader(buf[:n]), + }) + if uerr != nil { + _, _ = o.ossCli.AbortMultipartUpload(context.Background(), &oss.AbortMultipartUploadRequest{ + Bucket: oss.Ptr(o.cfg.Bucket), + Key: oss.Ptr(key), + UploadId: uploadID, + }) + return errors.Wrap(uerr, "upload part") + } + completeParts = append(completeParts, oss.UploadPart{ + ETag: uploadPartResult.ETag, + PartNumber: partNumber, + }) + partNumber++ + } + + if err == io.EOF { + break + } + if err != nil { + _, _ = o.ossCli.AbortMultipartUpload(context.Background(), &oss.AbortMultipartUploadRequest{ + Bucket: oss.Ptr(o.cfg.Bucket), + Key: oss.Ptr(key), + UploadId: uploadID, + }) + return errors.Wrap(err, "read chunk") + } + } + + _, err = o.ossCli.CompleteMultipartUpload(context.Background(), &oss.CompleteMultipartUploadRequest{ + Bucket: oss.Ptr(o.cfg.Bucket), + Key: oss.Ptr(key), + UploadId: uploadID, + CompleteMultipartUpload: &oss.CompleteMultipartUpload{ + Parts: completeParts, + }, + }) + + return errors.Wrap(err, "complete multipart upload") } func (o *OSS) SourceReader(name string) (io.ReadCloser, error) { - return nil, nil + res, err := o.ossCli.GetObject(context.Background(), &oss.GetObjectRequest{ + Bucket: oss.Ptr(o.cfg.Bucket), + Key: oss.Ptr(path.Join(o.cfg.Prefix, name)), + }) + if err != nil { + var serr *oss.ServiceError + if errors.As(err, &serr) && serr.Code == "NoSuchKey" { + return nil, storage.ErrNotExist + } + return nil, errors.Wrap(err, "get object") + } + + return res.Body, nil } // FileStat returns file info. It returns error if file is empty or not exists. 
@@ -82,45 +186,46 @@ func (o *OSS) FileStat(name string) (storage.FileInfo, error) { // List scans path with prefix and returns all files with given suffix. // Both prefix and suffix can be omitted. - func (o *OSS) List(prefix, suffix string) ([]storage.FileInfo, error) { - prfx := path.Join(o.cfg.Prefix, prefix) if prfx != "" && !strings.HasSuffix(prfx, "/") { prfx += "/" } var files []storage.FileInfo - var marker *string + var continuationToken *string for { - res, err := o.ossCli.ListObjects(context.Background(), &oss.ListObjectsRequest{ - Bucket: oss.Ptr(o.cfg.Bucket), - Prefix: oss.Ptr(prfx), - Marker: marker, + res, err := o.ossCli.ListObjectsV2(context.Background(), &oss.ListObjectsV2Request{ + Bucket: oss.Ptr(o.cfg.Bucket), + Prefix: oss.Ptr(prfx), + ContinuationToken: continuationToken, }) if err != nil { return nil, errors.Wrap(err, "list OSS objects") } for _, obj := range res.Contents { - var key string + key := "" if obj.Key != nil { key = *obj.Key } - if suffix != "" && !strings.HasSuffix(key, suffix) { + + f := strings.TrimPrefix(key, prfx) + if len(f) == 0 { continue } - if key == "" || strings.HasSuffix(key, "/") { - continue + if f[0] == '/' { + f = f[1:] + } + + if strings.HasSuffix(f, suffix) { + files = append(files, storage.FileInfo{ + Name: f, + Size: obj.Size, + }) } - name := strings.TrimPrefix(key, o.cfg.Prefix) - name = strings.TrimPrefix(name, "/") - files = append(files, storage.FileInfo{ - Name: name, - Size: obj.Size, - }) } if res.IsTruncated { - marker = res.NextMarker + continuationToken = res.NextContinuationToken continue } break @@ -128,7 +233,6 @@ func (o *OSS) List(prefix, suffix string) ([]storage.FileInfo, error) { return files, nil } - // Delete deletes given file. // It returns storage.ErrNotExist if a file doesn't exists. 
func (o *OSS) Delete(name string) error { @@ -146,5 +250,12 @@ func (o *OSS) Delete(name string) error { // Copy makes a copy of the src objec/file under dst name func (o *OSS) Copy(src, dst string) error { - return nil + _, err := o.ossCli.CopyObject(context.Background(), &oss.CopyObjectRequest{ + Bucket: oss.Ptr(o.cfg.Bucket), + Key: oss.Ptr(path.Join(o.cfg.Prefix, dst)), + SourceBucket: oss.Ptr(o.cfg.Bucket), + SourceKey: oss.Ptr(path.Join(o.cfg.Prefix, src)), + }) + + return errors.Wrap(err, "copy object") } From 5c9ad8167f57491f12702aef26343ad29a783eae Mon Sep 17 00:00:00 2001 From: Imre Nagi Date: Mon, 18 Aug 2025 20:35:53 +0700 Subject: [PATCH 10/95] refactor(PBM-1588): move default part size constants to client.go --- pbm/storage/oss/client.go | 45 +++++++++++++++++++++++---------------- pbm/storage/oss/oss.go | 4 ---- 2 files changed, 27 insertions(+), 22 deletions(-) diff --git a/pbm/storage/oss/client.go b/pbm/storage/oss/client.go index ee2bed042..dfed28da9 100644 --- a/pbm/storage/oss/client.go +++ b/pbm/storage/oss/client.go @@ -7,15 +7,20 @@ import ( "github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss" osscred "github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/credentials" + "github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/retry" "github.com/aliyun/credentials-go/credentials/providers" ) const ( defaultPartSize int64 = 10 * 1024 * 1024 // 10Mb + minPartSize int64 = 5 * 1024 * 1024 // 5MB defaultS3Region = "ap-southeast-5" + maxPart int32 = 10000 - defaultRetryBaseDelay = 30 * time.Millisecond - defaultRetryerMaxBackoff = 300 * time.Second + defaultRetryMaxAttempts = 5 + defaultRetryBaseDelay = 30 * time.Millisecond + defaultRetryerMaxBackoff = 300 * time.Second + defaultSessionDurationSeconds = 3600 ) //nolint:lll @@ -53,12 +58,21 @@ func (cfg *Config) Cast() error { cfg.Region = defaultS3Region } if cfg.Retryer != nil { + if cfg.Retryer.MaxAttempts == 0 { + cfg.Retryer.MaxAttempts = defaultRetryMaxAttempts + } if cfg.Retryer.BaseDelay == 0 { cfg.Retryer.BaseDelay = defaultRetryBaseDelay } if cfg.Retryer.MaxBackoff == 0 { cfg.Retryer.MaxBackoff = defaultRetryerMaxBackoff } + } else { + cfg.Retryer = &Retryer{ + MaxAttempts: defaultRetryMaxAttempts, + MaxBackoff: defaultRetryerMaxBackoff, + BaseDelay: defaultRetryBaseDelay, + } } if cfg.MaxUploadParts <= 0 { cfg.MaxUploadParts = maxPart @@ -66,11 +80,6 @@ func (cfg *Config) Cast() error { return nil } -const ( - defaultSessionExpiration = 3600 - maxPart = int32(10000) -) - func newCred(config *Config) (*cred, error) { var credentialsProvider providers.CredentialsProvider var err error @@ -101,7 +110,7 @@ func newCred(config *Config) (*cred, error) { WithCredentialsProvider(internalProvider). WithRoleArn(config.Credentials.RoleARN). WithRoleSessionName(config.Credentials.SessionName). - WithDurationSeconds(defaultSessionExpiration). + WithDurationSeconds(defaultSessionDurationSeconds). 
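+		// The assumed-role session is requested for 3600 seconds; the
+		// provider caches it and is expected to refresh it before expiry.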
Build() if err != nil { return nil, fmt.Errorf("ram role credential provider: %w", err) @@ -134,10 +143,10 @@ func configureClient(config *Config) (*oss.Client, error) { if config == nil { return nil, fmt.Errorf("config is nil") } - if config.Retryer == nil { - config.Retryer = &Retryer{MaxAttempts: 3, MaxBackoff: defaultRetryerMaxBackoff, BaseDelay: defaultRetryBaseDelay} - } - if config.Region == "" || config.Bucket == "" || config.Credentials.AccessKeyID == "" || config.Credentials.AccessKeySecret == "" { + + if config.Region == "" || config.Bucket == "" || + config.Credentials.AccessKeyID == "" || + config.Credentials.AccessKeySecret == "" { return nil, fmt.Errorf("Missing required OSS config: %+v", config) } @@ -150,12 +159,12 @@ func configureClient(config *Config) (*oss.Client, error) { WithRegion(config.Region). WithCredentialsProvider(cred). WithSignatureVersion(oss.SignatureVersionV4). - // WithRetryMaxAttempts(config.Retryer.MaxAttempts). - // WithRetryer(retry.NewStandard(func(ro *retry.RetryOptions) { - // ro.MaxAttempts = config.Retryer.MaxAttempts - // ro.MaxBackoff = config.Retryer.MaxBackoff - // ro.BaseDelay = config.Retryer.BaseDelay - // })). + WithRetryMaxAttempts(config.Retryer.MaxAttempts). + WithRetryer(retry.NewStandard(func(ro *retry.RetryOptions) { + ro.MaxAttempts = config.Retryer.MaxAttempts + ro.MaxBackoff = config.Retryer.MaxBackoff + ro.BaseDelay = config.Retryer.BaseDelay + })). WithConnectTimeout(time.Duration(config.ConnectTimeout) * time.Second) if config.EndpointURL != "" { diff --git a/pbm/storage/oss/oss.go b/pbm/storage/oss/oss.go index 010bdffd9..bd074a915 100644 --- a/pbm/storage/oss/oss.go +++ b/pbm/storage/oss/oss.go @@ -56,10 +56,6 @@ func (o *OSS) Save(name string, data io.Reader, options ...storage.Option) error } } - // TODO move it somewhere - defaultPartSize := int64(10 * 1024 * 1024) // 10MB - minPartSize := int64(5 * 1024 * 1024) // 5MB - partSize := storage.ComputePartSize( opts.Size, defaultPartSize, From 3b299a25d575a300acfb7d0e74bffda7a9c5f266 Mon Sep 17 00:00:00 2001 From: Imre Nagi Date: Tue, 19 Aug 2025 09:04:45 +0700 Subject: [PATCH 11/95] fix: add missing cast for oss Signed-off-by: Imre Nagi --- pbm/config/config.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pbm/config/config.go b/pbm/config/config.go index b4027d72a..64644337c 100644 --- a/pbm/config/config.go +++ b/pbm/config/config.go @@ -308,6 +308,8 @@ func (s *StorageConf) Cast() error { return s.Filesystem.Cast() case storage.S3: return s.S3.Cast() + case storage.OSS: + return s.OSS.Cast() case storage.GCS: return nil case storage.Azure: // noop From 710853e61e056035c549cd40aefb8293d8af1361 Mon Sep 17 00:00:00 2001 From: Imre Nagi Date: Tue, 19 Aug 2025 09:57:28 +0700 Subject: [PATCH 12/95] feat: add OSS credential masking and clone method for Config --- pbm/config/config.go | 13 +++++++++++++ pbm/storage/oss/client.go | 9 +++++++++ 2 files changed, 22 insertions(+) diff --git a/pbm/config/config.go b/pbm/config/config.go index 64644337c..02849bc11 100644 --- a/pbm/config/config.go +++ b/pbm/config/config.go @@ -161,6 +161,17 @@ func (c *Config) String() string { c.Storage.GCS.Credentials.HMACSecret = "***" } } + if c.Storage.OSS != nil { + if c.Storage.OSS.Credentials.AccessKeyID != "" { + c.Storage.OSS.Credentials.AccessKeyID = "***" + } + if c.Storage.OSS.Credentials.AccessKeySecret != "" { + c.Storage.OSS.Credentials.AccessKeySecret = "***" + } + if c.Storage.OSS.Credentials.SecurityToken != "" { + c.Storage.OSS.Credentials.SecurityToken = "***" + } + } b, 
err := yaml.Marshal(c) if err != nil { @@ -249,6 +260,8 @@ func (s *StorageConf) Clone() *StorageConf { rv.Azure = s.Azure.Clone() case storage.GCS: rv.GCS = s.GCS.Clone() + case storage.OSS: + rv.OSS = s.OSS.Clone() case storage.Blackhole: // no config } diff --git a/pbm/storage/oss/client.go b/pbm/storage/oss/client.go index dfed28da9..ff3b56e7a 100644 --- a/pbm/storage/oss/client.go +++ b/pbm/storage/oss/client.go @@ -80,6 +80,15 @@ func (cfg *Config) Cast() error { return nil } +func (cfg *Config) Clone() *Config { + if cfg == nil { + return nil + } + + rv := *cfg + return &rv +} + func newCred(config *Config) (*cred, error) { var credentialsProvider providers.CredentialsProvider var err error From 4b178275f8142a829dad18b49103c2aa19e65b58 Mon Sep 17 00:00:00 2001 From: Imre Nagi Date: Tue, 19 Aug 2025 10:02:59 +0700 Subject: [PATCH 13/95] fix: update ConnectTimeout type to time.Duration and set default value --- pbm/storage/oss/client.go | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/pbm/storage/oss/client.go b/pbm/storage/oss/client.go index ff3b56e7a..f25229767 100644 --- a/pbm/storage/oss/client.go +++ b/pbm/storage/oss/client.go @@ -21,6 +21,7 @@ const ( defaultRetryBaseDelay = 30 * time.Millisecond defaultRetryerMaxBackoff = 300 * time.Second defaultSessionDurationSeconds = 3600 + defaultConnectTimeout = 5 * time.Second ) //nolint:lll @@ -34,9 +35,9 @@ type Config struct { Retryer *Retryer `bson:"retryer,omitempty" json:"retryer,omitempty" yaml:"retryer,omitempty"` - ConnectTimeout int `bson:"connectTimeout" json:"connectTimeout" yaml:"connectTimeout"` - UploadPartSize int `bson:"uploadPartSize,omitempty" json:"uploadPartSize,omitempty" yaml:"uploadPartSize,omitempty"` - MaxUploadParts int32 `bson:"maxUploadParts,omitempty" json:"maxUploadParts,omitempty" yaml:"maxUploadParts,omitempty"` + ConnectTimeout time.Duration `bson:"connectTimeout" json:"connectTimeout" yaml:"connectTimeout"` + UploadPartSize int `bson:"uploadPartSize,omitempty" json:"uploadPartSize,omitempty" yaml:"uploadPartSize,omitempty"` + MaxUploadParts int32 `bson:"maxUploadParts,omitempty" json:"maxUploadParts,omitempty" yaml:"maxUploadParts,omitempty"` } type Retryer struct { @@ -57,6 +58,9 @@ func (cfg *Config) Cast() error { if cfg.Region == "" { cfg.Region = defaultS3Region } + if cfg.ConnectTimeout == 0 { + cfg.ConnectTimeout = defaultConnectTimeout + } if cfg.Retryer != nil { if cfg.Retryer.MaxAttempts == 0 { cfg.Retryer.MaxAttempts = defaultRetryMaxAttempts @@ -174,7 +178,7 @@ func configureClient(config *Config) (*oss.Client, error) { ro.MaxBackoff = config.Retryer.MaxBackoff ro.BaseDelay = config.Retryer.BaseDelay })). 
- WithConnectTimeout(time.Duration(config.ConnectTimeout) * time.Second) + WithConnectTimeout(config.ConnectTimeout) if config.EndpointURL != "" { ossConfig = ossConfig.WithEndpoint(config.EndpointURL) From 16ca10f4d8c679d31d8000f9d29b18adcded3b21 Mon Sep 17 00:00:00 2001 From: Imre Nagi Date: Tue, 19 Aug 2025 15:43:49 +0700 Subject: [PATCH 14/95] fix: use multipart upload and copy for upload Signed-off-by: Imre Nagi --- pbm/storage/oss/oss.go | 97 +++++++++--------------------------------- 1 file changed, 20 insertions(+), 77 deletions(-) diff --git a/pbm/storage/oss/oss.go b/pbm/storage/oss/oss.go index bd074a915..824447847 100644 --- a/pbm/storage/oss/oss.go +++ b/pbm/storage/oss/oss.go @@ -1,7 +1,6 @@ package oss import ( - "bytes" "context" "fmt" "io" @@ -56,87 +55,31 @@ func (o *OSS) Save(name string, data io.Reader, options ...storage.Option) error } } + if opts.Size > 0 { + o.log.Debug("uploading %s with size %d", name, opts.Size) + } else { + o.log.Debug("uploading %s", name) + } + partSize := storage.ComputePartSize( opts.Size, - defaultPartSize, - minPartSize, - int64(o.cfg.MaxUploadParts), - int64(o.cfg.UploadPartSize), + oss.DefaultPartSize, + oss.MinPartSize, + int64(oss.MaxUploadParts), + int64(oss.DefaultUploadPartSize), ) - if o.log != nil && opts.UseLogger { - o.log.Debug("uploading %q [size hint: %v (%v); part size: %v (%v)]", - name, - opts.Size, - storage.PrettySize(opts.Size), - partSize, - storage.PrettySize(partSize)) - } - - key := path.Join(o.cfg.Prefix, name) - - // Use multipart upload - initResult, err := o.ossCli.InitiateMultipartUpload(context.Background(), &oss.InitiateMultipartUploadRequest{ - Bucket: oss.Ptr(o.cfg.Bucket), - Key: oss.Ptr(key), + uploader := oss.NewUploader(o.ossCli, func(uo *oss.UploaderOptions) { + uo.PartSize = partSize + uo.LeavePartsOnError = true }) - if err != nil { - return errors.Wrap(err, "initiate multipart upload") - } - uploadID := initResult.UploadId - var completeParts []oss.UploadPart - partNumber := int32(1) - buf := make([]byte, partSize) - - for { - n, err := data.Read(buf) - if n > 0 { - uploadPartResult, uerr := o.ossCli.UploadPart(context.Background(), &oss.UploadPartRequest{ - Bucket: oss.Ptr(o.cfg.Bucket), - Key: oss.Ptr(key), - UploadId: uploadID, - PartNumber: partNumber, - Body: bytes.NewReader(buf[:n]), - }) - if uerr != nil { - _, _ = o.ossCli.AbortMultipartUpload(context.Background(), &oss.AbortMultipartUploadRequest{ - Bucket: oss.Ptr(o.cfg.Bucket), - Key: oss.Ptr(key), - UploadId: uploadID, - }) - return errors.Wrap(uerr, "upload part") - } - completeParts = append(completeParts, oss.UploadPart{ - ETag: uploadPartResult.ETag, - PartNumber: partNumber, - }) - partNumber++ - } - - if err == io.EOF { - break - } - if err != nil { - _, _ = o.ossCli.AbortMultipartUpload(context.Background(), &oss.AbortMultipartUploadRequest{ - Bucket: oss.Ptr(o.cfg.Bucket), - Key: oss.Ptr(key), - UploadId: uploadID, - }) - return errors.Wrap(err, "read chunk") - } - } - - _, err = o.ossCli.CompleteMultipartUpload(context.Background(), &oss.CompleteMultipartUploadRequest{ - Bucket: oss.Ptr(o.cfg.Bucket), - Key: oss.Ptr(key), - UploadId: uploadID, - CompleteMultipartUpload: &oss.CompleteMultipartUpload{ - Parts: completeParts, - }, - }) + _, err := uploader.UploadFrom(context.Background(), &oss.PutObjectRequest{ + Bucket: oss.Ptr(o.cfg.Bucket), + Key: oss.Ptr(path.Join(o.cfg.Prefix, name)), + }, data) - return errors.Wrap(err, "complete multipart upload") + return errors.Wrap(err, "put object") } func (o *OSS) SourceReader(name 
string) (io.ReadCloser, error) { @@ -246,12 +189,12 @@ func (o *OSS) Delete(name string) error { // Copy makes a copy of the src objec/file under dst name func (o *OSS) Copy(src, dst string) error { - _, err := o.ossCli.CopyObject(context.Background(), &oss.CopyObjectRequest{ + uploader := oss.NewCopier(o.ossCli) + _, err := uploader.Copy(context.Background(), &oss.CopyObjectRequest{ Bucket: oss.Ptr(o.cfg.Bucket), Key: oss.Ptr(path.Join(o.cfg.Prefix, dst)), SourceBucket: oss.Ptr(o.cfg.Bucket), SourceKey: oss.Ptr(path.Join(o.cfg.Prefix, src)), }) - return errors.Wrap(err, "copy object") } From b06b1236f77181ebac2c7cbe13ebae6ab37a1076 Mon Sep 17 00:00:00 2001 From: Imre Nagi Date: Tue, 19 Aug 2025 16:24:25 +0700 Subject: [PATCH 15/95] fix: update part size parameters in OSS Save method --- pbm/storage/oss/oss.go | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/pbm/storage/oss/oss.go b/pbm/storage/oss/oss.go index 824447847..144f99b1d 100644 --- a/pbm/storage/oss/oss.go +++ b/pbm/storage/oss/oss.go @@ -63,15 +63,14 @@ func (o *OSS) Save(name string, data io.Reader, options ...storage.Option) error partSize := storage.ComputePartSize( opts.Size, - oss.DefaultPartSize, + defaultPartSize, oss.MinPartSize, - int64(oss.MaxUploadParts), - int64(oss.DefaultUploadPartSize), + int64(o.cfg.MaxUploadParts), + int64(o.cfg.UploadPartSize), ) uploader := oss.NewUploader(o.ossCli, func(uo *oss.UploaderOptions) { uo.PartSize = partSize - uo.LeavePartsOnError = true }) _, err := uploader.UploadFrom(context.Background(), &oss.PutObjectRequest{ From 723d4b9d0d265d4ada987c52988a9e8f48bf589a Mon Sep 17 00:00:00 2001 From: Imre Nagi Date: Tue, 19 Aug 2025 18:53:07 +0700 Subject: [PATCH 16/95] chore: add unit tests for ComputePartSize function --- pbm/storage/storage_test.go | 99 +++++++++++++++++++++++++++++++++++++ 1 file changed, 99 insertions(+) create mode 100644 pbm/storage/storage_test.go diff --git a/pbm/storage/storage_test.go b/pbm/storage/storage_test.go new file mode 100644 index 000000000..74809190c --- /dev/null +++ b/pbm/storage/storage_test.go @@ -0,0 +1,99 @@ +package storage_test + +import ( + "testing" + + "github.com/percona/percona-backup-mongodb/pbm/storage" +) + +func TestComputePartSize(t *testing.T) { + const ( + _ = iota + KB = 1 << (10 * iota) + MB + GB + ) + + const ( + defaultSize = 10 * MB + minSize = 5 * MB + maxParts = 10000 + ) + + tests := []struct { + name string + fileSize int64 + userSize int64 + want int64 + }{ + { + name: "default", + fileSize: 0, + userSize: 0, + want: defaultSize, + }, + { + name: "user size provided", + fileSize: 0, + userSize: 20 * MB, + want: 20 * MB, + }, + { + name: "user size less than min", + fileSize: 0, + userSize: 4 * MB, + want: minSize, + }, + { + name: "file size requires larger part size", + fileSize: 100 * GB, + userSize: 0, + want: 100 * GB / maxParts * 15 / 10, + }, + { + name: "file size requires larger part size than user size", + fileSize: 100 * GB, + userSize: 10 * MB, + want: 100 * GB / maxParts * 15 / 10, + }, + { + name: "file size does not require larger part size", + fileSize: 50 * GB, + userSize: 0, + want: defaultSize, + }, + { + name: "file size with user size", + fileSize: 50 * GB, + userSize: 12 * MB, + want: 12 * MB, + }, + { + name: "zero file size", + fileSize: 0, + userSize: 0, + want: defaultSize, + }, + { + name: "zero user size", + fileSize: 100 * GB, + userSize: 0, + want: 100 * GB / maxParts * 15 / 10, + }, + { + name: "negative user size", + fileSize: 0, + userSize: -1, + want: defaultSize, + 
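+			// The cases expecting 100*GB/maxParts*15/10 encode the sizing
+			// rule under test: once fileSize/maxParts outgrows the chosen
+			// part size, the part size is raised to 1.5x that quotient
+			// (about 16 MB for a 100 GB file) so the upload stays within
+			// the 10000-part limit.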
}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := storage.ComputePartSize(tt.fileSize, defaultSize, minSize, maxParts, tt.userSize) + if got != tt.want { + t.Errorf("ComputePartSize() = %v, want %v", got, tt.want) + } + }) + } +} From 15477fff6de6f3d4c646d8a4bd7d32051f804bec Mon Sep 17 00:00:00 2001 From: Imre Nagi Date: Wed, 20 Aug 2025 05:54:16 +0700 Subject: [PATCH 17/95] fix: remove redundant path.Join call in Delete method and correct comment in Copy method --- pbm/storage/oss/oss.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/pbm/storage/oss/oss.go b/pbm/storage/oss/oss.go index 144f99b1d..636f935c3 100644 --- a/pbm/storage/oss/oss.go +++ b/pbm/storage/oss/oss.go @@ -175,7 +175,6 @@ func (o *OSS) List(prefix, suffix string) ([]storage.FileInfo, error) { // It returns storage.ErrNotExist if a file doesn't exists. func (o *OSS) Delete(name string) error { key := path.Join(o.cfg.Prefix, name) - path.Join(o.cfg.Prefix, name) _, err := o.ossCli.DeleteObject(context.Background(), &oss.DeleteObjectRequest{ Bucket: oss.Ptr(o.cfg.Bucket), Key: oss.Ptr(key), @@ -186,7 +185,7 @@ func (o *OSS) Delete(name string) error { return nil } -// Copy makes a copy of the src objec/file under dst name +// Copy makes a copy of the src object/file under dst name func (o *OSS) Copy(src, dst string) error { uploader := oss.NewCopier(o.ossCli) _, err := uploader.Copy(context.Background(), &oss.CopyObjectRequest{ From 6acd6a3573035384be70979ae62f9e13a43221a4 Mon Sep 17 00:00:00 2001 From: Imre Nagi Date: Thu, 28 Aug 2025 06:18:30 +0700 Subject: [PATCH 18/95] feat: add server-side encryption support and update upload part size handling --- pbm/storage/oss/client.go | 15 ++++++++-- pbm/storage/oss/oss.go | 60 ++++++++++++++++++++++++++++++++------- 2 files changed, 61 insertions(+), 14 deletions(-) diff --git a/pbm/storage/oss/client.go b/pbm/storage/oss/client.go index f25229767..27cd41b47 100644 --- a/pbm/storage/oss/client.go +++ b/pbm/storage/oss/client.go @@ -12,8 +12,6 @@ import ( ) const ( - defaultPartSize int64 = 10 * 1024 * 1024 // 10Mb - minPartSize int64 = 5 * 1024 * 1024 // 5MB defaultS3Region = "ap-southeast-5" maxPart int32 = 10000 @@ -36,8 +34,16 @@ type Config struct { Retryer *Retryer `bson:"retryer,omitempty" json:"retryer,omitempty" yaml:"retryer,omitempty"` ConnectTimeout time.Duration `bson:"connectTimeout" json:"connectTimeout" yaml:"connectTimeout"` - UploadPartSize int `bson:"uploadPartSize,omitempty" json:"uploadPartSize,omitempty" yaml:"uploadPartSize,omitempty"` + UploadPartSize int64 `bson:"uploadPartSize,omitempty" json:"uploadPartSize,omitempty" yaml:"uploadPartSize,omitempty"` MaxUploadParts int32 `bson:"maxUploadParts,omitempty" json:"maxUploadParts,omitempty" yaml:"maxUploadParts,omitempty"` + + ServerSideEncryption *SSE `bson:"serverSideEncryption,omitempty" json:"serverSideEncryption,omitempty" yaml:"serverSideEncryption,omitempty"` +} + +type SSE struct { + EncryptionMethod string `bson:"encryptionMethod,omitempty" json:"encryptionMethod,omitempty" yaml:"encryptionMethod,omitempty"` + EncryptionAlgorithm string `bson:"encryptionAlgorithm,omitempty" json:"encryptionAlgorithm,omitempty" yaml:"encryptionAlgorithm,omitempty"` + EncryptionKeyID string `bson:"encryptionKeyId,omitempty" json:"encryptionKeyId,omitempty" yaml:"encryptionKeyId,omitempty"` } type Retryer struct { @@ -81,6 +87,9 @@ func (cfg *Config) Cast() error { if cfg.MaxUploadParts <= 0 { cfg.MaxUploadParts = maxPart } + if cfg.UploadPartSize <= 0 { + 
cfg.UploadPartSize = oss.DefaultUploadPartSize + } return nil } diff --git a/pbm/storage/oss/oss.go b/pbm/storage/oss/oss.go index 636f935c3..5589a8469 100644 --- a/pbm/storage/oss/oss.go +++ b/pbm/storage/oss/oss.go @@ -16,6 +16,12 @@ import ( var _ storage.Storage = &OSS{} +const ( + ServerSideEncryptionAes256 = "AES256" + ServerSideEncryptionKMS = "KMS" + ServerSideEncryptionSM4 = "SM4" +) + func New(cfg *Config, node string, l log.LogEvent) (*OSS, error) { if err := cfg.Cast(); err != nil { return nil, fmt.Errorf("cast config: %w", err) @@ -61,9 +67,28 @@ func (o *OSS) Save(name string, data io.Reader, options ...storage.Option) error o.log.Debug("uploading %s", name) } + req := &oss.PutObjectRequest{ + Bucket: oss.Ptr(o.cfg.Bucket), + Key: oss.Ptr(path.Join(o.cfg.Prefix, name)), + } + + if o.cfg.ServerSideEncryption != nil { + sse := o.cfg.ServerSideEncryption + switch sse.EncryptionMethod { + case ServerSideEncryptionSM4: + req.ServerSideEncryption = oss.Ptr(ServerSideEncryptionSM4) + case ServerSideEncryptionKMS: + req.ServerSideEncryption = oss.Ptr(ServerSideEncryptionKMS) + req.ServerSideDataEncryption = oss.Ptr(sse.EncryptionAlgorithm) + req.ServerSideEncryptionKeyId = oss.Ptr(sse.EncryptionKeyID) + default: + req.ServerSideEncryption = oss.Ptr(ServerSideEncryptionAes256) + } + } + partSize := storage.ComputePartSize( opts.Size, - defaultPartSize, + o.cfg.UploadPartSize, oss.MinPartSize, int64(o.cfg.MaxUploadParts), int64(o.cfg.UploadPartSize), @@ -72,11 +97,7 @@ func (o *OSS) Save(name string, data io.Reader, options ...storage.Option) error uploader := oss.NewUploader(o.ossCli, func(uo *oss.UploaderOptions) { uo.PartSize = partSize }) - - _, err := uploader.UploadFrom(context.Background(), &oss.PutObjectRequest{ - Bucket: oss.Ptr(o.cfg.Bucket), - Key: oss.Ptr(path.Join(o.cfg.Prefix, name)), - }, data) + _, err := uploader.UploadFrom(context.Background(), req, data) return errors.Wrap(err, "put object") } @@ -101,10 +122,12 @@ func (o *OSS) SourceReader(name string) (io.ReadCloser, error) { func (o *OSS) FileStat(name string) (storage.FileInfo, error) { inf := storage.FileInfo{} - res, err := o.ossCli.HeadObject(context.Background(), &oss.HeadObjectRequest{ + req := &oss.HeadObjectRequest{ Bucket: oss.Ptr(o.cfg.Bucket), Key: oss.Ptr(path.Join(o.cfg.Prefix, name)), - }) + } + + res, err := o.ossCli.HeadObject(context.Background(), req) if err != nil { var serr *oss.ServiceError if errors.As(err, &serr) && serr.Code == "NoSuchKey" { @@ -187,12 +210,27 @@ func (o *OSS) Delete(name string) error { // Copy makes a copy of the src object/file under dst name func (o *OSS) Copy(src, dst string) error { - uploader := oss.NewCopier(o.ossCli) - _, err := uploader.Copy(context.Background(), &oss.CopyObjectRequest{ + req := &oss.CopyObjectRequest{ Bucket: oss.Ptr(o.cfg.Bucket), Key: oss.Ptr(path.Join(o.cfg.Prefix, dst)), SourceBucket: oss.Ptr(o.cfg.Bucket), SourceKey: oss.Ptr(path.Join(o.cfg.Prefix, src)), - }) + } + + if o.cfg.ServerSideEncryption != nil { + sse := o.cfg.ServerSideEncryption + switch sse.EncryptionMethod { + case ServerSideEncryptionSM4: + req.ServerSideEncryption = oss.Ptr(ServerSideEncryptionSM4) + case ServerSideEncryptionKMS: + req.ServerSideEncryption = oss.Ptr(ServerSideEncryptionKMS) + req.ServerSideDataEncryption = oss.Ptr(sse.EncryptionAlgorithm) + req.ServerSideEncryptionKeyId = oss.Ptr(sse.EncryptionKeyID) + default: + req.ServerSideEncryption = oss.Ptr(ServerSideEncryptionAes256) + } + } + copier := oss.NewCopier(o.ossCli, func(co *oss.CopierOptions) {}) + _, 
err := copier.Copy(context.Background(), req)
 	return errors.Wrap(err, "copy object")
 }

From aec66c4ebbe95d59a6f73372b666b6b0bbb0fc7f Mon Sep 17 00:00:00 2001
From: Neha Oudin
Date: Thu, 18 Sep 2025 17:19:17 +0200
Subject: [PATCH 19/95] feat: Prevent SEGFAULT in Cast operations

---
 pbm/config/config_test.go | 13 +++++++++++++
 pbm/storage/fs/fs.go | 3 +++
 pbm/storage/s3/s3.go | 5 ++++-
 pbm/storage/s3/s3_test.go | 11 +++++++++++
 4 files changed, 31 insertions(+), 1 deletion(-)

diff --git a/pbm/config/config_test.go b/pbm/config/config_test.go
index 4ed6b4205..bd13e15fd 100644
--- a/pbm/config/config_test.go
+++ b/pbm/config/config_test.go
@@ -3,6 +3,7 @@ package config
 import (
 	"testing"

+	"github.com/percona/percona-backup-mongodb/pbm/storage"
 	"github.com/percona/percona-backup-mongodb/pbm/storage/azure"
 	"github.com/percona/percona-backup-mongodb/pbm/storage/fs"
 	"github.com/percona/percona-backup-mongodb/pbm/storage/gcs"
@@ -153,6 +154,18 @@ func TestIsSameStorage(t *testing.T) {
 	})
 }

+func TestCastError(t *testing.T) {
+	t.Run("S3", func(t *testing.T) {
+		cfg := StorageConf{Type: storage.S3}
+
+		err := cfg.Cast()
+		if err == nil {
+			t.Errorf("Cast did not raise an error")
+		}
+	})
+}
+
 func boolPtr(b bool) *bool {
 	return &b
 }

diff --git a/pbm/storage/fs/fs.go b/pbm/storage/fs/fs.go
index 1b8c4e9b0..c6fd2da22 100644
--- a/pbm/storage/fs/fs.go
+++ b/pbm/storage/fs/fs.go
@@ -64,6 +64,9 @@ func (cfg *Config) IsSameStorage(other *Config) bool {
 }

 func (cfg *Config) Cast() error {
+	if cfg == nil {
+		return errors.New("missing filesystem configuration with filesystem storage type")
+	}
 	if cfg.Path == "" {
 		return errors.New("path can't be empty")
 	}

diff --git a/pbm/storage/s3/s3.go b/pbm/storage/s3/s3.go
index dd0cbb64d..e88557358 100644
--- a/pbm/storage/s3/s3.go
+++ b/pbm/storage/s3/s3.go
@@ -47,7 +47,7 @@ const (
 //nolint:lll
 type Config struct {
 	Provider       string            `bson:"provider,omitempty" json:"provider,omitempty" yaml:"provider,omitempty"`
-	Region         string            `bson:"region" json:"region" yaml:"region"`
+	Region         string            `bson:"region,omitempty" json:"region,omitempty" yaml:"region,omitempty"`
 	EndpointURL    string            `bson:"endpointUrl,omitempty" json:"endpointUrl" yaml:"endpointUrl,omitempty"`
 	EndpointURLMap map[string]string `bson:"endpointUrlMap,omitempty" json:"endpointUrlMap,omitempty" yaml:"endpointUrlMap,omitempty"`
 	ForcePathStyle *bool             `bson:"forcePathStyle,omitempty" json:"forcePathStyle,omitempty" yaml:"forcePathStyle,omitempty"`
@@ -203,6 +203,9 @@ func (cfg *Config) IsSameStorage(other *Config) bool {
 }

 func (cfg *Config) Cast() error {
+	if cfg == nil {
+		return errors.New("missing S3 configuration with S3 storage type")
+	}
 	if cfg.Region == "" {
 		cfg.Region = defaultS3Region
 	}

diff --git a/pbm/storage/s3/s3_test.go b/pbm/storage/s3/s3_test.go
index 67e274b40..ffd5edf18 100644
--- a/pbm/storage/s3/s3_test.go
+++ b/pbm/storage/s3/s3_test.go
@@ -162,6 +162,17 @@ func TestConfig(t *testing.T) {
 			t.Error("expected not to be equal when updating credentials")
 		}
 	})
+
+	t.Run("Cast succeeds", func(t *testing.T) {
+		if opts.Region != "" {
+			t.Error("Start value is not ''")
+		}
+		opts.Cast()
+
+		if opts.Region != "us-east-1" {
+			t.Error("Default value should be set on Cast")
+		}
+	})
 }

 func TestRetryer(t *testing.T) {

From b7f5927c561d40af93a15a19fbfbea9971ebc889 Mon Sep 17 00:00:00 2001
From: Imre Nagi
Date: Wed, 24 Sep 2025 10:26:36 +0700
Subject: [PATCH 20/95] fix: implement missing storage method

---
 pbm/storage/oss/oss.go | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/pbm/storage/oss/oss.go
b/pbm/storage/oss/oss.go index 5589a8469..8d3169b70 100644 --- a/pbm/storage/oss/oss.go +++ b/pbm/storage/oss/oss.go @@ -234,3 +234,7 @@ func (o *OSS) Copy(src, dst string) error { _, err := copier.Copy(context.Background(), req) return errors.Wrap(err, "copy object") } + +func (o *OSS) DownloadStat() storage.DownloadStat { + return storage.DownloadStat{} +} From 311ba918c6c6f2d96daed02b32951c2867898413 Mon Sep 17 00:00:00 2001 From: Imre Nagi Date: Wed, 24 Sep 2025 14:36:43 +0700 Subject: [PATCH 21/95] feat: use NewSplitMergeMW Signed-off-by: Imre Nagi --- pbm/storage/oss/client.go | 17 +++++++++++++++++ pbm/storage/oss/oss.go | 6 +++--- 2 files changed, 20 insertions(+), 3 deletions(-) diff --git a/pbm/storage/oss/client.go b/pbm/storage/oss/client.go index 27cd41b47..dc8f1abab 100644 --- a/pbm/storage/oss/client.go +++ b/pbm/storage/oss/client.go @@ -20,6 +20,7 @@ const ( defaultRetryerMaxBackoff = 300 * time.Second defaultSessionDurationSeconds = 3600 defaultConnectTimeout = 5 * time.Second + defaultMaxObjSizeGB = 48800 // 48.8 TB ) //nolint:lll @@ -36,6 +37,7 @@ type Config struct { ConnectTimeout time.Duration `bson:"connectTimeout" json:"connectTimeout" yaml:"connectTimeout"` UploadPartSize int64 `bson:"uploadPartSize,omitempty" json:"uploadPartSize,omitempty" yaml:"uploadPartSize,omitempty"` MaxUploadParts int32 `bson:"maxUploadParts,omitempty" json:"maxUploadParts,omitempty" yaml:"maxUploadParts,omitempty"` + MaxObjSizeGB *float64 `bson:"maxObjSizeGB,omitempty" json:"maxObjSizeGB,omitempty" yaml:"maxObjSizeGB,omitempty"` ServerSideEncryption *SSE `bson:"serverSideEncryption,omitempty" json:"serverSideEncryption,omitempty" yaml:"serverSideEncryption,omitempty"` } @@ -99,9 +101,24 @@ func (cfg *Config) Clone() *Config { } rv := *cfg + if cfg.MaxObjSizeGB != nil { + v := *cfg.MaxObjSizeGB + rv.MaxObjSizeGB = &v + } + if cfg.Retryer != nil { + v := *cfg.Retryer + rv.Retryer = &v + } return &rv } +func (cfg *Config) GetMaxObjSizeGB() float64 { + if cfg.MaxObjSizeGB != nil && *cfg.MaxObjSizeGB > 0 { + return *cfg.MaxObjSizeGB + } + return defaultMaxObjSizeGB +} + func newCred(config *Config) (*cred, error) { var credentialsProvider providers.CredentialsProvider var err error diff --git a/pbm/storage/oss/oss.go b/pbm/storage/oss/oss.go index 8d3169b70..0101a6ec8 100644 --- a/pbm/storage/oss/oss.go +++ b/pbm/storage/oss/oss.go @@ -22,7 +22,7 @@ const ( ServerSideEncryptionSM4 = "SM4" ) -func New(cfg *Config, node string, l log.LogEvent) (*OSS, error) { +func New(cfg *Config, node string, l log.LogEvent) (storage.Storage, error) { if err := cfg.Cast(); err != nil { return nil, fmt.Errorf("cast config: %w", err) } @@ -38,8 +38,8 @@ func New(cfg *Config, node string, l log.LogEvent) (*OSS, error) { log: l, ossCli: client, } - - return o, nil + + return storage.NewSplitMergeMW(o, cfg.GetMaxObjSizeGB()), nil } type OSS struct { From e4ab7e5b9bb9a4af9a05f8154edc892540629822 Mon Sep 17 00:00:00 2001 From: Boris Ilijic Date: Fri, 26 Sep 2025 13:38:40 +0200 Subject: [PATCH 22/95] Bump default PSMDB ver 6.0->8.0 --- CONTRIBUTING.md | 4 +- Makefile | 2 +- e2e-tests/README.md | 8 ++-- .../docker/docker-compose-remapping.yaml | 8 ++-- e2e-tests/docker/docker-compose-rs.yaml | 16 +++---- e2e-tests/docker/docker-compose-single.yaml | 6 +-- e2e-tests/docker/docker-compose.yaml | 42 +++++++++---------- e2e-tests/docker/mongodb.dockerfile | 2 +- e2e-tests/docker/pbm.dockerfile | 2 +- e2e-tests/functions | 4 +- e2e-tests/run-new-cluster | 2 +- e2e-tests/run-remapping | 2 +- e2e-tests/run-rs | 2 +- 
e2e-tests/run-sharded | 2 +- e2e-tests/run-single | 2 +- e2e-tests/start-cluster | 2 +- e2e-tests/start-replset | 2 +- 17 files changed, 54 insertions(+), 54 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 39e24380b..7dcd32e18 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -101,7 +101,7 @@ You can find the tests in the ``e2e-tests`` directory. To save time on tests execution during development, we recommend running general and consistency tests for a sharded cluster: ```sh -$ MONGODB_VERSION=5.0 ./run-sharded +$ MONGODB_VERSION=8.0 ./run-sharded ``` ``$ MONGODB_VERSION`` stands for the Percona Server for MongoDB version Percona Backup for MongoDB is running with. Default is 5.0. @@ -109,7 +109,7 @@ $ MONGODB_VERSION=5.0 ./run-sharded After the development is complete and you are ready to submit a pull request, run all tests using the following command: ```sh -$ MONGODB_VERSION=5.0 ./run-all +$ MONGODB_VERSION=8.0 ./run-all ``` You can run tests on your local machine with whatever operating system you have. After you submit the pull request, we will check your patch on multiple operating systems. diff --git a/Makefile b/Makefile index 0a958dd4b..d804197c9 100644 --- a/Makefile +++ b/Makefile @@ -6,7 +6,7 @@ CGO_ENABLED?=0 GITCOMMIT?=$(shell git rev-parse HEAD 2>/dev/null) GITBRANCH?=$(shell git rev-parse --abbrev-ref HEAD 2>/dev/null) BUILDTIME?=$(shell TZ=UTC date "+%Y-%m-%d_%H:%M_UTC") -MONGO_TEST_VERSION?=5.0 +MONGO_TEST_VERSION?=8.0 define ENVS GO111MODULE=$(GOMOD) \ diff --git a/e2e-tests/README.md b/e2e-tests/README.md index 614d8b44a..79a07e87f 100644 --- a/e2e-tests/README.md +++ b/e2e-tests/README.md @@ -3,9 +3,9 @@ ## Run tests Run all tests ``` -$ MONGODB_VERSION=6.0 ./run-all +$ MONGODB_VERSION=8.0 ./run-all ``` -`MONGODB_VERSION` is a PSMDB version (e.g. 6.0/7.0/8.0). Default is `6.0` +`MONGODB_VERSION` is a PSMDB version (e.g. 6.0/7.0/8.0). Default is `8.0` `./run-all` would run all tests both on a sharded cluster and a non-sharded replica set. @@ -20,9 +20,9 @@ $ MONGODB_VERSION=6.0 ./run-all ## Start test cluster To start tests with a running pbm-agent and minio storage: ``` -$ MONGODB_VERSION=6.0 ./start-cluster +$ MONGODB_VERSION=8.0 ./start-cluster ``` -`MONGODB_VERSION` is a PSMDB version (e.g. 6.0/7.0/8.0). Default is `6.0` +`MONGODB_VERSION` is a PSMDB version (e.g. 6.0/7.0/8.0). Default is `8.0` `./start-replset` - to start a non-sharded replica set. diff --git a/e2e-tests/docker/docker-compose-remapping.yaml b/e2e-tests/docker/docker-compose-remapping.yaml index c08ce7ada..7a362b6ce 100644 --- a/e2e-tests/docker/docker-compose-remapping.yaml +++ b/e2e-tests/docker/docker-compose-remapping.yaml @@ -24,7 +24,7 @@ services: dockerfile: ./e2e-tests/docker/mongodb.dockerfile context: ../.. args: - - MONGODB_VERSION=${MONGODB_VERSION:-6.0} + - MONGODB_VERSION=${MONGODB_VERSION:-8.0} - MONGODB_IMAGE=${MONGODB_IMAGE:-perconalab/percona-server-mongodb} hostname: rs101 labels: @@ -54,7 +54,7 @@ services: dockerfile: ./e2e-tests/docker/pbm.dockerfile context: ../.. args: - - MONGODB_VERSION=${MONGODB_VERSION:-6.0} + - MONGODB_VERSION=${MONGODB_VERSION:-8.0} - MONGODB_IMAGE=${MONGODB_IMAGE:-perconalab/percona-server-mongodb} volumes: - ./conf:/etc/pbm @@ -69,7 +69,7 @@ services: dockerfile: ./e2e-tests/docker/mongodb.dockerfile context: ../.. 
args: - - MONGODB_VERSION=${MONGODB_VERSION:-6.0} + - MONGODB_VERSION=${MONGODB_VERSION:-8.0} - MONGODB_IMAGE=${MONGODB_IMAGE:-perconalab/percona-server-mongodb} hostname: rs201 labels: @@ -99,7 +99,7 @@ services: dockerfile: ./e2e-tests/docker/pbm.dockerfile context: ../.. args: - - MONGODB_VERSION=${MONGODB_VERSION:-6.0} + - MONGODB_VERSION=${MONGODB_VERSION:-8.0} - MONGODB_IMAGE=${MONGODB_IMAGE:-perconalab/percona-server-mongodb} volumes: - ./conf:/etc/pbm diff --git a/e2e-tests/docker/docker-compose-rs.yaml b/e2e-tests/docker/docker-compose-rs.yaml index 6ddc1a076..92be54ab3 100644 --- a/e2e-tests/docker/docker-compose-rs.yaml +++ b/e2e-tests/docker/docker-compose-rs.yaml @@ -28,7 +28,7 @@ services: dockerfile: ./e2e-tests/docker/pbm.dockerfile context: ../.. args: - - MONGODB_VERSION=${MONGODB_VERSION:-6.0} + - MONGODB_VERSION=${MONGODB_VERSION:-8.0} - MONGODB_IMAGE=${MONGODB_IMAGE:-perconalab/percona-server-mongodb} volumes: - ./conf:/etc/pbm @@ -41,7 +41,7 @@ services: dockerfile: ./e2e-tests/docker/mongodb.dockerfile context: ../.. args: - - MONGODB_VERSION=${MONGODB_VERSION:-6.0} + - MONGODB_VERSION=${MONGODB_VERSION:-8.0} - MONGODB_IMAGE=${MONGODB_IMAGE:-perconalab/percona-server-mongodb} hostname: rs101 labels: @@ -51,7 +51,7 @@ services: - MONGO_USER=dba - BACKUP_USER=bcp - MONGO_PASS=test1234 - - MONGODB_VERSION=${MONGODB_VERSION:-6.0} + - MONGODB_VERSION=${MONGODB_VERSION:-8.0} command: mongod --replSet rs1 --directoryperdb --port 27017 --dbpath=/data/db/ --storageEngine wiredTiger --keyFile /opt/keyFile --wiredTigerCacheSizeGB 1 volumes: - data-rs101:/data/db @@ -61,7 +61,7 @@ services: dockerfile: ./e2e-tests/docker/mongodb.dockerfile context: ../.. args: - - MONGODB_VERSION=${MONGODB_VERSION:-6.0} + - MONGODB_VERSION=${MONGODB_VERSION:-8.0} - MONGODB_IMAGE=${MONGODB_IMAGE:-perconalab/percona-server-mongodb} hostname: rs102 labels: @@ -74,7 +74,7 @@ services: dockerfile: ./e2e-tests/docker/mongodb.dockerfile context: ../.. args: - - MONGODB_VERSION=${MONGODB_VERSION:-6.0} + - MONGODB_VERSION=${MONGODB_VERSION:-8.0} - MONGODB_IMAGE=${MONGODB_IMAGE:-perconalab/percona-server-mongodb} hostname: rs103 labels: @@ -98,7 +98,7 @@ services: dockerfile: ./e2e-tests/docker/pbm.dockerfile context: ../.. args: - - MONGODB_VERSION=${MONGODB_VERSION:-6.0} + - MONGODB_VERSION=${MONGODB_VERSION:-8.0} - MONGODB_IMAGE=${MONGODB_IMAGE:-perconalab/percona-server-mongodb} volumes: - ./conf:/etc/pbm @@ -123,7 +123,7 @@ services: dockerfile: ./e2e-tests/docker/pbm.dockerfile context: ../.. args: - - MONGODB_VERSION=${MONGODB_VERSION:-6.0} + - MONGODB_VERSION=${MONGODB_VERSION:-8.0} - MONGODB_IMAGE=${MONGODB_IMAGE:-perconalab/percona-server-mongodb} command: pbm-agent cap_add: @@ -147,7 +147,7 @@ services: dockerfile: ./e2e-tests/docker/pbm.dockerfile context: ../.. args: - - MONGODB_VERSION=${MONGODB_VERSION:-6.0} + - MONGODB_VERSION=${MONGODB_VERSION:-8.0} - MONGODB_IMAGE=${MONGODB_IMAGE:-perconalab/percona-server-mongodb} command: pbm-agent cap_add: diff --git a/e2e-tests/docker/docker-compose-single.yaml b/e2e-tests/docker/docker-compose-single.yaml index 8cb96b83b..e0513d759 100644 --- a/e2e-tests/docker/docker-compose-single.yaml +++ b/e2e-tests/docker/docker-compose-single.yaml @@ -24,7 +24,7 @@ services: dockerfile: ./e2e-tests/docker/mongodb.dockerfile context: ../.. 
args: - - MONGODB_VERSION=${MONGODB_VERSION:-6.0} + - MONGODB_VERSION=${MONGODB_VERSION:-8.0} - MONGODB_IMAGE=${MONGODB_IMAGE:-perconalab/percona-server-mongodb} hostname: rs101 labels: @@ -35,7 +35,7 @@ services: - BACKUP_USER=bcp - MONGO_PASS=test1234 - SINGLE_NODE=true - - MONGODB_VERSION=${MONGODB_VERSION:-6.0} + - MONGODB_VERSION=${MONGODB_VERSION:-8.0} command: mongod --replSet rs1 --port 27017 --storageEngine wiredTiger --keyFile /opt/keyFile --wiredTigerCacheSizeGB 1 volumes: - data-rs101:/data/db @@ -55,7 +55,7 @@ services: dockerfile: ./e2e-tests/docker/pbm.dockerfile context: ../.. args: - - MONGODB_VERSION=${MONGODB_VERSION:-6.0} + - MONGODB_VERSION=${MONGODB_VERSION:-8.0} - MONGODB_IMAGE=${MONGODB_IMAGE:-perconalab/percona-server-mongodb} volumes: - ./conf:/etc/pbm diff --git a/e2e-tests/docker/docker-compose.yaml b/e2e-tests/docker/docker-compose.yaml index d215198c9..4e21b8786 100644 --- a/e2e-tests/docker/docker-compose.yaml +++ b/e2e-tests/docker/docker-compose.yaml @@ -30,7 +30,7 @@ services: dockerfile: ./e2e-tests/docker/pbm.dockerfile context: ../.. args: - - MONGODB_VERSION=${MONGODB_VERSION:-6.0} + - MONGODB_VERSION=${MONGODB_VERSION:-8.0} - MONGODB_IMAGE=${MONGODB_IMAGE:-perconalab/percona-server-mongodb} volumes: - ./conf:/etc/pbm @@ -43,7 +43,7 @@ services: dockerfile: ./e2e-tests/docker/mongodb.dockerfile context: ../.. args: - - MONGODB_VERSION=${MONGODB_VERSION:-6.0} + - MONGODB_VERSION=${MONGODB_VERSION:-8.0} - MONGODB_IMAGE=${MONGODB_IMAGE:-perconalab/percona-server-mongodb} hostname: cfg01 labels: @@ -54,7 +54,7 @@ services: - MONGO_USER=dba - BACKUP_USER=bcp - MONGO_PASS=test1234 - - MONGODB_VERSION=${MONGODB_VERSION:-6.0} + - MONGODB_VERSION=${MONGODB_VERSION:-8.0} command: mongod --configsvr --dbpath /data/db --replSet cfg --bind_ip_all --port 27017 --keyFile /opt/keyFile --storageEngine wiredTiger --wiredTigerCacheSizeGB 1 volumes: - ./scripts/start.sh:/opt/start.sh @@ -64,7 +64,7 @@ services: dockerfile: ./e2e-tests/docker/mongodb.dockerfile context: ../.. args: - - MONGODB_VERSION=${MONGODB_VERSION:-6.0} + - MONGODB_VERSION=${MONGODB_VERSION:-8.0} - MONGODB_IMAGE=${MONGODB_IMAGE:-perconalab/percona-server-mongodb} hostname: cfg02 labels: @@ -77,7 +77,7 @@ services: dockerfile: ./e2e-tests/docker/mongodb.dockerfile context: ../.. args: - - MONGODB_VERSION=${MONGODB_VERSION:-6.0} + - MONGODB_VERSION=${MONGODB_VERSION:-8.0} - MONGODB_IMAGE=${MONGODB_IMAGE:-perconalab/percona-server-mongodb} hostname: cfg03 labels: @@ -99,7 +99,7 @@ services: dockerfile: ./e2e-tests/docker/pbm.dockerfile context: ../.. args: - - MONGODB_VERSION=${MONGODB_VERSION:-6.0} + - MONGODB_VERSION=${MONGODB_VERSION:-8.0} - MONGODB_IMAGE=${MONGODB_IMAGE:-perconalab/percona-server-mongodb} command: pbm-agent cap_add: @@ -121,7 +121,7 @@ services: dockerfile: ./e2e-tests/docker/pbm.dockerfile context: ../.. args: - - MONGODB_VERSION=${MONGODB_VERSION:-6.0} + - MONGODB_VERSION=${MONGODB_VERSION:-8.0} - MONGODB_IMAGE=${MONGODB_IMAGE:-perconalab/percona-server-mongodb} command: pbm-agent cap_add: @@ -143,7 +143,7 @@ services: dockerfile: ./e2e-tests/docker/pbm.dockerfile context: ../.. args: - - MONGODB_VERSION=${MONGODB_VERSION:-6.0} + - MONGODB_VERSION=${MONGODB_VERSION:-8.0} - MONGODB_IMAGE=${MONGODB_IMAGE:-perconalab/percona-server-mongodb} command: pbm-agent cap_add: @@ -157,7 +157,7 @@ services: dockerfile: ./e2e-tests/docker/mongodb.dockerfile context: ../.. 
args: - - MONGODB_VERSION=${MONGODB_VERSION:-6.0} + - MONGODB_VERSION=${MONGODB_VERSION:-8.0} - MONGODB_IMAGE=${MONGODB_IMAGE:-perconalab/percona-server-mongodb} hostname: rs101 labels: @@ -177,7 +177,7 @@ services: dockerfile: ./e2e-tests/docker/mongodb.dockerfile context: ../.. args: - - MONGODB_VERSION=${MONGODB_VERSION:-6.0} + - MONGODB_VERSION=${MONGODB_VERSION:-8.0} - MONGODB_IMAGE=${MONGODB_IMAGE:-perconalab/percona-server-mongodb} hostname: rs102 labels: @@ -190,7 +190,7 @@ services: dockerfile: ./e2e-tests/docker/mongodb.dockerfile context: ../.. args: - - MONGODB_VERSION=${MONGODB_VERSION:-6.0} + - MONGODB_VERSION=${MONGODB_VERSION:-8.0} - MONGODB_IMAGE=${MONGODB_IMAGE:-perconalab/percona-server-mongodb} hostname: rs103 labels: @@ -212,7 +212,7 @@ services: dockerfile: ./e2e-tests/docker/pbm.dockerfile context: ../.. args: - - MONGODB_VERSION=${MONGODB_VERSION:-6.0} + - MONGODB_VERSION=${MONGODB_VERSION:-8.0} - MONGODB_IMAGE=${MONGODB_IMAGE:-perconalab/percona-server-mongodb} volumes: - ./conf:/etc/pbm @@ -235,7 +235,7 @@ services: dockerfile: ./e2e-tests/docker/pbm.dockerfile context: ../.. args: - - MONGODB_VERSION=${MONGODB_VERSION:-6.0} + - MONGODB_VERSION=${MONGODB_VERSION:-8.0} - MONGODB_IMAGE=${MONGODB_IMAGE:-perconalab/percona-server-mongodb} command: pbm-agent cap_add: @@ -257,7 +257,7 @@ services: dockerfile: ./e2e-tests/docker/pbm.dockerfile context: ../.. args: - - MONGODB_VERSION=${MONGODB_VERSION:-6.0} + - MONGODB_VERSION=${MONGODB_VERSION:-8.0} - MONGODB_IMAGE=${MONGODB_IMAGE:-perconalab/percona-server-mongodb} command: pbm-agent cap_add: @@ -271,7 +271,7 @@ services: dockerfile: ./e2e-tests/docker/mongodb.dockerfile context: ../.. args: - - MONGODB_VERSION=${MONGODB_VERSION:-6.0} + - MONGODB_VERSION=${MONGODB_VERSION:-8.0} - MONGODB_IMAGE=${MONGODB_IMAGE:-perconalab/percona-server-mongodb} hostname: rs201 labels: @@ -291,7 +291,7 @@ services: dockerfile: ./e2e-tests/docker/mongodb.dockerfile context: ../.. args: - - MONGODB_VERSION=${MONGODB_VERSION:-6.0} + - MONGODB_VERSION=${MONGODB_VERSION:-8.0} - MONGODB_IMAGE=${MONGODB_IMAGE:-perconalab/percona-server-mongodb} hostname: rs202 labels: @@ -304,7 +304,7 @@ services: dockerfile: ./e2e-tests/docker/mongodb.dockerfile context: ../.. args: - - MONGODB_VERSION=${MONGODB_VERSION:-6.0} + - MONGODB_VERSION=${MONGODB_VERSION:-8.0} - MONGODB_IMAGE=${MONGODB_IMAGE:-perconalab/percona-server-mongodb} hostname: rs203 labels: @@ -326,7 +326,7 @@ services: dockerfile: ./e2e-tests/docker/pbm.dockerfile context: ../.. args: - - MONGODB_VERSION=${MONGODB_VERSION:-6.0} + - MONGODB_VERSION=${MONGODB_VERSION:-8.0} - MONGODB_IMAGE=${MONGODB_IMAGE:-perconalab/percona-server-mongodb} command: pbm-agent cap_add: @@ -348,7 +348,7 @@ services: dockerfile: ./e2e-tests/docker/pbm.dockerfile context: ../.. args: - - MONGODB_VERSION=${MONGODB_VERSION:-6.0} + - MONGODB_VERSION=${MONGODB_VERSION:-8.0} - MONGODB_IMAGE=${MONGODB_IMAGE:-perconalab/percona-server-mongodb} command: pbm-agent cap_add: @@ -370,7 +370,7 @@ services: dockerfile: ./e2e-tests/docker/pbm.dockerfile context: ../.. args: - - MONGODB_VERSION=${MONGODB_VERSION:-6.0} + - MONGODB_VERSION=${MONGODB_VERSION:-8.0} - MONGODB_IMAGE=${MONGODB_IMAGE:-perconalab/percona-server-mongodb} command: pbm-agent cap_add: @@ -384,7 +384,7 @@ services: dockerfile: ./e2e-tests/docker/mongodb.dockerfile context: ../.. 
args: - - MONGODB_VERSION=${MONGODB_VERSION:-6.0} + - MONGODB_VERSION=${MONGODB_VERSION:-8.0} - MONGODB_IMAGE=${MONGODB_IMAGE:-perconalab/percona-server-mongodb} hostname: mongos labels: diff --git a/e2e-tests/docker/mongodb.dockerfile b/e2e-tests/docker/mongodb.dockerfile index 7a8a4f32b..87766d7ad 100644 --- a/e2e-tests/docker/mongodb.dockerfile +++ b/e2e-tests/docker/mongodb.dockerfile @@ -1,4 +1,4 @@ -ARG MONGODB_VERSION=6.0 +ARG MONGODB_VERSION=8.0 ARG MONGODB_IMAGE=perconalab/percona-server-mongodb FROM ${MONGODB_IMAGE}:${MONGODB_VERSION} USER root diff --git a/e2e-tests/docker/pbm.dockerfile b/e2e-tests/docker/pbm.dockerfile index c1b8055d9..fcadb9334 100644 --- a/e2e-tests/docker/pbm.dockerfile +++ b/e2e-tests/docker/pbm.dockerfile @@ -1,4 +1,4 @@ -ARG MONGODB_VERSION=6.0 +ARG MONGODB_VERSION=8.0 ARG MONGODB_IMAGE=perconalab/percona-server-mongodb FROM ${MONGODB_IMAGE}:${MONGODB_VERSION} AS mongo_image diff --git a/e2e-tests/functions b/e2e-tests/functions index f5f4cef80..efec5bdc6 100644 --- a/e2e-tests/functions +++ b/e2e-tests/functions @@ -169,7 +169,7 @@ start_cluster() { mkdir "${test_dir}/docker/backups" chmod -R 777 "${test_dir}/docker/backups" fi - export MONGODB_VERSION=${mongo_version:-"6.0"} + export MONGODB_VERSION=${mongo_version:-"8.0"} export MONGODB_IMAGE=${MONGODB_IMAGE:-"perconalab/percona-server-mongodb"} docker compose -f $COMPOSE_PATH up --quiet-pull --no-color -d \ cfg01 cfg02 cfg03 rs101 rs102 rs103 rs201 rs202 rs203 mongos minio createbucket @@ -210,7 +210,7 @@ start_replset() { chmod -R 777 "${test_dir}/docker/backups" fi - export MONGODB_VERSION=${mongo_version:-"6.0"} + export MONGODB_VERSION=${mongo_version:-"8.0"} export MONGODB_IMAGE=${MONGODB_IMAGE:-"perconalab/percona-server-mongodb"} docker compose -f $compose up --quiet-pull --no-color -d \ $nodes diff --git a/e2e-tests/run-new-cluster b/e2e-tests/run-new-cluster index 92aafe8fb..cc266c5ef 100755 --- a/e2e-tests/run-new-cluster +++ b/e2e-tests/run-new-cluster @@ -6,7 +6,7 @@ set -o xtrace test_dir=$(realpath $(dirname $0)) . ${test_dir}/functions -MONGO_VERSION=${MONGODB_VERSION:-"6.0"} +MONGO_VERSION=${MONGODB_VERSION:-"8.0"} desc 'RUN RESTORE ON NEW CLUSTER TEST' diff --git a/e2e-tests/run-remapping b/e2e-tests/run-remapping index 7d7d81b2d..c48eac2a5 100755 --- a/e2e-tests/run-remapping +++ b/e2e-tests/run-remapping @@ -6,7 +6,7 @@ set -o xtrace test_dir=$(realpath $(dirname $0)) . ${test_dir}/functions -MONGO_VERSION=${MONGODB_VERSION:-"6.0"} +MONGO_VERSION=${MONGODB_VERSION:-"8.0"} desc 'RUN REMAPPING TESTS' diff --git a/e2e-tests/run-rs b/e2e-tests/run-rs index 92e03c928..6a6dbdeaf 100755 --- a/e2e-tests/run-rs +++ b/e2e-tests/run-rs @@ -6,7 +6,7 @@ set -o xtrace test_dir=$(realpath $(dirname $0)) . ${test_dir}/functions -MONGO_VERSION=${MONGODB_VERSION:-"6.0"} +MONGO_VERSION=${MONGODB_VERSION:-"8.0"} desc 'RUN REPLICA SET TESTS' diff --git a/e2e-tests/run-sharded b/e2e-tests/run-sharded index 99e006f48..7b647105c 100755 --- a/e2e-tests/run-sharded +++ b/e2e-tests/run-sharded @@ -6,7 +6,7 @@ set -o xtrace test_dir=$(realpath $(dirname $0)) . ${test_dir}/functions -MONGO_VERSION=${MONGODB_VERSION:-"6.0"} +MONGO_VERSION=${MONGODB_VERSION:-"8.0"} desc 'RUN SHARDED CLUTER TESTS' diff --git a/e2e-tests/run-single b/e2e-tests/run-single index 3ba3b2fe2..2ea1818f1 100755 --- a/e2e-tests/run-single +++ b/e2e-tests/run-single @@ -6,7 +6,7 @@ set -o xtrace test_dir=$(realpath $(dirname $0)) . 
${test_dir}/functions -MONGO_VERSION=${MONGODB_VERSION:-"6.0"} +MONGO_VERSION=${MONGODB_VERSION:-"8.0"} desc 'RUN REPLICA SET TESTS' diff --git a/e2e-tests/start-cluster b/e2e-tests/start-cluster index a17040f46..f05ae5b86 100755 --- a/e2e-tests/start-cluster +++ b/e2e-tests/start-cluster @@ -6,7 +6,7 @@ set -o xtrace test_dir=$(realpath $(dirname $0)) . ${test_dir}/functions -MONGO_VERSION=${MONGODB_VERSION:-"6.0"} +MONGO_VERSION=${MONGODB_VERSION:-"8.0"} desc 'Start cluster' start_cluster $MONGO_VERSION diff --git a/e2e-tests/start-replset b/e2e-tests/start-replset index 4f915a55f..19d93fc1d 100755 --- a/e2e-tests/start-replset +++ b/e2e-tests/start-replset @@ -6,7 +6,7 @@ set -o xtrace test_dir=$(realpath $(dirname $0)) . ${test_dir}/functions -MONGO_VERSION=${MONGODB_VERSION:-"6.0"} +MONGO_VERSION=${MONGODB_VERSION:-"8.0"} desc 'Start replicaset' start_replset $MONGO_VERSION "$COMPOSE_RS_PATH" From 9eede552d6bc3c143c9935943c3326d54ac9cbea Mon Sep 17 00:00:00 2001 From: Boris Ilijic Date: Fri, 26 Sep 2025 13:50:32 +0200 Subject: [PATCH 23/95] Update default PSMDB in CONTRIBUTING.md Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- CONTRIBUTING.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 7dcd32e18..36a2339a8 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -104,7 +104,7 @@ To save time on tests execution during development, we recommend running genera $ MONGODB_VERSION=8.0 ./run-sharded ``` -``$ MONGODB_VERSION`` stands for the Percona Server for MongoDB version Percona Backup for MongoDB is running with. Default is 5.0. +``$ MONGODB_VERSION`` stands for the Percona Server for MongoDB version Percona Backup for MongoDB is running with. Default is 8.0. After the development is complete and you are ready to submit a pull request, run all tests using the following command: From ac3ad481b5a78dc637fb0bb6cc5544ac6e9a721b Mon Sep 17 00:00:00 2001 From: Boris Ilijic Date: Fri, 26 Sep 2025 19:10:33 +0200 Subject: [PATCH 24/95] Move GCS endpoint from S3 to GCS package --- pbm/storage/gcs/gcs.go | 1 + pbm/storage/gcs/hmac_client.go | 2 +- pbm/storage/s3/s3.go | 2 -- 3 files changed, 2 insertions(+), 3 deletions(-) diff --git a/pbm/storage/gcs/gcs.go b/pbm/storage/gcs/gcs.go index 1fb1fa4d6..63cc98540 100644 --- a/pbm/storage/gcs/gcs.go +++ b/pbm/storage/gcs/gcs.go @@ -13,6 +13,7 @@ import ( ) const ( + GCSEndpointURL = "storage.googleapis.com" defaultMaxObjSizeGB = 5018 // 4.9 TB ) diff --git a/pbm/storage/gcs/hmac_client.go b/pbm/storage/gcs/hmac_client.go index ae0a7646f..c47f69962 100644 --- a/pbm/storage/gcs/hmac_client.go +++ b/pbm/storage/gcs/hmac_client.go @@ -36,7 +36,7 @@ func newHMACClient(opts *Config, l log.LogEvent) (*hmacClient, error) { } } - minioClient, err := minio.New("storage.googleapis.com", &minio.Options{ + minioClient, err := minio.New(GCSEndpointURL, &minio.Options{ Creds: credentials.NewStaticV2(opts.Credentials.HMACAccessKey, opts.Credentials.HMACSecret, ""), }) if err != nil { diff --git a/pbm/storage/s3/s3.go b/pbm/storage/s3/s3.go index dd0cbb64d..39fbc0bb4 100644 --- a/pbm/storage/s3/s3.go +++ b/pbm/storage/s3/s3.go @@ -33,8 +33,6 @@ import ( ) const ( - // GCSEndpointURL is the endpoint url for Google Clound Strage service - GCSEndpointURL = "storage.googleapis.com" defaultPartSize int64 = 10 * 1024 * 1024 // 10Mb defaultS3Region = "us-east-1" From f5eb4f24970ff950e228df957e496fdf7b20cc8a Mon Sep 17 00:00:00 2001 From: Boris Ilijic Date: Fri, 26 Sep 2025 19:19:17 +0200 
Subject: [PATCH 25/95] Add minio storage type constant --- pbm/storage/storage.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/pbm/storage/storage.go b/pbm/storage/storage.go index 0915bf2c7..122ba6672 100644 --- a/pbm/storage/storage.go +++ b/pbm/storage/storage.go @@ -29,6 +29,7 @@ const ( Filesystem Type = "filesystem" Blackhole Type = "blackhole" GCS Type = "gcs" + Minio Type = "minio" ) type FileInfo struct { @@ -67,6 +68,8 @@ func ParseType(s string) Type { return Blackhole case string(GCS): return GCS + case string(Minio): + return Minio default: return Undefined } From 38c3369f2ac474d5aca59330149688eb5e4695cd Mon Sep 17 00:00:00 2001 From: Boris Ilijic Date: Fri, 26 Sep 2025 22:02:06 +0200 Subject: [PATCH 26/95] Remove Provider from S3 Config It's not used anymore. --- pbm/storage/s3/s3.go | 1 - 1 file changed, 1 deletion(-) diff --git a/pbm/storage/s3/s3.go b/pbm/storage/s3/s3.go index 39fbc0bb4..0bcd461c1 100644 --- a/pbm/storage/s3/s3.go +++ b/pbm/storage/s3/s3.go @@ -44,7 +44,6 @@ const ( //nolint:lll type Config struct { - Provider string `bson:"provider,omitempty" json:"provider,omitempty" yaml:"provider,omitempty"` Region string `bson:"region" json:"region" yaml:"region"` EndpointURL string `bson:"endpointUrl,omitempty" json:"endpointUrl" yaml:"endpointUrl,omitempty"` EndpointURLMap map[string]string `bson:"endpointUrlMap,omitempty" json:"endpointUrlMap,omitempty" yaml:"endpointUrlMap,omitempty"` From 6788bf5f3f7a85745d48cb2bf9b6fb5d9fc75b49 Mon Sep 17 00:00:00 2001 From: Imre Nagi Date: Mon, 29 Sep 2025 09:46:48 +0700 Subject: [PATCH 27/95] fix: resolve review comments --- pbm/storage/oss/client.go | 8 ++++++-- pbm/storage/oss/oss.go | 19 +++++++++++-------- 2 files changed, 17 insertions(+), 10 deletions(-) diff --git a/pbm/storage/oss/client.go b/pbm/storage/oss/client.go index dc8f1abab..3f6fc6b6a 100644 --- a/pbm/storage/oss/client.go +++ b/pbm/storage/oss/client.go @@ -12,7 +12,7 @@ import ( ) const ( - defaultS3Region = "ap-southeast-5" + defaultS3Region = "us-east-1" maxPart int32 = 10000 defaultRetryMaxAttempts = 5 @@ -20,7 +20,7 @@ const ( defaultRetryerMaxBackoff = 300 * time.Second defaultSessionDurationSeconds = 3600 defaultConnectTimeout = 5 * time.Second - defaultMaxObjSizeGB = 48800 // 48.8 TB + defaultMaxObjSizeGB = 48700 // 48.7 TB ) //nolint:lll @@ -109,6 +109,10 @@ func (cfg *Config) Clone() *Config { v := *cfg.Retryer rv.Retryer = &v } + if cfg.ServerSideEncryption != nil { + a := *cfg.ServerSideEncryption + rv.ServerSideEncryption = &a + } return &rv } diff --git a/pbm/storage/oss/oss.go b/pbm/storage/oss/oss.go index 0101a6ec8..590dbe4f0 100644 --- a/pbm/storage/oss/oss.go +++ b/pbm/storage/oss/oss.go @@ -38,7 +38,7 @@ func New(cfg *Config, node string, l log.LogEvent) (storage.Storage, error) { log: l, ossCli: client, } - + return storage.NewSplitMergeMW(o, cfg.GetMaxObjSizeGB()), nil } @@ -61,12 +61,6 @@ func (o *OSS) Save(name string, data io.Reader, options ...storage.Option) error } } - if opts.Size > 0 { - o.log.Debug("uploading %s with size %d", name, opts.Size) - } else { - o.log.Debug("uploading %s", name) - } - req := &oss.PutObjectRequest{ Bucket: oss.Ptr(o.cfg.Bucket), Key: oss.Ptr(path.Join(o.cfg.Prefix, name)), @@ -88,12 +82,21 @@ func (o *OSS) Save(name string, data io.Reader, options ...storage.Option) error partSize := storage.ComputePartSize( opts.Size, - o.cfg.UploadPartSize, + oss.DefaultUploadPartSize, oss.MinPartSize, int64(o.cfg.MaxUploadParts), int64(o.cfg.UploadPartSize), ) + if o.log != nil && opts.UseLogger {
o.log.Debug("uploading %q [size hint: %v (%v); part size: %v (%v)]", + name, + opts.Size, + storage.PrettySize(opts.Size), + partSize, + storage.PrettySize(partSize)) + } + + uploader := oss.NewUploader(o.ossCli, func(uo *oss.UploaderOptions) { uo.PartSize = partSize }) From 8667b86b9974cdf19633272e10002bd54c140bc1 Mon Sep 17 00:00:00 2001 From: Imre Nagi Date: Mon, 29 Sep 2025 10:08:26 +0700 Subject: [PATCH 28/95] fix: call FileStat before deleting file --- pbm/storage/oss/oss.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/pbm/storage/oss/oss.go b/pbm/storage/oss/oss.go index 590dbe4f0..fe1a88bf5 100644 --- a/pbm/storage/oss/oss.go +++ b/pbm/storage/oss/oss.go @@ -200,6 +200,10 @@ func (o *OSS) List(prefix, suffix string) ([]storage.FileInfo, error) { // Delete deletes given file. // It returns storage.ErrNotExist if a file doesn't exists. func (o *OSS) Delete(name string) error { + if _, err := o.FileStat(name); err == storage.ErrNotExist { + return err + } + key := path.Join(o.cfg.Prefix, name) _, err := o.ossCli.DeleteObject(context.Background(), &oss.DeleteObjectRequest{ Bucket: oss.Ptr(o.cfg.Bucket), From fb43864ab8ead69695e7f7be6b791ea07b076898 Mon Sep 17 00:00:00 2001 From: Imre Nagi Date: Mon, 29 Sep 2025 10:09:40 +0700 Subject: [PATCH 29/95] fix: change incorrect default var name --- pbm/storage/oss/client.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pbm/storage/oss/client.go b/pbm/storage/oss/client.go index 3f6fc6b6a..7b897edd5 100644 --- a/pbm/storage/oss/client.go +++ b/pbm/storage/oss/client.go @@ -12,8 +12,8 @@ import ( ) const ( - defaultS3Region = "us-east-1" - maxPart int32 = 10000 + defaultOSSRegion = "us-east-1" + maxPart int32 = 10000 defaultRetryMaxAttempts = 5 defaultRetryBaseDelay = 30 * time.Millisecond @@ -64,7 +64,7 @@ type Credentials struct { func (cfg *Config) Cast() error { if cfg.Region == "" { - cfg.Region = defaultS3Region + cfg.Region = defaultOSSRegion } if cfg.ConnectTimeout == 0 { cfg.ConnectTimeout = defaultConnectTimeout From 3c1aeae635dff5e0425f135242cd47dab4c3d630 Mon Sep 17 00:00:00 2001 From: Boris Ilijic Date: Mon, 29 Sep 2025 12:48:07 +0200 Subject: [PATCH 30/95] Add minio storage The intention is to use the minio driver as an alternative for accessing S3-compatible storages, or MinIO storage itself.
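For reference, a minimal sketch of how the new driver is expected to be wired up (the endpoint, bucket, prefix, and credential values below are placeholders for illustration, not defaults shipped with PBM):

    import (
        "github.com/percona/percona-backup-mongodb/pbm/storage"
        "github.com/percona/percona-backup-mongodb/pbm/storage/mio"
    )

    func newMinioStorage(node string) (storage.Storage, error) {
        cfg := &mio.Config{
            EndpointURL: "minio.local:9000", // placeholder endpoint
            Bucket:      "pbm-backups",      // placeholder bucket
            Prefix:      "cluster-a",        // placeholder prefix
            Credentials: mio.Credentials{
                AccessKeyID:     "ACCESS_KEY", // placeholder
                SecretAccessKey: "SECRET_KEY", // placeholder
            },
        }
        // New applies defaults via cfg.Cast and wraps the driver in the
        // split/merge middleware; a nil logger falls back to log.DiscardEvent.
        return mio.New(cfg, node, nil)
    }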
--- pbm/storage/mio/config.go | 89 ++++++++++++ pbm/storage/mio/download.go | 156 +++++++++++++++ pbm/storage/mio/minio.go | 270 ++++++++++++++++++++++++++++++++++++ 3 files changed, 515 insertions(+) create mode 100644 pbm/storage/mio/config.go create mode 100644 pbm/storage/mio/download.go create mode 100644 pbm/storage/mio/minio.go diff --git a/pbm/storage/mio/config.go b/pbm/storage/mio/config.go new file mode 100644 index 000000000..823b9d9da --- /dev/null +++ b/pbm/storage/mio/config.go @@ -0,0 +1,89 @@ +package mio + +import ( + "errors" + "time" +) + +type Config struct { + Region string `bson:"region" json:"region" yaml:"region"` + EndpointURL string `bson:"endpointUrl,omitempty" json:"endpointUrl" yaml:"endpointUrl,omitempty"` + EndpointURLMap map[string]string `bson:"endpointUrlMap,omitempty" json:"endpointUrlMap,omitempty" yaml:"endpointUrlMap,omitempty"` + Bucket string `bson:"bucket" json:"bucket" yaml:"bucket"` + Prefix string `bson:"prefix" json:"prefix" yaml:"prefix"` + Credentials Credentials `bson:"credentials" json:"-" yaml:"credentials"` + Secure bool `bson:"secure" json:"secure" yaml:"secure"` + + ChunkSize int64 `bson:"chunkSize,omitempty" json:"chunkSize,omitempty" yaml:"chunkSize,omitempty"` + MaxObjSizeGB *float64 `bson:"maxObjSizeGB,omitempty" json:"maxObjSizeGB,omitempty" yaml:"maxObjSizeGB,omitempty"` + + Retryer *Retryer `bson:"retryer,omitempty" json:"retryer,omitempty" yaml:"retryer,omitempty"` +} + +type Credentials struct { + SigVer string `bson:"signature-ver" json:"signature-ver,omitempty" yaml:"signature-ver,omitempty"` + AccessKeyID string `bson:"access-key-id" json:"access-key-id,omitempty" yaml:"access-key-id,omitempty"` + SecretAccessKey string `bson:"secret-access-key" json:"secret-access-key,omitempty" yaml:"secret-access-key,omitempty"` + SessionToken string `bson:"session-token" json:"session-token,omitempty" yaml:"session-token,omitempty"` +} + +type Retryer struct { + // NumMaxRetries is the maximum number of retries that will be performed. + NumMaxRetries int `bson:"numMaxRetries,omitempty" json:"numMaxRetries,omitempty" yaml:"numMaxRetries,omitempty"` + + // MinRetryDelay is the minimum delay after which a retry will be performed. + MinRetryDelay time.Duration `bson:"minRetryDelay,omitempty" json:"minRetryDelay,omitempty" yaml:"minRetryDelay,omitempty"` + + // MaxRetryDelay is the maximum delay before which a retry must be performed. + MaxRetryDelay time.Duration `bson:"maxRetryDelay,omitempty" json:"maxRetryDelay,omitempty" yaml:"maxRetryDelay,omitempty"` +} + +func (cfg *Config) Cast() error { + if cfg.EndpointURL == "" { + return errors.New("endpointURL cannot be empty") + } + + if cfg.ChunkSize == 0 { + cfg.ChunkSize = defaultPartSize + } + + if cfg.Retryer == nil { + cfg.Retryer = &Retryer{ + NumMaxRetries: defaultMaxRetries, + MinRetryDelay: defaultRetryerMinRetryDelay, + MaxRetryDelay: defaultRetryerMaxRetryDelay, + } + } else { + if cfg.Retryer.NumMaxRetries == 0 { + cfg.Retryer.NumMaxRetries = defaultMaxRetries + } + if cfg.Retryer.MinRetryDelay == 0 { + cfg.Retryer.MinRetryDelay = defaultRetryerMinRetryDelay + } + if cfg.Retryer.MaxRetryDelay == 0 { + cfg.Retryer.MaxRetryDelay = defaultRetryerMaxRetryDelay + } + } + + return nil +} + +// resolveEndpointURL returns the endpoint URL based on the provided +// EndpointURL or the associated EndpointURLMap configuration fields. +// If specified, an EndpointURLMap entry overrides the EndpointURL field.
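+// For example (values are illustrative, not defaults): with
+// EndpointURL "minio-lb:9000" and EndpointURLMap {"rs101": "minio-1:9000"},
+// node "rs101" resolves to "minio-1:9000", while any other node falls
+// back to "minio-lb:9000".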
+func (cfg *Config) resolveEndpointURL(node string) string { + ep := cfg.EndpointURL + if epm, ok := cfg.EndpointURLMap[node]; ok { + ep = epm + } + return ep +} + +func (cfg *Config) GetMaxObjSizeGB() float64 { + if cfg.MaxObjSizeGB != nil && *cfg.MaxObjSizeGB > 0 { + return *cfg.MaxObjSizeGB + } + return defaultMaxObjSizeGB +} diff --git a/pbm/storage/mio/download.go b/pbm/storage/mio/download.go new file mode 100644 index 000000000..51c173ec4 --- /dev/null +++ b/pbm/storage/mio/download.go @@ -0,0 +1,156 @@ +package mio + +import ( + "container/heap" + "context" + "io" + "path" + "time" + + "github.com/minio/minio-go/v7" + "github.com/percona/percona-backup-mongodb/pbm/errors" + "github.com/percona/percona-backup-mongodb/pbm/storage" +) + +type Download struct { + arenas []*storage.Arena // mem buffer for downloads + spanSize int + cc int // download concurrency + + stat storage.DownloadStat +} + +func (m *Minio) DownloadStat() storage.DownloadStat { + m.d.stat.Arenas = []storage.ArenaStat{} + for _, a := range m.d.arenas { + m.d.stat.Arenas = append(m.d.stat.Arenas, a.Stat) + } + + return m.d.stat +} + +func (m *Minio) SourceReader(name string) (io.ReadCloser, error) { + return m.sourceReader(name, m.d.arenas, m.d.cc, m.d.spanSize) +} + +func (m *Minio) sourceReader(fname string, arenas []*storage.Arena, cc, downloadChunkSize int) (io.ReadCloser, error) { + if cc < 1 { + return nil, errors.Errorf("num of workers should be at least 1 (got %d)", cc) + } + if len(arenas) < cc { + return nil, errors.Errorf("num of arenas (%d) less than workers (%d)", len(arenas), cc) + } + + fstat, err := m.FileStat(fname) + if err != nil { + return nil, errors.Wrap(err, "get file stat") + } + + r, w := io.Pipe() + + go func() { + pr := m.newPartReader(fname, fstat.Size, downloadChunkSize) + + pr.Run(cc, arenas) + + exitErr := io.EOF + defer func() { + w.CloseWithError(exitErr) + pr.Reset() + }() + + cqueue := &storage.ChunksQueue{} + heap.Init(cqueue) + + for { + select { + case rs := <-pr.Resultq: + // Although chunks are requested concurrently, they must be written sequentially + // to the destination, as it is not necessarily a file (decompress, mongorestore, etc.). + // If it is not a chunk's turn (previous chunks weren't written yet), the chunk is + // added to the buffer to wait. If the buffer grows too much, the scheduling of new + // chunks is paused until the buffer is handled.
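+ // For example, with illustrative 4-byte chunks arriving as [8..11], [0..3],
+ // [4..7]: the first result is parked on the heap (its start does not match
+ // Written), the second is written immediately, the third follows, and the
+ // parked [8..11] is drained from the heap once Written reaches its start.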
+ if rs.Meta.Start != pr.Written { + heap.Push(cqueue, &rs) + continue + } + + err := pr.WriteChunk(&rs, w) + if err != nil { + exitErr = errors.Wrapf(err, "SourceReader: copy bytes %d-%d from response", rs.Meta.Start, rs.Meta.End) + return + } + + // check if we can send something from the buffer + for len(*cqueue) > 0 && (*cqueue)[0].Meta.Start == pr.Written { + r := heap.Pop(cqueue).(*storage.Chunk) + err := pr.WriteChunk(r, w) + if err != nil { + exitErr = errors.Wrapf(err, "SourceReader: copy bytes %d-%d from response buffer", r.Meta.Start, r.Meta.End) + return + } + } + + // we've read all bytes in the object + if pr.Written >= pr.Fsize { + return + } + + case err := <-pr.Errc: + exitErr = errors.Wrapf(err, "SourceReader: download '%s/%s'", m.cfg.Bucket, fname) + return + } + } + }() + + return r, nil +} + +func (m *Minio) newPartReader(fname string, fsize int64, chunkSize int) *storage.PartReader { + return &storage.PartReader{ + Fname: fname, + Fsize: fsize, + ChunkSize: int64(chunkSize), + Buf: make([]byte, 32*1024), + L: m.log, + GetChunk: func(fname string, arena *storage.Arena, _ any, start, end int64) (io.ReadCloser, error) { + return m.getPartialObject(fname, arena, start, end-start+1) + }, + GetSess: func() (any, error) { + return m.cl, nil // re-use the already-initialized client + }, + } +} + +func (m *Minio) getPartialObject(name string, buf *storage.Arena, start, length int64) (io.ReadCloser, error) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second*60) + defer cancel() + + objectName := path.Join(m.cfg.Prefix, name) + + opts := minio.GetObjectOptions{} + err := opts.SetRange(start, start+length-1) + if err != nil { + return nil, errors.Wrap(err, "failed to set range on GetObjectOptions") + } + + object, err := m.cl.GetObject(ctx, m.cfg.Bucket, objectName, opts) + if err != nil { + respErr := minio.ToErrorResponse(err) + if respErr.Code == "NoSuchKey" || respErr.Code == "InvalidRange" { + return nil, io.EOF + } + + return nil, storage.GetObjError{Err: err} + } + defer object.Close() + + ch := buf.GetSpan() + _, err = io.CopyBuffer(ch, object, buf.CpBuf) + if err != nil { + ch.Close() + return nil, errors.Wrap(err, "copy") + } + + return ch, nil +} diff --git a/pbm/storage/mio/minio.go b/pbm/storage/mio/minio.go new file mode 100644 index 000000000..391d88508 --- /dev/null +++ b/pbm/storage/mio/minio.go @@ -0,0 +1,270 @@ +package mio + +import ( + "context" + "io" + "path" + "runtime" + "strings" + "time" + + "github.com/minio/minio-go/v7" + "github.com/minio/minio-go/v7/pkg/credentials" + + "github.com/percona/percona-backup-mongodb/pbm/errors" + "github.com/percona/percona-backup-mongodb/pbm/log" + "github.com/percona/percona-backup-mongodb/pbm/storage" +) + +const ( + // https://docs.min.io/enterprise/aistor-object-store/reference/aistor-server/thresholds/#s3-api-limits + maxUploadParts = 10000 + defaultPartSize int64 = 10 * 1024 * 1024 // 10Mb + minPartSize int64 = 1024 * 1024 * 5 // 5Mb + + // minio allows 50TiB; a sensible default is aligned with S3 + defaultMaxObjSizeGB = 5018 // 4.9 TB + + defaultMaxRetries = 10 + defaultRetryerMinRetryDelay = 200 * time.Millisecond + defaultRetryerMaxRetryDelay = 1 * time.Second +) + +type Minio struct { + cfg *Config + node string + log log.LogEvent + cl *minio.Client + + d *Download +} + +func New(cfg *Config, node string, l log.LogEvent) (storage.Storage, error) { + m, err := new(cfg, node, l) + if err != nil { + return nil, err + } + + // default downloader for small files + m.d = &Download{ arenas:
[]*storage.Arena{storage.NewArena( + storage.DownloadChuckSizeDefault, + storage.DownloadChuckSizeDefault)}, + spanSize: storage.DownloadChuckSizeDefault, + cc: 1, + } + + return storage.NewSplitMergeMW(m, cfg.GetMaxObjSizeGB()), nil +} + +func NewWithDownloader( + cfg *Config, node string, l log.LogEvent, + cc, bufSizeMb, spanSizeMb int, +) (storage.Storage, error) { + m, err := new(cfg, node, l) + if err != nil { + return nil, err + } + + arenaSize, spanSize, cc := storage.DownloadOpts(cc, bufSizeMb, spanSizeMb) + m.log.Debug("download max buf %d (arena %d, span %d, concurrency %d)", + arenaSize*cc, arenaSize, spanSize, cc) + + var arenas []*storage.Arena + for range cc { + arenas = append(arenas, storage.NewArena(arenaSize, spanSize)) + } + + m.d = &Download{ + arenas: arenas, + spanSize: spanSize, + cc: cc, + stat: storage.NewDownloadStat(cc, arenaSize, spanSize), + } + + return storage.NewSplitMergeMW(m, cfg.GetMaxObjSizeGB()), nil +} + +func new(cfg *Config, n string, l log.LogEvent) (*Minio, error) { + if err := cfg.Cast(); err != nil { + return nil, errors.Wrap(err, "set defaults") + } + if l == nil { + l = log.DiscardEvent + } + + var creds *credentials.Credentials + if cfg.Credentials.SigVer == "V2" { + creds = credentials.NewStaticV2( + cfg.Credentials.AccessKeyID, + cfg.Credentials.SecretAccessKey, + cfg.Credentials.SessionToken, + ) + } else { + creds = credentials.NewStaticV4( + cfg.Credentials.AccessKeyID, + cfg.Credentials.SecretAccessKey, + cfg.Credentials.SessionToken, + ) + } + + minio.DefaultRetryUnit = cfg.Retryer.MinRetryDelay + minio.DefaultRetryCap = cfg.Retryer.MaxRetryDelay + + cl, err := minio.New(cfg.resolveEndpointURL(n), &minio.Options{ + Creds: creds, + Secure: cfg.Secure, + Region: cfg.Region, + MaxRetries: cfg.Retryer.NumMaxRetries, + // Trace: *httptrace.ClientTrace, + }) + if err != nil { + return nil, errors.Wrap(err, "minio session") + } + + return &Minio{ + cfg: cfg, + node: n, + log: l, + cl: cl, + }, nil +} + +func (*Minio) Type() storage.Type { + return storage.Minio +} + +func (m *Minio) Save(name string, data io.Reader, options ...storage.Option) error { + opts := storage.GetDefaultOpts() + for _, opt := range options { + if err := opt(opts); err != nil { + return errors.Wrap(err, "processing options for save") + } + } + + partSize := storage.ComputePartSize( + opts.Size, + defaultPartSize, + minPartSize, + maxUploadParts, + m.cfg.ChunkSize, + ) + + if m.log != nil && opts.UseLogger { + m.log.Debug("uploading %q [size hint: %v (%v); part size: %v (%v)]", + name, + opts.Size, + storage.PrettySize(opts.Size), + partSize, + storage.PrettySize(partSize)) + } + + putOpts := minio.PutObjectOptions{ + PartSize: uint64(partSize), + NumThreads: uint(max(runtime.NumCPU()/2, 1)), + } + _, err := m.cl.PutObject( + context.Background(), + m.cfg.Bucket, + path.Join(m.cfg.Prefix, name), + data, + -1, + putOpts, + ) + + return errors.Wrap(err, "upload using minio") +} + +func (m *Minio) FileStat(name string) (storage.FileInfo, error) { + objectName := path.Join(m.cfg.Prefix, name) + + object, err := m.cl.StatObject( + context.Background(), + m.cfg.Bucket, + objectName, + minio.StatObjectOptions{}) + if err != nil { + respErr := minio.ToErrorResponse(err) + if respErr.Code == "NoSuchKey" || respErr.Code == "NotFound" { + return storage.FileInfo{}, storage.ErrNotExist + } + + return storage.FileInfo{}, errors.Wrap(err, "get using minio") + } + + inf := storage.FileInfo{ + Name: name, + Size: object.Size, + } + + if inf.Size == 0 { + return inf, storage.ErrEmpty + } 
+ + return inf, nil +} + +func (m *Minio) List(prefix, suffix string) ([]storage.FileInfo, error) { + ctx := context.Background() + + var files []storage.FileInfo + + for obj := range m.cl.ListObjects(ctx, m.cfg.Bucket, minio.ListObjectsOptions{ + Prefix: prefix, + Recursive: true, + }) { + if obj.Err != nil { + return nil, errors.Wrap(obj.Err, "list using minio") + } + + name := strings.TrimPrefix(obj.Key, prefix) + if len(name) > 0 && name[0] == '/' { + name = name[1:] + } + + if suffix != "" && !strings.HasSuffix(name, suffix) { + continue + } + + files = append(files, storage.FileInfo{ + Name: name, + Size: obj.Size, + }) + } + + return files, nil +} + +func (m *Minio) Delete(name string) error { + ctx := context.Background() + objName := path.Join(m.cfg.Prefix, name) + + err := m.cl.RemoveObject(ctx, m.cfg.Bucket, objName, minio.RemoveObjectOptions{}) + if err != nil { + respErr := minio.ToErrorResponse(err) + if respErr.Code == "NoSuchKey" || respErr.Code == "NotFound" { + return storage.ErrNotExist + } + + return errors.Wrap(err, "delete using minio") + } + + return nil +} + +func (m *Minio) Copy(src, dst string) error { + ctx := context.Background() + + _, err := m.cl.CopyObject(ctx, + minio.CopyDestOptions{ + Bucket: m.cfg.Bucket, + Object: path.Join(m.cfg.Prefix, dst), + }, + minio.CopySrcOptions{ + Bucket: m.cfg.Bucket, + Object: path.Join(m.cfg.Prefix, src), + }, + ) + + return errors.Wrap(err, "copy using minio") +} From 859a8ded7b8c224a2e84dee67863faefa6aa2cec Mon Sep 17 00:00:00 2001 From: Boris Ilijic Date: Mon, 29 Sep 2025 16:55:16 +0200 Subject: [PATCH 31/95] Add go-cmp dependency --- go.mod | 1 + vendor/github.com/google/go-cmp/LICENSE | 27 + .../github.com/google/go-cmp/cmp/compare.go | 671 ++++++++++++++++++ vendor/github.com/google/go-cmp/cmp/export.go | 31 + .../go-cmp/cmp/internal/diff/debug_disable.go | 18 + .../go-cmp/cmp/internal/diff/debug_enable.go | 123 ++++ .../google/go-cmp/cmp/internal/diff/diff.go | 402 +++++++++++ .../google/go-cmp/cmp/internal/flags/flags.go | 9 + .../go-cmp/cmp/internal/function/func.go | 106 +++ .../google/go-cmp/cmp/internal/value/name.go | 164 +++++ .../go-cmp/cmp/internal/value/pointer.go | 34 + .../google/go-cmp/cmp/internal/value/sort.go | 106 +++ .../github.com/google/go-cmp/cmp/options.go | 562 +++++++++++++++ vendor/github.com/google/go-cmp/cmp/path.go | 390 ++++++++++ vendor/github.com/google/go-cmp/cmp/report.go | 54 ++ .../google/go-cmp/cmp/report_compare.go | 433 +++++++++++ .../google/go-cmp/cmp/report_references.go | 264 +++++++ .../google/go-cmp/cmp/report_reflect.go | 414 +++++++++++ .../google/go-cmp/cmp/report_slices.go | 614 ++++++++++++++++ .../google/go-cmp/cmp/report_text.go | 432 +++++++++++ .../google/go-cmp/cmp/report_value.go | 121 ++++ vendor/modules.txt | 7 + 22 files changed, 4983 insertions(+) create mode 100644 vendor/github.com/google/go-cmp/LICENSE create mode 100644 vendor/github.com/google/go-cmp/cmp/compare.go create mode 100644 vendor/github.com/google/go-cmp/cmp/export.go create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/flags/flags.go create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/function/func.go create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/value/name.go create mode 100644 
vendor/github.com/google/go-cmp/cmp/internal/value/pointer.go create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/value/sort.go create mode 100644 vendor/github.com/google/go-cmp/cmp/options.go create mode 100644 vendor/github.com/google/go-cmp/cmp/path.go create mode 100644 vendor/github.com/google/go-cmp/cmp/report.go create mode 100644 vendor/github.com/google/go-cmp/cmp/report_compare.go create mode 100644 vendor/github.com/google/go-cmp/cmp/report_references.go create mode 100644 vendor/github.com/google/go-cmp/cmp/report_reflect.go create mode 100644 vendor/github.com/google/go-cmp/cmp/report_slices.go create mode 100644 vendor/github.com/google/go-cmp/cmp/report_text.go create mode 100644 vendor/github.com/google/go-cmp/cmp/report_value.go diff --git a/go.mod b/go.mod index cbac54cac..1ce6e94b5 100644 --- a/go.mod +++ b/go.mod @@ -19,6 +19,7 @@ require ( github.com/docker/go-connections v0.6.0 github.com/fsnotify/fsnotify v1.9.0 github.com/golang/snappy v1.0.0 + github.com/google/go-cmp v0.7.0 github.com/google/uuid v1.6.0 github.com/googleapis/gax-go/v2 v2.15.0 github.com/klauspost/compress v1.18.0 diff --git a/vendor/github.com/google/go-cmp/LICENSE b/vendor/github.com/google/go-cmp/LICENSE new file mode 100644 index 000000000..32017f8fa --- /dev/null +++ b/vendor/github.com/google/go-cmp/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2017 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/google/go-cmp/cmp/compare.go b/vendor/github.com/google/go-cmp/cmp/compare.go new file mode 100644 index 000000000..0f5b8a48c --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/compare.go @@ -0,0 +1,671 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package cmp determines equality of values. +// +// This package is intended to be a more powerful and safer alternative to +// [reflect.DeepEqual] for comparing whether two values are semantically equal. 
+// It is intended to only be used in tests, as performance is not a goal and +// it may panic if it cannot compare the values. Its propensity towards +// panicking means that its unsuitable for production environments where a +// spurious panic may be fatal. +// +// The primary features of cmp are: +// +// - When the default behavior of equality does not suit the test's needs, +// custom equality functions can override the equality operation. +// For example, an equality function may report floats as equal so long as +// they are within some tolerance of each other. +// +// - Types with an Equal method (e.g., [time.Time.Equal]) may use that method +// to determine equality. This allows package authors to determine +// the equality operation for the types that they define. +// +// - If no custom equality functions are used and no Equal method is defined, +// equality is determined by recursively comparing the primitive kinds on +// both values, much like [reflect.DeepEqual]. Unlike [reflect.DeepEqual], +// unexported fields are not compared by default; they result in panics +// unless suppressed by using an [Ignore] option +// (see [github.com/google/go-cmp/cmp/cmpopts.IgnoreUnexported]) +// or explicitly compared using the [Exporter] option. +package cmp + +import ( + "fmt" + "reflect" + "strings" + + "github.com/google/go-cmp/cmp/internal/diff" + "github.com/google/go-cmp/cmp/internal/function" + "github.com/google/go-cmp/cmp/internal/value" +) + +// TODO(≥go1.18): Use any instead of interface{}. + +// Equal reports whether x and y are equal by recursively applying the +// following rules in the given order to x and y and all of their sub-values: +// +// - Let S be the set of all [Ignore], [Transformer], and [Comparer] options that +// remain after applying all path filters, value filters, and type filters. +// If at least one [Ignore] exists in S, then the comparison is ignored. +// If the number of [Transformer] and [Comparer] options in S is non-zero, +// then Equal panics because it is ambiguous which option to use. +// If S contains a single [Transformer], then use that to transform +// the current values and recursively call Equal on the output values. +// If S contains a single [Comparer], then use that to compare the current values. +// Otherwise, evaluation proceeds to the next rule. +// +// - If the values have an Equal method of the form "(T) Equal(T) bool" or +// "(T) Equal(I) bool" where T is assignable to I, then use the result of +// x.Equal(y) even if x or y is nil. Otherwise, no such method exists and +// evaluation proceeds to the next rule. +// +// - Lastly, try to compare x and y based on their basic kinds. +// Simple kinds like booleans, integers, floats, complex numbers, strings, +// and channels are compared using the equivalent of the == operator in Go. +// Functions are only equal if they are both nil, otherwise they are unequal. +// +// Structs are equal if recursively calling Equal on all fields report equal. +// If a struct contains unexported fields, Equal panics unless an [Ignore] option +// (e.g., [github.com/google/go-cmp/cmp/cmpopts.IgnoreUnexported]) ignores that field +// or the [Exporter] option explicitly permits comparing the unexported field. +// +// Slices are equal if they are both nil or both non-nil, where recursively +// calling Equal on all non-ignored slice or array elements report equal. +// Empty non-nil slices and nil slices are not equal; to equate empty slices, +// consider using [github.com/google/go-cmp/cmp/cmpopts.EquateEmpty]. 
+// +// Maps are equal if they are both nil or both non-nil, where recursively +// calling Equal on all non-ignored map entries report equal. +// Map keys are equal according to the == operator. +// To use custom comparisons for map keys, consider using +// [github.com/google/go-cmp/cmp/cmpopts.SortMaps]. +// Empty non-nil maps and nil maps are not equal; to equate empty maps, +// consider using [github.com/google/go-cmp/cmp/cmpopts.EquateEmpty]. +// +// Pointers and interfaces are equal if they are both nil or both non-nil, +// where they have the same underlying concrete type and recursively +// calling Equal on the underlying values reports equal. +// +// Before recursing into a pointer, slice element, or map, the current path +// is checked to detect whether the address has already been visited. +// If there is a cycle, then the pointed at values are considered equal +// only if both addresses were previously visited in the same path step. +func Equal(x, y interface{}, opts ...Option) bool { + s := newState(opts) + s.compareAny(rootStep(x, y)) + return s.result.Equal() +} + +// Diff returns a human-readable report of the differences between two values: +// y - x. It returns an empty string if and only if Equal returns true for the +// same input values and options. +// +// The output is displayed as a literal in pseudo-Go syntax. +// At the start of each line, a "-" prefix indicates an element removed from x, +// a "+" prefix to indicates an element added from y, and the lack of a prefix +// indicates an element common to both x and y. If possible, the output +// uses fmt.Stringer.String or error.Error methods to produce more humanly +// readable outputs. In such cases, the string is prefixed with either an +// 's' or 'e' character, respectively, to indicate that the method was called. +// +// Do not depend on this output being stable. If you need the ability to +// programmatically interpret the difference, consider using a custom Reporter. +func Diff(x, y interface{}, opts ...Option) string { + s := newState(opts) + + // Optimization: If there are no other reporters, we can optimize for the + // common case where the result is equal (and thus no reported difference). + // This avoids the expensive construction of a difference tree. + if len(s.reporters) == 0 { + s.compareAny(rootStep(x, y)) + if s.result.Equal() { + return "" + } + s.result = diff.Result{} // Reset results + } + + r := new(defaultReporter) + s.reporters = append(s.reporters, reporter{r}) + s.compareAny(rootStep(x, y)) + d := r.String() + if (d == "") != s.result.Equal() { + panic("inconsistent difference and equality results") + } + return d +} + +// rootStep constructs the first path step. If x and y have differing types, +// then they are stored within an empty interface type. +func rootStep(x, y interface{}) PathStep { + vx := reflect.ValueOf(x) + vy := reflect.ValueOf(y) + + // If the inputs are different types, auto-wrap them in an empty interface + // so that they have the same parent type. + var t reflect.Type + if !vx.IsValid() || !vy.IsValid() || vx.Type() != vy.Type() { + t = anyType + if vx.IsValid() { + vvx := reflect.New(t).Elem() + vvx.Set(vx) + vx = vvx + } + if vy.IsValid() { + vvy := reflect.New(t).Elem() + vvy.Set(vy) + vy = vvy + } + } else { + t = vx.Type() + } + + return &pathStep{t, vx, vy} +} + +type state struct { + // These fields represent the "comparison state". + // Calling statelessCompare must not result in observable changes to these. 
+ result diff.Result // The current result of comparison + curPath Path // The current path in the value tree + curPtrs pointerPath // The current set of visited pointers + reporters []reporter // Optional reporters + + // recChecker checks for infinite cycles applying the same set of + // transformers upon the output of itself. + recChecker recChecker + + // dynChecker triggers pseudo-random checks for option correctness. + // It is safe for statelessCompare to mutate this value. + dynChecker dynChecker + + // These fields, once set by processOption, will not change. + exporters []exporter // List of exporters for structs with unexported fields + opts Options // List of all fundamental and filter options +} + +func newState(opts []Option) *state { + // Always ensure a validator option exists to validate the inputs. + s := &state{opts: Options{validator{}}} + s.curPtrs.Init() + s.processOption(Options(opts)) + return s +} + +func (s *state) processOption(opt Option) { + switch opt := opt.(type) { + case nil: + case Options: + for _, o := range opt { + s.processOption(o) + } + case coreOption: + type filtered interface { + isFiltered() bool + } + if fopt, ok := opt.(filtered); ok && !fopt.isFiltered() { + panic(fmt.Sprintf("cannot use an unfiltered option: %v", opt)) + } + s.opts = append(s.opts, opt) + case exporter: + s.exporters = append(s.exporters, opt) + case reporter: + s.reporters = append(s.reporters, opt) + default: + panic(fmt.Sprintf("unknown option %T", opt)) + } +} + +// statelessCompare compares two values and returns the result. +// This function is stateless in that it does not alter the current result, +// or output to any registered reporters. +func (s *state) statelessCompare(step PathStep) diff.Result { + // We do not save and restore curPath and curPtrs because all of the + // compareX methods should properly push and pop from them. + // It is an implementation bug if the contents of the paths differ from + // when calling this function to when returning from it. + + oldResult, oldReporters := s.result, s.reporters + s.result = diff.Result{} // Reset result + s.reporters = nil // Remove reporters to avoid spurious printouts + s.compareAny(step) + res := s.result + s.result, s.reporters = oldResult, oldReporters + return res +} + +func (s *state) compareAny(step PathStep) { + // Update the path stack. + s.curPath.push(step) + defer s.curPath.pop() + for _, r := range s.reporters { + r.PushStep(step) + defer r.PopStep() + } + s.recChecker.Check(s.curPath) + + // Cycle-detection for slice elements (see NOTE in compareSlice). + t := step.Type() + vx, vy := step.Values() + if si, ok := step.(SliceIndex); ok && si.isSlice && vx.IsValid() && vy.IsValid() { + px, py := vx.Addr(), vy.Addr() + if eq, visited := s.curPtrs.Push(px, py); visited { + s.report(eq, reportByCycle) + return + } + defer s.curPtrs.Pop(px, py) + } + + // Rule 1: Check whether an option applies on this node in the value tree. + if s.tryOptions(t, vx, vy) { + return + } + + // Rule 2: Check whether the type has a valid Equal method. + if s.tryMethod(t, vx, vy) { + return + } + + // Rule 3: Compare based on the underlying kind. 
+ switch t.Kind() { + case reflect.Bool: + s.report(vx.Bool() == vy.Bool(), 0) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + s.report(vx.Int() == vy.Int(), 0) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + s.report(vx.Uint() == vy.Uint(), 0) + case reflect.Float32, reflect.Float64: + s.report(vx.Float() == vy.Float(), 0) + case reflect.Complex64, reflect.Complex128: + s.report(vx.Complex() == vy.Complex(), 0) + case reflect.String: + s.report(vx.String() == vy.String(), 0) + case reflect.Chan, reflect.UnsafePointer: + s.report(vx.Pointer() == vy.Pointer(), 0) + case reflect.Func: + s.report(vx.IsNil() && vy.IsNil(), 0) + case reflect.Struct: + s.compareStruct(t, vx, vy) + case reflect.Slice, reflect.Array: + s.compareSlice(t, vx, vy) + case reflect.Map: + s.compareMap(t, vx, vy) + case reflect.Ptr: + s.comparePtr(t, vx, vy) + case reflect.Interface: + s.compareInterface(t, vx, vy) + default: + panic(fmt.Sprintf("%v kind not handled", t.Kind())) + } +} + +func (s *state) tryOptions(t reflect.Type, vx, vy reflect.Value) bool { + // Evaluate all filters and apply the remaining options. + if opt := s.opts.filter(s, t, vx, vy); opt != nil { + opt.apply(s, vx, vy) + return true + } + return false +} + +func (s *state) tryMethod(t reflect.Type, vx, vy reflect.Value) bool { + // Check if this type even has an Equal method. + m, ok := t.MethodByName("Equal") + if !ok || !function.IsType(m.Type, function.EqualAssignable) { + return false + } + + eq := s.callTTBFunc(m.Func, vx, vy) + s.report(eq, reportByMethod) + return true +} + +func (s *state) callTRFunc(f, v reflect.Value, step Transform) reflect.Value { + if !s.dynChecker.Next() { + return f.Call([]reflect.Value{v})[0] + } + + // Run the function twice and ensure that we get the same results back. + // We run in goroutines so that the race detector (if enabled) can detect + // unsafe mutations to the input. + c := make(chan reflect.Value) + go detectRaces(c, f, v) + got := <-c + want := f.Call([]reflect.Value{v})[0] + if step.vx, step.vy = got, want; !s.statelessCompare(step).Equal() { + // To avoid false-positives with non-reflexive equality operations, + // we sanity check whether a value is equal to itself. + if step.vx, step.vy = want, want; !s.statelessCompare(step).Equal() { + return want + } + panic(fmt.Sprintf("non-deterministic function detected: %s", function.NameOf(f))) + } + return want +} + +func (s *state) callTTBFunc(f, x, y reflect.Value) bool { + if !s.dynChecker.Next() { + return f.Call([]reflect.Value{x, y})[0].Bool() + } + + // Swapping the input arguments is sufficient to check that + // f is symmetric and deterministic. + // We run in goroutines so that the race detector (if enabled) can detect + // unsafe mutations to the input. 
+ c := make(chan reflect.Value) + go detectRaces(c, f, y, x) + got := <-c + want := f.Call([]reflect.Value{x, y})[0].Bool() + if !got.IsValid() || got.Bool() != want { + panic(fmt.Sprintf("non-deterministic or non-symmetric function detected: %s", function.NameOf(f))) + } + return want +} + +func detectRaces(c chan<- reflect.Value, f reflect.Value, vs ...reflect.Value) { + var ret reflect.Value + defer func() { + recover() // Ignore panics, let the other call to f panic instead + c <- ret + }() + ret = f.Call(vs)[0] +} + +func (s *state) compareStruct(t reflect.Type, vx, vy reflect.Value) { + var addr bool + var vax, vay reflect.Value // Addressable versions of vx and vy + + var mayForce, mayForceInit bool + step := StructField{&structField{}} + for i := 0; i < t.NumField(); i++ { + step.typ = t.Field(i).Type + step.vx = vx.Field(i) + step.vy = vy.Field(i) + step.name = t.Field(i).Name + step.idx = i + step.unexported = !isExported(step.name) + if step.unexported { + if step.name == "_" { + continue + } + // Defer checking of unexported fields until later to give an + // Ignore a chance to ignore the field. + if !vax.IsValid() || !vay.IsValid() { + // For retrieveUnexportedField to work, the parent struct must + // be addressable. Create a new copy of the values if + // necessary to make them addressable. + addr = vx.CanAddr() || vy.CanAddr() + vax = makeAddressable(vx) + vay = makeAddressable(vy) + } + if !mayForceInit { + for _, xf := range s.exporters { + mayForce = mayForce || xf(t) + } + mayForceInit = true + } + step.mayForce = mayForce + step.paddr = addr + step.pvx = vax + step.pvy = vay + step.field = t.Field(i) + } + s.compareAny(step) + } +} + +func (s *state) compareSlice(t reflect.Type, vx, vy reflect.Value) { + isSlice := t.Kind() == reflect.Slice + if isSlice && (vx.IsNil() || vy.IsNil()) { + s.report(vx.IsNil() && vy.IsNil(), 0) + return + } + + // NOTE: It is incorrect to call curPtrs.Push on the slice header pointer + // since slices represents a list of pointers, rather than a single pointer. + // The pointer checking logic must be handled on a per-element basis + // in compareAny. + // + // A slice header (see reflect.SliceHeader) in Go is a tuple of a starting + // pointer P, a length N, and a capacity C. Supposing each slice element has + // a memory size of M, then the slice is equivalent to the list of pointers: + // [P+i*M for i in range(N)] + // + // For example, v[:0] and v[:1] are slices with the same starting pointer, + // but they are clearly different values. Using the slice pointer alone + // violates the assumption that equal pointers implies equal values. + + step := SliceIndex{&sliceIndex{pathStep: pathStep{typ: t.Elem()}, isSlice: isSlice}} + withIndexes := func(ix, iy int) SliceIndex { + if ix >= 0 { + step.vx, step.xkey = vx.Index(ix), ix + } else { + step.vx, step.xkey = reflect.Value{}, -1 + } + if iy >= 0 { + step.vy, step.ykey = vy.Index(iy), iy + } else { + step.vy, step.ykey = reflect.Value{}, -1 + } + return step + } + + // Ignore options are able to ignore missing elements in a slice. + // However, detecting these reliably requires an optimal differencing + // algorithm, for which diff.Difference is not. + // + // Instead, we first iterate through both slices to detect which elements + // would be ignored if standing alone. The index of non-discarded elements + // are stored in a separate slice, which diffing is then performed on. 
+ var indexesX, indexesY []int + var ignoredX, ignoredY []bool + for ix := 0; ix < vx.Len(); ix++ { + ignored := s.statelessCompare(withIndexes(ix, -1)).NumDiff == 0 + if !ignored { + indexesX = append(indexesX, ix) + } + ignoredX = append(ignoredX, ignored) + } + for iy := 0; iy < vy.Len(); iy++ { + ignored := s.statelessCompare(withIndexes(-1, iy)).NumDiff == 0 + if !ignored { + indexesY = append(indexesY, iy) + } + ignoredY = append(ignoredY, ignored) + } + + // Compute an edit-script for slices vx and vy (excluding ignored elements). + edits := diff.Difference(len(indexesX), len(indexesY), func(ix, iy int) diff.Result { + return s.statelessCompare(withIndexes(indexesX[ix], indexesY[iy])) + }) + + // Replay the ignore-scripts and the edit-script. + var ix, iy int + for ix < vx.Len() || iy < vy.Len() { + var e diff.EditType + switch { + case ix < len(ignoredX) && ignoredX[ix]: + e = diff.UniqueX + case iy < len(ignoredY) && ignoredY[iy]: + e = diff.UniqueY + default: + e, edits = edits[0], edits[1:] + } + switch e { + case diff.UniqueX: + s.compareAny(withIndexes(ix, -1)) + ix++ + case diff.UniqueY: + s.compareAny(withIndexes(-1, iy)) + iy++ + default: + s.compareAny(withIndexes(ix, iy)) + ix++ + iy++ + } + } +} + +func (s *state) compareMap(t reflect.Type, vx, vy reflect.Value) { + if vx.IsNil() || vy.IsNil() { + s.report(vx.IsNil() && vy.IsNil(), 0) + return + } + + // Cycle-detection for maps. + if eq, visited := s.curPtrs.Push(vx, vy); visited { + s.report(eq, reportByCycle) + return + } + defer s.curPtrs.Pop(vx, vy) + + // We combine and sort the two map keys so that we can perform the + // comparisons in a deterministic order. + step := MapIndex{&mapIndex{pathStep: pathStep{typ: t.Elem()}}} + for _, k := range value.SortKeys(append(vx.MapKeys(), vy.MapKeys()...)) { + step.vx = vx.MapIndex(k) + step.vy = vy.MapIndex(k) + step.key = k + if !step.vx.IsValid() && !step.vy.IsValid() { + // It is possible for both vx and vy to be invalid if the + // key contained a NaN value in it. + // + // Even with the ability to retrieve NaN keys in Go 1.12, + // there still isn't a sensible way to compare the values since + // a NaN key may map to multiple unordered values. + // The most reasonable way to compare NaNs would be to compare the + // set of values. However, this is impossible to do efficiently + // since set equality is provably an O(n^2) operation given only + // an Equal function. If we had a Less function or Hash function, + // this could be done in O(n*log(n)) or O(n), respectively. + // + // Rather than adding complex logic to deal with NaNs, make it + // the user's responsibility to compare such obscure maps. + const help = "consider providing a Comparer to compare the map" + panic(fmt.Sprintf("%#v has map key with NaNs\n%s", s.curPath, help)) + } + s.compareAny(step) + } +} + +func (s *state) comparePtr(t reflect.Type, vx, vy reflect.Value) { + if vx.IsNil() || vy.IsNil() { + s.report(vx.IsNil() && vy.IsNil(), 0) + return + } + + // Cycle-detection for pointers. 
+ if eq, visited := s.curPtrs.Push(vx, vy); visited { + s.report(eq, reportByCycle) + return + } + defer s.curPtrs.Pop(vx, vy) + + vx, vy = vx.Elem(), vy.Elem() + s.compareAny(Indirect{&indirect{pathStep{t.Elem(), vx, vy}}}) +} + +func (s *state) compareInterface(t reflect.Type, vx, vy reflect.Value) { + if vx.IsNil() || vy.IsNil() { + s.report(vx.IsNil() && vy.IsNil(), 0) + return + } + vx, vy = vx.Elem(), vy.Elem() + if vx.Type() != vy.Type() { + s.report(false, 0) + return + } + s.compareAny(TypeAssertion{&typeAssertion{pathStep{vx.Type(), vx, vy}}}) +} + +func (s *state) report(eq bool, rf resultFlags) { + if rf&reportByIgnore == 0 { + if eq { + s.result.NumSame++ + rf |= reportEqual + } else { + s.result.NumDiff++ + rf |= reportUnequal + } + } + for _, r := range s.reporters { + r.Report(Result{flags: rf}) + } +} + +// recChecker tracks the state needed to periodically perform checks that +// user provided transformers are not stuck in an infinitely recursive cycle. +type recChecker struct{ next int } + +// Check scans the Path for any recursive transformers and panics when any +// recursive transformers are detected. Note that the presence of a +// recursive Transformer does not necessarily imply an infinite cycle. +// As such, this check only activates after some minimal number of path steps. +func (rc *recChecker) Check(p Path) { + const minLen = 1 << 16 + if rc.next == 0 { + rc.next = minLen + } + if len(p) < rc.next { + return + } + rc.next <<= 1 + + // Check whether the same transformer has appeared at least twice. + var ss []string + m := map[Option]int{} + for _, ps := range p { + if t, ok := ps.(Transform); ok { + t := t.Option() + if m[t] == 1 { // Transformer was used exactly once before + tf := t.(*transformer).fnc.Type() + ss = append(ss, fmt.Sprintf("%v: %v => %v", t, tf.In(0), tf.Out(0))) + } + m[t]++ + } + } + if len(ss) > 0 { + const warning = "recursive set of Transformers detected" + const help = "consider using cmpopts.AcyclicTransformer" + set := strings.Join(ss, "\n\t") + panic(fmt.Sprintf("%s:\n\t%s\n%s", warning, set, help)) + } +} + +// dynChecker tracks the state needed to periodically perform checks that +// user provided functions are symmetric and deterministic. +// The zero value is safe for immediate use. +type dynChecker struct{ curr, next int } + +// Next increments the state and reports whether a check should be performed. +// +// Checks occur every Nth function call, where N is a triangular number: +// +// 0 1 3 6 10 15 21 28 36 45 55 66 78 91 105 120 136 153 171 190 ... +// +// See https://en.wikipedia.org/wiki/Triangular_number +// +// This sequence ensures that the cost of checks drops significantly as +// the number of functions calls grows larger. +func (dc *dynChecker) Next() bool { + ok := dc.curr == dc.next + if ok { + dc.curr = 0 + dc.next++ + } + dc.curr++ + return ok +} + +// makeAddressable returns a value that is always addressable. +// It returns the input verbatim if it is already addressable, +// otherwise it creates a new value and returns an addressable copy. +func makeAddressable(v reflect.Value) reflect.Value { + if v.CanAddr() { + return v + } + vc := reflect.New(v.Type()).Elem() + vc.Set(v) + return vc +} diff --git a/vendor/github.com/google/go-cmp/cmp/export.go b/vendor/github.com/google/go-cmp/cmp/export.go new file mode 100644 index 000000000..29f82fe6b --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/export.go @@ -0,0 +1,31 @@ +// Copyright 2017, The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cmp + +import ( + "reflect" + "unsafe" +) + +// retrieveUnexportedField uses unsafe to forcibly retrieve any field from +// a struct such that the value has read-write permissions. +// +// The parent struct, v, must be addressable, while f must be a StructField +// describing the field to retrieve. If addr is false, +// then the returned value will be shallowed copied to be non-addressable. +func retrieveUnexportedField(v reflect.Value, f reflect.StructField, addr bool) reflect.Value { + ve := reflect.NewAt(f.Type, unsafe.Pointer(uintptr(unsafe.Pointer(v.UnsafeAddr()))+f.Offset)).Elem() + if !addr { + // A field is addressable if and only if the struct is addressable. + // If the original parent value was not addressable, shallow copy the + // value to make it non-addressable to avoid leaking an implementation + // detail of how forcibly exporting a field works. + if ve.Kind() == reflect.Interface && ve.IsNil() { + return reflect.Zero(f.Type) + } + return reflect.ValueOf(ve.Interface()).Convert(f.Type) + } + return ve +} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go new file mode 100644 index 000000000..36062a604 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go @@ -0,0 +1,18 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !cmp_debug +// +build !cmp_debug + +package diff + +var debug debugger + +type debugger struct{} + +func (debugger) Begin(_, _ int, f EqualFunc, _, _ *EditScript) EqualFunc { + return f +} +func (debugger) Update() {} +func (debugger) Finish() {} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go new file mode 100644 index 000000000..a3b97a1ad --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go @@ -0,0 +1,123 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build cmp_debug +// +build cmp_debug + +package diff + +import ( + "fmt" + "strings" + "sync" + "time" +) + +// The algorithm can be seen running in real-time by enabling debugging: +// go test -tags=cmp_debug -v +// +// Example output: +// === RUN TestDifference/#34 +// ┌───────────────────────────────┐ +// │ \ · · · · · · · · · · · · · · │ +// │ · # · · · · · · · · · · · · · │ +// │ · \ · · · · · · · · · · · · · │ +// │ · · \ · · · · · · · · · · · · │ +// │ · · · X # · · · · · · · · · · │ +// │ · · · # \ · · · · · · · · · · │ +// │ · · · · · # # · · · · · · · · │ +// │ · · · · · # \ · · · · · · · · │ +// │ · · · · · · · \ · · · · · · · │ +// │ · · · · · · · · \ · · · · · · │ +// │ · · · · · · · · · \ · · · · · │ +// │ · · · · · · · · · · \ · · # · │ +// │ · · · · · · · · · · · \ # # · │ +// │ · · · · · · · · · · · # # # · │ +// │ · · · · · · · · · · # # # # · │ +// │ · · · · · · · · · # # # # # · │ +// │ · · · · · · · · · · · · · · \ │ +// └───────────────────────────────┘ +// [.Y..M.XY......YXYXY.|] +// +// The grid represents the edit-graph where the horizontal axis represents +// list X and the vertical axis represents list Y. 
The start of the two lists +// is the top-left, while the ends are the bottom-right. The '·' represents +// an unexplored node in the graph. The '\' indicates that the two symbols +// from list X and Y are equal. The 'X' indicates that two symbols are similar +// (but not exactly equal) to each other. The '#' indicates that the two symbols +// are different (and not similar). The algorithm traverses this graph trying to +// make the paths starting in the top-left and the bottom-right connect. +// +// The series of '.', 'X', 'Y', and 'M' characters at the bottom represents +// the currently established path from the forward and reverse searches, +// separated by a '|' character. + +const ( + updateDelay = 100 * time.Millisecond + finishDelay = 500 * time.Millisecond + ansiTerminal = true // ANSI escape codes used to move terminal cursor +) + +var debug debugger + +type debugger struct { + sync.Mutex + p1, p2 EditScript + fwdPath, revPath *EditScript + grid []byte + lines int +} + +func (dbg *debugger) Begin(nx, ny int, f EqualFunc, p1, p2 *EditScript) EqualFunc { + dbg.Lock() + dbg.fwdPath, dbg.revPath = p1, p2 + top := "┌─" + strings.Repeat("──", nx) + "┐\n" + row := "│ " + strings.Repeat("· ", nx) + "│\n" + btm := "└─" + strings.Repeat("──", nx) + "┘\n" + dbg.grid = []byte(top + strings.Repeat(row, ny) + btm) + dbg.lines = strings.Count(dbg.String(), "\n") + fmt.Print(dbg) + + // Wrap the EqualFunc so that we can intercept each result. + return func(ix, iy int) (r Result) { + cell := dbg.grid[len(top)+iy*len(row):][len("│ ")+len("· ")*ix:][:len("·")] + for i := range cell { + cell[i] = 0 // Zero out the multiple bytes of UTF-8 middle-dot + } + switch r = f(ix, iy); { + case r.Equal(): + cell[0] = '\\' + case r.Similar(): + cell[0] = 'X' + default: + cell[0] = '#' + } + return + } +} + +func (dbg *debugger) Update() { + dbg.print(updateDelay) +} + +func (dbg *debugger) Finish() { + dbg.print(finishDelay) + dbg.Unlock() +} + +func (dbg *debugger) String() string { + dbg.p1, dbg.p2 = *dbg.fwdPath, dbg.p2[:0] + for i := len(*dbg.revPath) - 1; i >= 0; i-- { + dbg.p2 = append(dbg.p2, (*dbg.revPath)[i]) + } + return fmt.Sprintf("%s[%v|%v]\n\n", dbg.grid, dbg.p1, dbg.p2) +} + +func (dbg *debugger) print(d time.Duration) { + if ansiTerminal { + fmt.Printf("\x1b[%dA", dbg.lines) // Reset terminal cursor + } + fmt.Print(dbg) + time.Sleep(d) +} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go b/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go new file mode 100644 index 000000000..a248e5436 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go @@ -0,0 +1,402 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package diff implements an algorithm for producing edit-scripts. +// The edit-script is a sequence of operations needed to transform one list +// of symbols into another (or vice-versa). The edits allowed are insertions, +// deletions, and modifications. The summation of all edits is called the +// Levenshtein distance as this problem is well-known in computer science. +// +// This package prioritizes performance over accuracy. That is, the run time +// is more important than obtaining a minimal Levenshtein distance. +package diff + +import ( + "math/rand" + "time" + + "github.com/google/go-cmp/cmp/internal/flags" +) + +// EditType represents a single operation within an edit-script. 
+type EditType uint8 + +const ( + // Identity indicates that a symbol pair is identical in both list X and Y. + Identity EditType = iota + // UniqueX indicates that a symbol only exists in X and not Y. + UniqueX + // UniqueY indicates that a symbol only exists in Y and not X. + UniqueY + // Modified indicates that a symbol pair is a modification of each other. + Modified +) + +// EditScript represents the series of differences between two lists. +type EditScript []EditType + +// String returns a human-readable string representing the edit-script where +// Identity, UniqueX, UniqueY, and Modified are represented by the +// '.', 'X', 'Y', and 'M' characters, respectively. +func (es EditScript) String() string { + b := make([]byte, len(es)) + for i, e := range es { + switch e { + case Identity: + b[i] = '.' + case UniqueX: + b[i] = 'X' + case UniqueY: + b[i] = 'Y' + case Modified: + b[i] = 'M' + default: + panic("invalid edit-type") + } + } + return string(b) +} + +// stats returns a histogram of the number of each type of edit operation. +func (es EditScript) stats() (s struct{ NI, NX, NY, NM int }) { + for _, e := range es { + switch e { + case Identity: + s.NI++ + case UniqueX: + s.NX++ + case UniqueY: + s.NY++ + case Modified: + s.NM++ + default: + panic("invalid edit-type") + } + } + return +} + +// Dist is the Levenshtein distance and is guaranteed to be 0 if and only if +// lists X and Y are equal. +func (es EditScript) Dist() int { return len(es) - es.stats().NI } + +// LenX is the length of the X list. +func (es EditScript) LenX() int { return len(es) - es.stats().NY } + +// LenY is the length of the Y list. +func (es EditScript) LenY() int { return len(es) - es.stats().NX } + +// EqualFunc reports whether the symbols at indexes ix and iy are equal. +// When called by Difference, the index is guaranteed to be within nx and ny. +type EqualFunc func(ix int, iy int) Result + +// Result is the result of comparison. +// NumSame is the number of sub-elements that are equal. +// NumDiff is the number of sub-elements that are not equal. +type Result struct{ NumSame, NumDiff int } + +// BoolResult returns a Result that is either Equal or not Equal. +func BoolResult(b bool) Result { + if b { + return Result{NumSame: 1} // Equal, Similar + } else { + return Result{NumDiff: 2} // Not Equal, not Similar + } +} + +// Equal indicates whether the symbols are equal. Two symbols are equal +// if and only if NumDiff == 0. If Equal, then they are also Similar. +func (r Result) Equal() bool { return r.NumDiff == 0 } + +// Similar indicates whether two symbols are similar and may be represented +// by using the Modified type. As a special case, we consider binary comparisons +// (i.e., those that return Result{1, 0} or Result{0, 1}) to be similar. +// +// The exact ratio of NumSame to NumDiff to determine similarity may change. +func (r Result) Similar() bool { + // Use NumSame+1 to offset NumSame so that binary comparisons are similar. + return r.NumSame+1 >= r.NumDiff +} + +var randBool = rand.New(rand.NewSource(time.Now().Unix())).Intn(2) == 0 + +// Difference reports whether two lists of lengths nx and ny are equal +// given the definition of equality provided as f. +// +// This function returns an edit-script, which is a sequence of operations +// needed to convert one list into the other. 
The following invariants for
+// the edit-script are maintained:
+//   - eq == (es.Dist()==0)
+//   - nx == es.LenX()
+//   - ny == es.LenY()
+//
+// This algorithm is not guaranteed to be an optimal solution (i.e., one that
+// produces an edit-script with a minimal Levenshtein distance). This algorithm
+// favors performance over optimality. The exact output is not guaranteed to
+// be stable and may change over time.
+func Difference(nx, ny int, f EqualFunc) (es EditScript) {
+	// This algorithm is based on traversing what is known as an "edit-graph".
+	// See Figure 1 from "An O(ND) Difference Algorithm and Its Variations"
+	// by Eugene W. Myers. Since D can be as large as N itself, this is
+	// effectively O(N^2). Unlike the algorithm from that paper, we are not
+	// interested in the optimal path, but at least some "decent" path.
+	//
+	// For example, let X and Y be lists of symbols:
+	//	X = [A B C A B B A]
+	//	Y = [C B A B A C]
+	//
+	// The edit-graph can be drawn as the following:
+	//	   A B C A B B A
+	//	  ┌─────────────┐
+	//	C │_|_|\|_|_|_|_│ 0
+	//	B │_|\|_|_|\|\|_│ 1
+	//	A │\|_|_|\|_|_|\│ 2
+	//	B │_|\|_|_|\|\|_│ 3
+	//	A │\|_|_|\|_|_|\│ 4
+	//	C │ | |\| | | | │ 5
+	//	  └─────────────┘ 6
+	//	   0 1 2 3 4 5 6 7
+	//
+	// List X is written along the horizontal axis, while list Y is written
+	// along the vertical axis. At any point on this grid, if the symbol in
+	// list X matches the corresponding symbol in list Y, then a '\' is drawn.
+	// The goal of any minimal edit-script algorithm is to find a path from the
+	// top-left corner to the bottom-right corner, while traveling through the
+	// fewest horizontal or vertical edges.
+	// A horizontal edge is equivalent to inserting a symbol from list X.
+	// A vertical edge is equivalent to inserting a symbol from list Y.
+	// A diagonal edge is equivalent to a matching symbol between both X and Y.
+
+	// Invariants:
+	//	- 0 ≤ fwdPath.X ≤ (fwdFrontier.X, revFrontier.X) ≤ revPath.X ≤ nx
+	//	- 0 ≤ fwdPath.Y ≤ (fwdFrontier.Y, revFrontier.Y) ≤ revPath.Y ≤ ny
+	//
+	// In general:
+	//	- fwdFrontier.X < revFrontier.X
+	//	- fwdFrontier.Y < revFrontier.Y
+	//
+	// Unless it is time for the algorithm to terminate.
+	fwdPath := path{+1, point{0, 0}, make(EditScript, 0, (nx+ny)/2)}
+	revPath := path{-1, point{nx, ny}, make(EditScript, 0)}
+	fwdFrontier := fwdPath.point // Forward search frontier
+	revFrontier := revPath.point // Reverse search frontier
+
+	// Search budget bounds the cost of searching for better paths.
+	// The longest sequence of non-matching symbols that can be tolerated is
+	// approximately the square-root of the search budget.
+	searchBudget := 4 * (nx + ny) // O(n)
+
+	// Running the tests with the "cmp_debug" build tag prints a visualization
+	// of the algorithm running in real-time. This is educational for
+	// understanding how the algorithm works. See debug_enable.go.
+	f = debug.Begin(nx, ny, f, &fwdPath.es, &revPath.es)
+
+	// The algorithm below is a greedy, meet-in-the-middle algorithm for
+	// computing sub-optimal edit-scripts between two lists.
+	//
+	// The algorithm is approximately as follows:
+	//	- Searching for differences switches back-and-forth between
+	//	  a search that starts at the beginning (the top-left corner), and
+	//	  a search that starts at the end (the bottom-right corner).
+	//	  The goal of the search is to connect with the search
+	//	  from the opposite corner. 
+	//	- As we search, we build a path in a greedy manner,
+	//	  where the first match seen is added to the path (this is sub-optimal,
+	//	  but provides a decent result in practice). When matches are found,
+	//	  we try the next pair of symbols in the lists and follow all matches
+	//	  as far as possible.
+	//	- When searching for matches, we search along a diagonal going through
+	//	  the "frontier" point. If no matches are found,
+	//	  we advance the frontier towards the opposite corner.
+	//	- This algorithm terminates when either the X coordinates or the
+	//	  Y coordinates of the forward and reverse frontier points ever intersect.
+
+	// This algorithm is correct even if searching only in the forward direction
+	// or in the reverse direction. We do both because it is commonly observed
+	// that two lists differ because elements were added to the front
+	// or end of one of the lists.
+	//
+	// Non-deterministically start with either the forward or reverse direction
+	// to introduce some deliberate instability so that we have the flexibility
+	// to change this algorithm in the future.
+	if flags.Deterministic || randBool {
+		goto forwardSearch
+	} else {
+		goto reverseSearch
+	}
+
+forwardSearch:
+	{
+		// Forward search from the beginning.
+		if fwdFrontier.X >= revFrontier.X || fwdFrontier.Y >= revFrontier.Y || searchBudget == 0 {
+			goto finishSearch
+		}
+		for stop1, stop2, i := false, false, 0; !(stop1 && stop2) && searchBudget > 0; i++ {
+			// Search in a diagonal pattern for a match.
+			z := zigzag(i)
+			p := point{fwdFrontier.X + z, fwdFrontier.Y - z}
+			switch {
+			case p.X >= revPath.X || p.Y < fwdPath.Y:
+				stop1 = true // Hit top-right corner
+			case p.Y >= revPath.Y || p.X < fwdPath.X:
+				stop2 = true // Hit bottom-left corner
+			case f(p.X, p.Y).Equal():
+				// Match found, so connect the path to this point.
+				fwdPath.connect(p, f)
+				fwdPath.append(Identity)
+				// Follow sequence of matches as far as possible.
+				for fwdPath.X < revPath.X && fwdPath.Y < revPath.Y {
+					if !f(fwdPath.X, fwdPath.Y).Equal() {
+						break
+					}
+					fwdPath.append(Identity)
+				}
+				fwdFrontier = fwdPath.point
+				stop1, stop2 = true, true
+			default:
+				searchBudget-- // Match not found
+			}
+			debug.Update()
+		}
+		// Advance the frontier towards reverse point.
+		if revPath.X-fwdFrontier.X >= revPath.Y-fwdFrontier.Y {
+			fwdFrontier.X++
+		} else {
+			fwdFrontier.Y++
+		}
+		goto reverseSearch
+	}
+
+reverseSearch:
+	{
+		// Reverse search from the end.
+		if fwdFrontier.X >= revFrontier.X || fwdFrontier.Y >= revFrontier.Y || searchBudget == 0 {
+			goto finishSearch
+		}
+		for stop1, stop2, i := false, false, 0; !(stop1 && stop2) && searchBudget > 0; i++ {
+			// Search in a diagonal pattern for a match.
+			z := zigzag(i)
+			p := point{revFrontier.X - z, revFrontier.Y + z}
+			switch {
+			case fwdPath.X >= p.X || revPath.Y < p.Y:
+				stop1 = true // Hit bottom-left corner
+			case fwdPath.Y >= p.Y || revPath.X < p.X:
+				stop2 = true // Hit top-right corner
+			case f(p.X-1, p.Y-1).Equal():
+				// Match found, so connect the path to this point.
+				revPath.connect(p, f)
+				revPath.append(Identity)
+				// Follow sequence of matches as far as possible.
+				for fwdPath.X < revPath.X && fwdPath.Y < revPath.Y {
+					if !f(revPath.X-1, revPath.Y-1).Equal() {
+						break
+					}
+					revPath.append(Identity)
+				}
+				revFrontier = revPath.point
+				stop1, stop2 = true, true
+			default:
+				searchBudget-- // Match not found
+			}
+			debug.Update()
+		}
+		// Advance the frontier towards forward point. 
+ if revFrontier.X-fwdPath.X >= revFrontier.Y-fwdPath.Y { + revFrontier.X-- + } else { + revFrontier.Y-- + } + goto forwardSearch + } + +finishSearch: + // Join the forward and reverse paths and then append the reverse path. + fwdPath.connect(revPath.point, f) + for i := len(revPath.es) - 1; i >= 0; i-- { + t := revPath.es[i] + revPath.es = revPath.es[:i] + fwdPath.append(t) + } + debug.Finish() + return fwdPath.es +} + +type path struct { + dir int // +1 if forward, -1 if reverse + point // Leading point of the EditScript path + es EditScript +} + +// connect appends any necessary Identity, Modified, UniqueX, or UniqueY types +// to the edit-script to connect p.point to dst. +func (p *path) connect(dst point, f EqualFunc) { + if p.dir > 0 { + // Connect in forward direction. + for dst.X > p.X && dst.Y > p.Y { + switch r := f(p.X, p.Y); { + case r.Equal(): + p.append(Identity) + case r.Similar(): + p.append(Modified) + case dst.X-p.X >= dst.Y-p.Y: + p.append(UniqueX) + default: + p.append(UniqueY) + } + } + for dst.X > p.X { + p.append(UniqueX) + } + for dst.Y > p.Y { + p.append(UniqueY) + } + } else { + // Connect in reverse direction. + for p.X > dst.X && p.Y > dst.Y { + switch r := f(p.X-1, p.Y-1); { + case r.Equal(): + p.append(Identity) + case r.Similar(): + p.append(Modified) + case p.Y-dst.Y >= p.X-dst.X: + p.append(UniqueY) + default: + p.append(UniqueX) + } + } + for p.X > dst.X { + p.append(UniqueX) + } + for p.Y > dst.Y { + p.append(UniqueY) + } + } +} + +func (p *path) append(t EditType) { + p.es = append(p.es, t) + switch t { + case Identity, Modified: + p.add(p.dir, p.dir) + case UniqueX: + p.add(p.dir, 0) + case UniqueY: + p.add(0, p.dir) + } + debug.Update() +} + +type point struct{ X, Y int } + +func (p *point) add(dx, dy int) { p.X += dx; p.Y += dy } + +// zigzag maps a consecutive sequence of integers to a zig-zag sequence. +// +// [0 1 2 3 4 5 ...] => [0 -1 +1 -2 +2 ...] +func zigzag(x int) int { + if x&1 != 0 { + x = ^x + } + return x >> 1 +} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/flags/flags.go b/vendor/github.com/google/go-cmp/cmp/internal/flags/flags.go new file mode 100644 index 000000000..d8e459c9b --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/flags/flags.go @@ -0,0 +1,9 @@ +// Copyright 2019, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package flags + +// Deterministic controls whether the output of Diff should be deterministic. +// This is only used for testing. +var Deterministic bool diff --git a/vendor/github.com/google/go-cmp/cmp/internal/function/func.go b/vendor/github.com/google/go-cmp/cmp/internal/function/func.go new file mode 100644 index 000000000..def01a6be --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/function/func.go @@ -0,0 +1,106 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package function provides functionality for identifying function types. 
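Before the implementation, a hedged sketch of how these helpers behave. The function package is likewise internal to go-cmp, so the import below is illustrative only, and it assumes the IsType and NameOf helpers defined next:

	package main

	import (
		"fmt"
		"reflect"

		"github.com/google/go-cmp/cmp/internal/function" // internal: sketch only
	)

	func main() {
		eq := func(x, y int) bool { return x == y }
		// IsType checks the shape of a function type, e.g. func(T, T) bool.
		fmt.Println(function.IsType(reflect.TypeOf(eq), function.Equal)) // true
		// NameOf recovers a readable name from the function pointer.
		fmt.Println(function.NameOf(reflect.ValueOf(eq))) // e.g. "main.main.func1"
	}
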
+package function + +import ( + "reflect" + "regexp" + "runtime" + "strings" +) + +type funcType int + +const ( + _ funcType = iota + + tbFunc // func(T) bool + ttbFunc // func(T, T) bool + ttiFunc // func(T, T) int + trbFunc // func(T, R) bool + tibFunc // func(T, I) bool + trFunc // func(T) R + + Equal = ttbFunc // func(T, T) bool + EqualAssignable = tibFunc // func(T, I) bool; encapsulates func(T, T) bool + Transformer = trFunc // func(T) R + ValueFilter = ttbFunc // func(T, T) bool + Less = ttbFunc // func(T, T) bool + Compare = ttiFunc // func(T, T) int + ValuePredicate = tbFunc // func(T) bool + KeyValuePredicate = trbFunc // func(T, R) bool +) + +var boolType = reflect.TypeOf(true) +var intType = reflect.TypeOf(0) + +// IsType reports whether the reflect.Type is of the specified function type. +func IsType(t reflect.Type, ft funcType) bool { + if t == nil || t.Kind() != reflect.Func || t.IsVariadic() { + return false + } + ni, no := t.NumIn(), t.NumOut() + switch ft { + case tbFunc: // func(T) bool + if ni == 1 && no == 1 && t.Out(0) == boolType { + return true + } + case ttbFunc: // func(T, T) bool + if ni == 2 && no == 1 && t.In(0) == t.In(1) && t.Out(0) == boolType { + return true + } + case ttiFunc: // func(T, T) int + if ni == 2 && no == 1 && t.In(0) == t.In(1) && t.Out(0) == intType { + return true + } + case trbFunc: // func(T, R) bool + if ni == 2 && no == 1 && t.Out(0) == boolType { + return true + } + case tibFunc: // func(T, I) bool + if ni == 2 && no == 1 && t.In(0).AssignableTo(t.In(1)) && t.Out(0) == boolType { + return true + } + case trFunc: // func(T) R + if ni == 1 && no == 1 { + return true + } + } + return false +} + +var lastIdentRx = regexp.MustCompile(`[_\p{L}][_\p{L}\p{N}]*$`) + +// NameOf returns the name of the function value. +func NameOf(v reflect.Value) string { + fnc := runtime.FuncForPC(v.Pointer()) + if fnc == nil { + return "" + } + fullName := fnc.Name() // e.g., "long/path/name/mypkg.(*MyType).(long/path/name/mypkg.myMethod)-fm" + + // Method closures have a "-fm" suffix. + fullName = strings.TrimSuffix(fullName, "-fm") + + var name string + for len(fullName) > 0 { + inParen := strings.HasSuffix(fullName, ")") + fullName = strings.TrimSuffix(fullName, ")") + + s := lastIdentRx.FindString(fullName) + if s == "" { + break + } + name = s + "." + name + fullName = strings.TrimSuffix(fullName, s) + + if i := strings.LastIndexByte(fullName, '('); inParen && i >= 0 { + fullName = fullName[:i] + } + fullName = strings.TrimSuffix(fullName, ".") + } + return strings.TrimSuffix(name, ".") +} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/name.go b/vendor/github.com/google/go-cmp/cmp/internal/value/name.go new file mode 100644 index 000000000..7b498bb2c --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/value/name.go @@ -0,0 +1,164 @@ +// Copyright 2020, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package value + +import ( + "reflect" + "strconv" +) + +var anyType = reflect.TypeOf((*interface{})(nil)).Elem() + +// TypeString is nearly identical to reflect.Type.String, +// but has an additional option to specify that full type names be used. 
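A sketch of the intended behavior (again an internal package, so the import is illustrative and the printed forms are indicative rather than guaranteed):

	package main

	import (
		"fmt"
		"reflect"

		"github.com/google/go-cmp/cmp/internal/value" // internal: sketch only
	)

	type pair struct{ X, Y int }

	func main() {
		t := reflect.TypeOf(map[string][]*pair{})
		fmt.Println(value.TypeString(t, false)) // e.g. map[string][]*main.pair
		fmt.Println(value.TypeString(t, true))  // e.g. map[string][]*"main".pair
	}
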
+func TypeString(t reflect.Type, qualified bool) string { + return string(appendTypeName(nil, t, qualified, false)) +} + +func appendTypeName(b []byte, t reflect.Type, qualified, elideFunc bool) []byte { + // BUG: Go reflection provides no way to disambiguate two named types + // of the same name and within the same package, + // but declared within the namespace of different functions. + + // Use the "any" alias instead of "interface{}" for better readability. + if t == anyType { + return append(b, "any"...) + } + + // Named type. + if t.Name() != "" { + if qualified && t.PkgPath() != "" { + b = append(b, '"') + b = append(b, t.PkgPath()...) + b = append(b, '"') + b = append(b, '.') + b = append(b, t.Name()...) + } else { + b = append(b, t.String()...) + } + return b + } + + // Unnamed type. + switch k := t.Kind(); k { + case reflect.Bool, reflect.String, reflect.UnsafePointer, + reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, + reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128: + b = append(b, k.String()...) + case reflect.Chan: + if t.ChanDir() == reflect.RecvDir { + b = append(b, "<-"...) + } + b = append(b, "chan"...) + if t.ChanDir() == reflect.SendDir { + b = append(b, "<-"...) + } + b = append(b, ' ') + b = appendTypeName(b, t.Elem(), qualified, false) + case reflect.Func: + if !elideFunc { + b = append(b, "func"...) + } + b = append(b, '(') + for i := 0; i < t.NumIn(); i++ { + if i > 0 { + b = append(b, ", "...) + } + if i == t.NumIn()-1 && t.IsVariadic() { + b = append(b, "..."...) + b = appendTypeName(b, t.In(i).Elem(), qualified, false) + } else { + b = appendTypeName(b, t.In(i), qualified, false) + } + } + b = append(b, ')') + switch t.NumOut() { + case 0: + // Do nothing + case 1: + b = append(b, ' ') + b = appendTypeName(b, t.Out(0), qualified, false) + default: + b = append(b, " ("...) + for i := 0; i < t.NumOut(); i++ { + if i > 0 { + b = append(b, ", "...) + } + b = appendTypeName(b, t.Out(i), qualified, false) + } + b = append(b, ')') + } + case reflect.Struct: + b = append(b, "struct{ "...) + for i := 0; i < t.NumField(); i++ { + if i > 0 { + b = append(b, "; "...) + } + sf := t.Field(i) + if !sf.Anonymous { + if qualified && sf.PkgPath != "" { + b = append(b, '"') + b = append(b, sf.PkgPath...) + b = append(b, '"') + b = append(b, '.') + } + b = append(b, sf.Name...) + b = append(b, ' ') + } + b = appendTypeName(b, sf.Type, qualified, false) + if sf.Tag != "" { + b = append(b, ' ') + b = strconv.AppendQuote(b, string(sf.Tag)) + } + } + if b[len(b)-1] == ' ' { + b = b[:len(b)-1] + } else { + b = append(b, ' ') + } + b = append(b, '}') + case reflect.Slice, reflect.Array: + b = append(b, '[') + if k == reflect.Array { + b = strconv.AppendUint(b, uint64(t.Len()), 10) + } + b = append(b, ']') + b = appendTypeName(b, t.Elem(), qualified, false) + case reflect.Map: + b = append(b, "map["...) + b = appendTypeName(b, t.Key(), qualified, false) + b = append(b, ']') + b = appendTypeName(b, t.Elem(), qualified, false) + case reflect.Ptr: + b = append(b, '*') + b = appendTypeName(b, t.Elem(), qualified, false) + case reflect.Interface: + b = append(b, "interface{ "...) + for i := 0; i < t.NumMethod(); i++ { + if i > 0 { + b = append(b, "; "...) + } + m := t.Method(i) + if qualified && m.PkgPath != "" { + b = append(b, '"') + b = append(b, m.PkgPath...) + b = append(b, '"') + b = append(b, '.') + } + b = append(b, m.Name...) 
+ b = appendTypeName(b, m.Type, qualified, true) + } + if b[len(b)-1] == ' ' { + b = b[:len(b)-1] + } else { + b = append(b, ' ') + } + b = append(b, '}') + default: + panic("invalid kind: " + k.String()) + } + return b +} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/pointer.go b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer.go new file mode 100644 index 000000000..e5dfff69a --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer.go @@ -0,0 +1,34 @@ +// Copyright 2018, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package value + +import ( + "reflect" + "unsafe" +) + +// Pointer is an opaque typed pointer and is guaranteed to be comparable. +type Pointer struct { + p unsafe.Pointer + t reflect.Type +} + +// PointerOf returns a Pointer from v, which must be a +// reflect.Ptr, reflect.Slice, or reflect.Map. +func PointerOf(v reflect.Value) Pointer { + // The proper representation of a pointer is unsafe.Pointer, + // which is necessary if the GC ever uses a moving collector. + return Pointer{unsafe.Pointer(v.Pointer()), v.Type()} +} + +// IsNil reports whether the pointer is nil. +func (p Pointer) IsNil() bool { + return p.p == nil +} + +// Uintptr returns the pointer as a uintptr. +func (p Pointer) Uintptr() uintptr { + return uintptr(p.p) +} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/sort.go b/vendor/github.com/google/go-cmp/cmp/internal/value/sort.go new file mode 100644 index 000000000..98533b036 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/value/sort.go @@ -0,0 +1,106 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package value + +import ( + "fmt" + "math" + "reflect" + "sort" +) + +// SortKeys sorts a list of map keys, deduplicating keys if necessary. +// The type of each value must be comparable. +func SortKeys(vs []reflect.Value) []reflect.Value { + if len(vs) == 0 { + return vs + } + + // Sort the map keys. + sort.SliceStable(vs, func(i, j int) bool { return isLess(vs[i], vs[j]) }) + + // Deduplicate keys (fails for NaNs). + vs2 := vs[:1] + for _, v := range vs[1:] { + if isLess(vs2[len(vs2)-1], v) { + vs2 = append(vs2, v) + } + } + return vs2 +} + +// isLess is a generic function for sorting arbitrary map keys. +// The inputs must be of the same type and must be comparable. +func isLess(x, y reflect.Value) bool { + switch x.Type().Kind() { + case reflect.Bool: + return !x.Bool() && y.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return x.Int() < y.Int() + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return x.Uint() < y.Uint() + case reflect.Float32, reflect.Float64: + // NOTE: This does not sort -0 as less than +0 + // since Go maps treat -0 and +0 as equal keys. 
+ fx, fy := x.Float(), y.Float() + return fx < fy || math.IsNaN(fx) && !math.IsNaN(fy) + case reflect.Complex64, reflect.Complex128: + cx, cy := x.Complex(), y.Complex() + rx, ix, ry, iy := real(cx), imag(cx), real(cy), imag(cy) + if rx == ry || (math.IsNaN(rx) && math.IsNaN(ry)) { + return ix < iy || math.IsNaN(ix) && !math.IsNaN(iy) + } + return rx < ry || math.IsNaN(rx) && !math.IsNaN(ry) + case reflect.Ptr, reflect.UnsafePointer, reflect.Chan: + return x.Pointer() < y.Pointer() + case reflect.String: + return x.String() < y.String() + case reflect.Array: + for i := 0; i < x.Len(); i++ { + if isLess(x.Index(i), y.Index(i)) { + return true + } + if isLess(y.Index(i), x.Index(i)) { + return false + } + } + return false + case reflect.Struct: + for i := 0; i < x.NumField(); i++ { + if isLess(x.Field(i), y.Field(i)) { + return true + } + if isLess(y.Field(i), x.Field(i)) { + return false + } + } + return false + case reflect.Interface: + vx, vy := x.Elem(), y.Elem() + if !vx.IsValid() || !vy.IsValid() { + return !vx.IsValid() && vy.IsValid() + } + tx, ty := vx.Type(), vy.Type() + if tx == ty { + return isLess(x.Elem(), y.Elem()) + } + if tx.Kind() != ty.Kind() { + return vx.Kind() < vy.Kind() + } + if tx.String() != ty.String() { + return tx.String() < ty.String() + } + if tx.PkgPath() != ty.PkgPath() { + return tx.PkgPath() < ty.PkgPath() + } + // This can happen in rare situations, so we fallback to just comparing + // the unique pointer for a reflect.Type. This guarantees deterministic + // ordering within a program, but it is obviously not stable. + return reflect.ValueOf(vx.Type()).Pointer() < reflect.ValueOf(vy.Type()).Pointer() + default: + // Must be Func, Map, or Slice; which are not comparable. + panic(fmt.Sprintf("%T is not comparable", x.Type())) + } +} diff --git a/vendor/github.com/google/go-cmp/cmp/options.go b/vendor/github.com/google/go-cmp/cmp/options.go new file mode 100644 index 000000000..ba3fce81f --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/options.go @@ -0,0 +1,562 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cmp + +import ( + "fmt" + "reflect" + "regexp" + "strings" + + "github.com/google/go-cmp/cmp/internal/function" +) + +// Option configures for specific behavior of [Equal] and [Diff]. In particular, +// the fundamental Option functions ([Ignore], [Transformer], and [Comparer]), +// configure how equality is determined. +// +// The fundamental options may be composed with filters ([FilterPath] and +// [FilterValues]) to control the scope over which they are applied. +// +// The [github.com/google/go-cmp/cmp/cmpopts] package provides helper functions +// for creating options that may be used with [Equal] and [Diff]. +type Option interface { + // filter applies all filters and returns the option that remains. + // Each option may only read s.curPath and call s.callTTBFunc. + // + // An Options is returned only if multiple comparers or transformers + // can apply simultaneously and will only contain values of those types + // or sub-Options containing values of those types. + filter(s *state, t reflect.Type, vx, vy reflect.Value) applicableOption +} + +// applicableOption represents the following types: +// +// Fundamental: ignore | validator | *comparer | *transformer +// Grouping: Options +type applicableOption interface { + Option + + // apply executes the option, which may mutate s or panic. 
+ apply(s *state, vx, vy reflect.Value) +} + +// coreOption represents the following types: +// +// Fundamental: ignore | validator | *comparer | *transformer +// Filters: *pathFilter | *valuesFilter +type coreOption interface { + Option + isCore() +} + +type core struct{} + +func (core) isCore() {} + +// Options is a list of [Option] values that also satisfies the [Option] interface. +// Helper comparison packages may return an Options value when packing multiple +// [Option] values into a single [Option]. When this package processes an Options, +// it will be implicitly expanded into a flat list. +// +// Applying a filter on an Options is equivalent to applying that same filter +// on all individual options held within. +type Options []Option + +func (opts Options) filter(s *state, t reflect.Type, vx, vy reflect.Value) (out applicableOption) { + for _, opt := range opts { + switch opt := opt.filter(s, t, vx, vy); opt.(type) { + case ignore: + return ignore{} // Only ignore can short-circuit evaluation + case validator: + out = validator{} // Takes precedence over comparer or transformer + case *comparer, *transformer, Options: + switch out.(type) { + case nil: + out = opt + case validator: + // Keep validator + case *comparer, *transformer, Options: + out = Options{out, opt} // Conflicting comparers or transformers + } + } + } + return out +} + +func (opts Options) apply(s *state, _, _ reflect.Value) { + const warning = "ambiguous set of applicable options" + const help = "consider using filters to ensure at most one Comparer or Transformer may apply" + var ss []string + for _, opt := range flattenOptions(nil, opts) { + ss = append(ss, fmt.Sprint(opt)) + } + set := strings.Join(ss, "\n\t") + panic(fmt.Sprintf("%s at %#v:\n\t%s\n%s", warning, s.curPath, set, help)) +} + +func (opts Options) String() string { + var ss []string + for _, opt := range opts { + ss = append(ss, fmt.Sprint(opt)) + } + return fmt.Sprintf("Options{%s}", strings.Join(ss, ", ")) +} + +// FilterPath returns a new [Option] where opt is only evaluated if filter f +// returns true for the current [Path] in the value tree. +// +// This filter is called even if a slice element or map entry is missing and +// provides an opportunity to ignore such cases. The filter function must be +// symmetric such that the filter result is identical regardless of whether the +// missing value is from x or y. +// +// The option passed in may be an [Ignore], [Transformer], [Comparer], [Options], or +// a previously filtered [Option]. +func FilterPath(f func(Path) bool, opt Option) Option { + if f == nil { + panic("invalid path filter function") + } + if opt := normalizeOption(opt); opt != nil { + return &pathFilter{fnc: f, opt: opt} + } + return nil +} + +type pathFilter struct { + core + fnc func(Path) bool + opt Option +} + +func (f pathFilter) filter(s *state, t reflect.Type, vx, vy reflect.Value) applicableOption { + if f.fnc(s.curPath) { + return f.opt.filter(s, t, vx, vy) + } + return nil +} + +func (f pathFilter) String() string { + return fmt.Sprintf("FilterPath(%s, %v)", function.NameOf(reflect.ValueOf(f.fnc)), f.opt) +} + +// FilterValues returns a new [Option] where opt is only evaluated if filter f, +// which is a function of the form "func(T, T) bool", returns true for the +// current pair of values being compared. If either value is invalid or +// the type of the values is not assignable to T, then this filter implicitly +// returns false. 
+// +// The filter function must be +// symmetric (i.e., agnostic to the order of the inputs) and +// deterministic (i.e., produces the same result when given the same inputs). +// If T is an interface, it is possible that f is called with two values with +// different concrete types that both implement T. +// +// The option passed in may be an [Ignore], [Transformer], [Comparer], [Options], or +// a previously filtered [Option]. +func FilterValues(f interface{}, opt Option) Option { + v := reflect.ValueOf(f) + if !function.IsType(v.Type(), function.ValueFilter) || v.IsNil() { + panic(fmt.Sprintf("invalid values filter function: %T", f)) + } + if opt := normalizeOption(opt); opt != nil { + vf := &valuesFilter{fnc: v, opt: opt} + if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 { + vf.typ = ti + } + return vf + } + return nil +} + +type valuesFilter struct { + core + typ reflect.Type // T + fnc reflect.Value // func(T, T) bool + opt Option +} + +func (f valuesFilter) filter(s *state, t reflect.Type, vx, vy reflect.Value) applicableOption { + if !vx.IsValid() || !vx.CanInterface() || !vy.IsValid() || !vy.CanInterface() { + return nil + } + if (f.typ == nil || t.AssignableTo(f.typ)) && s.callTTBFunc(f.fnc, vx, vy) { + return f.opt.filter(s, t, vx, vy) + } + return nil +} + +func (f valuesFilter) String() string { + return fmt.Sprintf("FilterValues(%s, %v)", function.NameOf(f.fnc), f.opt) +} + +// Ignore is an [Option] that causes all comparisons to be ignored. +// This value is intended to be combined with [FilterPath] or [FilterValues]. +// It is an error to pass an unfiltered Ignore option to [Equal]. +func Ignore() Option { return ignore{} } + +type ignore struct{ core } + +func (ignore) isFiltered() bool { return false } +func (ignore) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableOption { return ignore{} } +func (ignore) apply(s *state, _, _ reflect.Value) { s.report(true, reportByIgnore) } +func (ignore) String() string { return "Ignore()" } + +// validator is a sentinel Option type to indicate that some options could not +// be evaluated due to unexported fields, missing slice elements, or +// missing map entries. Both values are validator only for unexported fields. +type validator struct{ core } + +func (validator) filter(_ *state, _ reflect.Type, vx, vy reflect.Value) applicableOption { + if !vx.IsValid() || !vy.IsValid() { + return validator{} + } + if !vx.CanInterface() || !vy.CanInterface() { + return validator{} + } + return nil +} +func (validator) apply(s *state, vx, vy reflect.Value) { + // Implies missing slice element or map entry. + if !vx.IsValid() || !vy.IsValid() { + s.report(vx.IsValid() == vy.IsValid(), 0) + return + } + + // Unable to Interface implies unexported field without visibility access. + if !vx.CanInterface() || !vy.CanInterface() { + help := "consider using a custom Comparer; if you control the implementation of type, you can also consider using an Exporter, AllowUnexported, or cmpopts.IgnoreUnexported" + var name string + if t := s.curPath.Index(-2).Type(); t.Name() != "" { + // Named type with unexported fields. 
+ name = fmt.Sprintf("%q.%v", t.PkgPath(), t.Name()) // e.g., "path/to/package".MyType + isProtoMessage := func(t reflect.Type) bool { + m, ok := reflect.PointerTo(t).MethodByName("ProtoReflect") + return ok && m.Type.NumIn() == 1 && m.Type.NumOut() == 1 && + m.Type.Out(0).PkgPath() == "google.golang.org/protobuf/reflect/protoreflect" && + m.Type.Out(0).Name() == "Message" + } + if isProtoMessage(t) { + help = `consider using "google.golang.org/protobuf/testing/protocmp".Transform to compare proto.Message types` + } else if _, ok := reflect.New(t).Interface().(error); ok { + help = "consider using cmpopts.EquateErrors to compare error values" + } else if t.Comparable() { + help = "consider using cmpopts.EquateComparable to compare comparable Go types" + } + } else { + // Unnamed type with unexported fields. Derive PkgPath from field. + var pkgPath string + for i := 0; i < t.NumField() && pkgPath == ""; i++ { + pkgPath = t.Field(i).PkgPath + } + name = fmt.Sprintf("%q.(%v)", pkgPath, t.String()) // e.g., "path/to/package".(struct { a int }) + } + panic(fmt.Sprintf("cannot handle unexported field at %#v:\n\t%v\n%s", s.curPath, name, help)) + } + + panic("not reachable") +} + +// identRx represents a valid identifier according to the Go specification. +const identRx = `[_\p{L}][_\p{L}\p{N}]*` + +var identsRx = regexp.MustCompile(`^` + identRx + `(\.` + identRx + `)*$`) + +// Transformer returns an [Option] that applies a transformation function that +// converts values of a certain type into that of another. +// +// The transformer f must be a function "func(T) R" that converts values of +// type T to those of type R and is implicitly filtered to input values +// assignable to T. The transformer must not mutate T in any way. +// +// To help prevent some cases of infinite recursive cycles applying the +// same transform to the output of itself (e.g., in the case where the +// input and output types are the same), an implicit filter is added such that +// a transformer is applicable only if that exact transformer is not already +// in the tail of the [Path] since the last non-[Transform] step. +// For situations where the implicit filter is still insufficient, +// consider using [github.com/google/go-cmp/cmp/cmpopts.AcyclicTransformer], +// which adds a filter to prevent the transformer from +// being recursively applied upon itself. +// +// The name is a user provided label that is used as the [Transform.Name] in the +// transformation [PathStep] (and eventually shown in the [Diff] output). +// The name must be a valid identifier or qualified identifier in Go syntax. +// If empty, an arbitrary name is used. 
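A small usage sketch of the public API this enables (the "Sort" label and the slices are arbitrary):

	package main

	import (
		"fmt"
		"sort"

		"github.com/google/go-cmp/cmp"
	)

	func main() {
		// Compare two int slices as multisets by sorting a copy of each side first.
		sorted := cmp.Transformer("Sort", func(in []int) []int {
			out := append([]int(nil), in...) // copy: a transformer must not mutate its input
			sort.Ints(out)
			return out
		})
		fmt.Println(cmp.Equal([]int{3, 1, 2}, []int{1, 2, 3}, sorted)) // true
	}

Copying before sorting matters here: the contract above forbids a transformer from mutating its input.
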
+func Transformer(name string, f interface{}) Option { + v := reflect.ValueOf(f) + if !function.IsType(v.Type(), function.Transformer) || v.IsNil() { + panic(fmt.Sprintf("invalid transformer function: %T", f)) + } + if name == "" { + name = function.NameOf(v) + if !identsRx.MatchString(name) { + name = "λ" // Lambda-symbol as placeholder name + } + } else if !identsRx.MatchString(name) { + panic(fmt.Sprintf("invalid name: %q", name)) + } + tr := &transformer{name: name, fnc: reflect.ValueOf(f)} + if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 { + tr.typ = ti + } + return tr +} + +type transformer struct { + core + name string + typ reflect.Type // T + fnc reflect.Value // func(T) R +} + +func (tr *transformer) isFiltered() bool { return tr.typ != nil } + +func (tr *transformer) filter(s *state, t reflect.Type, _, _ reflect.Value) applicableOption { + for i := len(s.curPath) - 1; i >= 0; i-- { + if t, ok := s.curPath[i].(Transform); !ok { + break // Hit most recent non-Transform step + } else if tr == t.trans { + return nil // Cannot directly use same Transform + } + } + if tr.typ == nil || t.AssignableTo(tr.typ) { + return tr + } + return nil +} + +func (tr *transformer) apply(s *state, vx, vy reflect.Value) { + step := Transform{&transform{pathStep{typ: tr.fnc.Type().Out(0)}, tr}} + vvx := s.callTRFunc(tr.fnc, vx, step) + vvy := s.callTRFunc(tr.fnc, vy, step) + step.vx, step.vy = vvx, vvy + s.compareAny(step) +} + +func (tr transformer) String() string { + return fmt.Sprintf("Transformer(%s, %s)", tr.name, function.NameOf(tr.fnc)) +} + +// Comparer returns an [Option] that determines whether two values are equal +// to each other. +// +// The comparer f must be a function "func(T, T) bool" and is implicitly +// filtered to input values assignable to T. If T is an interface, it is +// possible that f is called with two values of different concrete types that +// both implement T. +// +// The equality function must be: +// - Symmetric: equal(x, y) == equal(y, x) +// - Deterministic: equal(x, y) == equal(x, y) +// - Pure: equal(x, y) does not modify x or y +func Comparer(f interface{}) Option { + v := reflect.ValueOf(f) + if !function.IsType(v.Type(), function.Equal) || v.IsNil() { + panic(fmt.Sprintf("invalid comparer function: %T", f)) + } + cm := &comparer{fnc: v} + if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 { + cm.typ = ti + } + return cm +} + +type comparer struct { + core + typ reflect.Type // T + fnc reflect.Value // func(T, T) bool +} + +func (cm *comparer) isFiltered() bool { return cm.typ != nil } + +func (cm *comparer) filter(_ *state, t reflect.Type, _, _ reflect.Value) applicableOption { + if cm.typ == nil || t.AssignableTo(cm.typ) { + return cm + } + return nil +} + +func (cm *comparer) apply(s *state, vx, vy reflect.Value) { + eq := s.callTTBFunc(cm.fnc, vx, vy) + s.report(eq, reportByFunc) +} + +func (cm comparer) String() string { + return fmt.Sprintf("Comparer(%s)", function.NameOf(cm.fnc)) +} + +// Exporter returns an [Option] that specifies whether [Equal] is allowed to +// introspect into the unexported fields of certain struct types. +// +// Users of this option must understand that comparing on unexported fields +// from external packages is not safe since changes in the internal +// implementation of some external package may cause the result of [Equal] +// to unexpectedly change. 
However, it may be valid to use this option on types
+// defined in an internal package where the semantic meaning of an unexported
+// field is in the control of the user.
+//
+// In many cases, a custom [Comparer] should be used instead that defines
+// equality as a function of the public API of a type rather than the underlying
+// unexported implementation.
+//
+// For example, the [reflect.Type] documentation defines equality to be determined
+// by the == operator on the interface (essentially performing a shallow pointer
+// comparison) and most attempts to compare *[regexp.Regexp] types are interested
+// in only checking that the regular expression strings are equal.
+// Both of these are accomplished using [Comparer] options:
+//
+//	Comparer(func(x, y reflect.Type) bool { return x == y })
+//	Comparer(func(x, y *regexp.Regexp) bool { return x.String() == y.String() })
+//
+// In other cases, the [github.com/google/go-cmp/cmp/cmpopts.IgnoreUnexported]
+// option can be used to ignore all unexported fields on specified struct types.
+func Exporter(f func(reflect.Type) bool) Option {
+	return exporter(f)
+}
+
+type exporter func(reflect.Type) bool
+
+func (exporter) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableOption {
+	panic("not implemented")
+}
+
+// AllowUnexported returns an [Option] that allows [Equal] to forcibly introspect
+// unexported fields of the specified struct types.
+//
+// See [Exporter] for the proper use of this option.
+func AllowUnexported(types ...interface{}) Option {
+	m := make(map[reflect.Type]bool)
+	for _, typ := range types {
+		t := reflect.TypeOf(typ)
+		if t.Kind() != reflect.Struct {
+			panic(fmt.Sprintf("invalid struct type: %T", typ))
+		}
+		m[t] = true
+	}
+	return exporter(func(t reflect.Type) bool { return m[t] })
+}
+
+// Result represents the comparison result for a single node and
+// is provided by cmp when calling Report (see [Reporter]).
+type Result struct {
+	_     [0]func() // Make Result incomparable
+	flags resultFlags
+}
+
+// Equal reports whether the node was determined to be equal or not.
+// As a special case, ignored nodes are considered equal.
+func (r Result) Equal() bool {
+	return r.flags&(reportEqual|reportByIgnore) != 0
+}
+
+// ByIgnore reports whether the node is equal because it was ignored.
+// This never reports true if [Result.Equal] reports false.
+func (r Result) ByIgnore() bool {
+	return r.flags&reportByIgnore != 0
+}
+
+// ByMethod reports whether the Equal method determined equality.
+func (r Result) ByMethod() bool {
+	return r.flags&reportByMethod != 0
+}
+
+// ByFunc reports whether a [Comparer] function determined equality.
+func (r Result) ByFunc() bool {
+	return r.flags&reportByFunc != 0
+}
+
+// ByCycle reports whether a reference cycle was detected.
+func (r Result) ByCycle() bool {
+	return r.flags&reportByCycle != 0
+}
+
+type resultFlags uint
+
+const (
+	_ resultFlags = (1 << iota) / 2
+
+	reportEqual
+	reportUnequal
+	reportByIgnore
+	reportByMethod
+	reportByFunc
+	reportByCycle
+)
+
+// Reporter is an [Option] that can be passed to [Equal]. When [Equal] traverses
+// the value trees, it calls PushStep as it descends into each node in the
+// tree and PopStep as it ascends out of the node. The leaves of the tree are
+// either compared (determined to be equal or not equal) or ignored and reported
+// as such by calling the Report method.
+func Reporter(r interface {
+	// PushStep is called when a tree-traversal operation is performed. 
+ // The PathStep itself is only valid until the step is popped. + // The PathStep.Values are valid for the duration of the entire traversal + // and must not be mutated. + // + // Equal always calls PushStep at the start to provide an operation-less + // PathStep used to report the root values. + // + // Within a slice, the exact set of inserted, removed, or modified elements + // is unspecified and may change in future implementations. + // The entries of a map are iterated through in an unspecified order. + PushStep(PathStep) + + // Report is called exactly once on leaf nodes to report whether the + // comparison identified the node as equal, unequal, or ignored. + // A leaf node is one that is immediately preceded by and followed by + // a pair of PushStep and PopStep calls. + Report(Result) + + // PopStep ascends back up the value tree. + // There is always a matching pop call for every push call. + PopStep() +}) Option { + return reporter{r} +} + +type reporter struct{ reporterIface } +type reporterIface interface { + PushStep(PathStep) + Report(Result) + PopStep() +} + +func (reporter) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableOption { + panic("not implemented") +} + +// normalizeOption normalizes the input options such that all Options groups +// are flattened and groups with a single element are reduced to that element. +// Only coreOptions and Options containing coreOptions are allowed. +func normalizeOption(src Option) Option { + switch opts := flattenOptions(nil, Options{src}); len(opts) { + case 0: + return nil + case 1: + return opts[0] + default: + return opts + } +} + +// flattenOptions copies all options in src to dst as a flat list. +// Only coreOptions and Options containing coreOptions are allowed. +func flattenOptions(dst, src Options) Options { + for _, opt := range src { + switch opt := opt.(type) { + case nil: + continue + case Options: + dst = flattenOptions(dst, opt) + case coreOption: + dst = append(dst, opt) + default: + panic(fmt.Sprintf("invalid option type: %T", opt)) + } + } + return dst +} diff --git a/vendor/github.com/google/go-cmp/cmp/path.go b/vendor/github.com/google/go-cmp/cmp/path.go new file mode 100644 index 000000000..c3c145642 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/path.go @@ -0,0 +1,390 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cmp + +import ( + "fmt" + "reflect" + "strings" + "unicode" + "unicode/utf8" + + "github.com/google/go-cmp/cmp/internal/value" +) + +// Path is a list of [PathStep] describing the sequence of operations to get +// from some root type to the current position in the value tree. +// The first Path element is always an operation-less [PathStep] that exists +// simply to identify the initial type. +// +// When traversing structs with embedded structs, the embedded struct will +// always be accessed as a field before traversing the fields of the +// embedded struct themselves. That is, an exported field from the +// embedded struct will never be accessed directly from the parent struct. +type Path []PathStep + +// PathStep is a union-type for specific operations to traverse +// a value's tree structure. Users of this package never need to implement +// these types as values of this type will be returned by this package. 
+// +// Implementations of this interface: +// - [StructField] +// - [SliceIndex] +// - [MapIndex] +// - [Indirect] +// - [TypeAssertion] +// - [Transform] +type PathStep interface { + String() string + + // Type is the resulting type after performing the path step. + Type() reflect.Type + + // Values is the resulting values after performing the path step. + // The type of each valid value is guaranteed to be identical to Type. + // + // In some cases, one or both may be invalid or have restrictions: + // - For StructField, both are not interface-able if the current field + // is unexported and the struct type is not explicitly permitted by + // an Exporter to traverse unexported fields. + // - For SliceIndex, one may be invalid if an element is missing from + // either the x or y slice. + // - For MapIndex, one may be invalid if an entry is missing from + // either the x or y map. + // + // The provided values must not be mutated. + Values() (vx, vy reflect.Value) +} + +var ( + _ PathStep = StructField{} + _ PathStep = SliceIndex{} + _ PathStep = MapIndex{} + _ PathStep = Indirect{} + _ PathStep = TypeAssertion{} + _ PathStep = Transform{} +) + +func (pa *Path) push(s PathStep) { + *pa = append(*pa, s) +} + +func (pa *Path) pop() { + *pa = (*pa)[:len(*pa)-1] +} + +// Last returns the last [PathStep] in the Path. +// If the path is empty, this returns a non-nil [PathStep] +// that reports a nil [PathStep.Type]. +func (pa Path) Last() PathStep { + return pa.Index(-1) +} + +// Index returns the ith step in the Path and supports negative indexing. +// A negative index starts counting from the tail of the Path such that -1 +// refers to the last step, -2 refers to the second-to-last step, and so on. +// If index is invalid, this returns a non-nil [PathStep] +// that reports a nil [PathStep.Type]. +func (pa Path) Index(i int) PathStep { + if i < 0 { + i = len(pa) + i + } + if i < 0 || i >= len(pa) { + return pathStep{} + } + return pa[i] +} + +// String returns the simplified path to a node. +// The simplified path only contains struct field accesses. +// +// For example: +// +// MyMap.MySlices.MyField +func (pa Path) String() string { + var ss []string + for _, s := range pa { + if _, ok := s.(StructField); ok { + ss = append(ss, s.String()) + } + } + return strings.TrimPrefix(strings.Join(ss, ""), ".") +} + +// GoString returns the path to a specific node using Go syntax. 
+// +// For example: +// +// (*root.MyMap["key"].(*mypkg.MyStruct).MySlices)[2][3].MyField +func (pa Path) GoString() string { + var ssPre, ssPost []string + var numIndirect int + for i, s := range pa { + var nextStep PathStep + if i+1 < len(pa) { + nextStep = pa[i+1] + } + switch s := s.(type) { + case Indirect: + numIndirect++ + pPre, pPost := "(", ")" + switch nextStep.(type) { + case Indirect: + continue // Next step is indirection, so let them batch up + case StructField: + numIndirect-- // Automatic indirection on struct fields + case nil: + pPre, pPost = "", "" // Last step; no need for parenthesis + } + if numIndirect > 0 { + ssPre = append(ssPre, pPre+strings.Repeat("*", numIndirect)) + ssPost = append(ssPost, pPost) + } + numIndirect = 0 + continue + case Transform: + ssPre = append(ssPre, s.trans.name+"(") + ssPost = append(ssPost, ")") + continue + } + ssPost = append(ssPost, s.String()) + } + for i, j := 0, len(ssPre)-1; i < j; i, j = i+1, j-1 { + ssPre[i], ssPre[j] = ssPre[j], ssPre[i] + } + return strings.Join(ssPre, "") + strings.Join(ssPost, "") +} + +type pathStep struct { + typ reflect.Type + vx, vy reflect.Value +} + +func (ps pathStep) Type() reflect.Type { return ps.typ } +func (ps pathStep) Values() (vx, vy reflect.Value) { return ps.vx, ps.vy } +func (ps pathStep) String() string { + if ps.typ == nil { + return "" + } + s := value.TypeString(ps.typ, false) + if s == "" || strings.ContainsAny(s, "{}\n") { + return "root" // Type too simple or complex to print + } + return fmt.Sprintf("{%s}", s) +} + +// StructField is a [PathStep] that represents a struct field access +// on a field called [StructField.Name]. +type StructField struct{ *structField } +type structField struct { + pathStep + name string + idx int + + // These fields are used for forcibly accessing an unexported field. + // pvx, pvy, and field are only valid if unexported is true. + unexported bool + mayForce bool // Forcibly allow visibility + paddr bool // Was parent addressable? + pvx, pvy reflect.Value // Parent values (always addressable) + field reflect.StructField // Field information +} + +func (sf StructField) Type() reflect.Type { return sf.typ } +func (sf StructField) Values() (vx, vy reflect.Value) { + if !sf.unexported { + return sf.vx, sf.vy // CanInterface reports true + } + + // Forcibly obtain read-write access to an unexported struct field. + if sf.mayForce { + vx = retrieveUnexportedField(sf.pvx, sf.field, sf.paddr) + vy = retrieveUnexportedField(sf.pvy, sf.field, sf.paddr) + return vx, vy // CanInterface reports true + } + return sf.vx, sf.vy // CanInterface reports false +} +func (sf StructField) String() string { return fmt.Sprintf(".%s", sf.name) } + +// Name is the field name. +func (sf StructField) Name() string { return sf.name } + +// Index is the index of the field in the parent struct type. +// See [reflect.Type.Field]. +func (sf StructField) Index() int { return sf.idx } + +// SliceIndex is a [PathStep] that represents an index operation on +// a slice or array at some index [SliceIndex.Key]. +type SliceIndex struct{ *sliceIndex } +type sliceIndex struct { + pathStep + xkey, ykey int + isSlice bool // False for reflect.Array +} + +func (si SliceIndex) Type() reflect.Type { return si.typ } +func (si SliceIndex) Values() (vx, vy reflect.Value) { return si.vx, si.vy } +func (si SliceIndex) String() string { + switch { + case si.xkey == si.ykey: + return fmt.Sprintf("[%d]", si.xkey) + case si.ykey == -1: + // [5->?] 
means "I don't know where X[5] went" + return fmt.Sprintf("[%d->?]", si.xkey) + case si.xkey == -1: + // [?->3] means "I don't know where Y[3] came from" + return fmt.Sprintf("[?->%d]", si.ykey) + default: + // [5->3] means "X[5] moved to Y[3]" + return fmt.Sprintf("[%d->%d]", si.xkey, si.ykey) + } +} + +// Key is the index key; it may return -1 if in a split state +func (si SliceIndex) Key() int { + if si.xkey != si.ykey { + return -1 + } + return si.xkey +} + +// SplitKeys are the indexes for indexing into slices in the +// x and y values, respectively. These indexes may differ due to the +// insertion or removal of an element in one of the slices, causing +// all of the indexes to be shifted. If an index is -1, then that +// indicates that the element does not exist in the associated slice. +// +// [SliceIndex.Key] is guaranteed to return -1 if and only if the indexes +// returned by SplitKeys are not the same. SplitKeys will never return -1 for +// both indexes. +func (si SliceIndex) SplitKeys() (ix, iy int) { return si.xkey, si.ykey } + +// MapIndex is a [PathStep] that represents an index operation on a map at some index Key. +type MapIndex struct{ *mapIndex } +type mapIndex struct { + pathStep + key reflect.Value +} + +func (mi MapIndex) Type() reflect.Type { return mi.typ } +func (mi MapIndex) Values() (vx, vy reflect.Value) { return mi.vx, mi.vy } +func (mi MapIndex) String() string { return fmt.Sprintf("[%#v]", mi.key) } + +// Key is the value of the map key. +func (mi MapIndex) Key() reflect.Value { return mi.key } + +// Indirect is a [PathStep] that represents pointer indirection on the parent type. +type Indirect struct{ *indirect } +type indirect struct { + pathStep +} + +func (in Indirect) Type() reflect.Type { return in.typ } +func (in Indirect) Values() (vx, vy reflect.Value) { return in.vx, in.vy } +func (in Indirect) String() string { return "*" } + +// TypeAssertion is a [PathStep] that represents a type assertion on an interface. +type TypeAssertion struct{ *typeAssertion } +type typeAssertion struct { + pathStep +} + +func (ta TypeAssertion) Type() reflect.Type { return ta.typ } +func (ta TypeAssertion) Values() (vx, vy reflect.Value) { return ta.vx, ta.vy } +func (ta TypeAssertion) String() string { return fmt.Sprintf(".(%v)", value.TypeString(ta.typ, false)) } + +// Transform is a [PathStep] that represents a transformation +// from the parent type to the current type. +type Transform struct{ *transform } +type transform struct { + pathStep + trans *transformer +} + +func (tf Transform) Type() reflect.Type { return tf.typ } +func (tf Transform) Values() (vx, vy reflect.Value) { return tf.vx, tf.vy } +func (tf Transform) String() string { return fmt.Sprintf("%s()", tf.trans.name) } + +// Name is the name of the [Transformer]. +func (tf Transform) Name() string { return tf.trans.name } + +// Func is the function pointer to the transformer function. +func (tf Transform) Func() reflect.Value { return tf.trans.fnc } + +// Option returns the originally constructed [Transformer] option. +// The == operator can be used to detect the exact option used. +func (tf Transform) Option() Option { return tf.trans } + +// pointerPath represents a dual-stack of pointers encountered when +// recursively traversing the x and y values. This data structure supports +// detection of cycles and determining whether the cycles are equal. +// In Go, cycles can occur via pointers, slices, and maps. 
+// +// The pointerPath uses a map to represent a stack; where descension into a +// pointer pushes the address onto the stack, and ascension from a pointer +// pops the address from the stack. Thus, when traversing into a pointer from +// reflect.Ptr, reflect.Slice element, or reflect.Map, we can detect cycles +// by checking whether the pointer has already been visited. The cycle detection +// uses a separate stack for the x and y values. +// +// If a cycle is detected we need to determine whether the two pointers +// should be considered equal. The definition of equality chosen by Equal +// requires two graphs to have the same structure. To determine this, both the +// x and y values must have a cycle where the previous pointers were also +// encountered together as a pair. +// +// Semantically, this is equivalent to augmenting Indirect, SliceIndex, and +// MapIndex with pointer information for the x and y values. +// Suppose px and py are two pointers to compare, we then search the +// Path for whether px was ever encountered in the Path history of x, and +// similarly so with py. If either side has a cycle, the comparison is only +// equal if both px and py have a cycle resulting from the same PathStep. +// +// Using a map as a stack is more performant as we can perform cycle detection +// in O(1) instead of O(N) where N is len(Path). +type pointerPath struct { + // mx is keyed by x pointers, where the value is the associated y pointer. + mx map[value.Pointer]value.Pointer + // my is keyed by y pointers, where the value is the associated x pointer. + my map[value.Pointer]value.Pointer +} + +func (p *pointerPath) Init() { + p.mx = make(map[value.Pointer]value.Pointer) + p.my = make(map[value.Pointer]value.Pointer) +} + +// Push indicates intent to descend into pointers vx and vy where +// visited reports whether either has been seen before. If visited before, +// equal reports whether both pointers were encountered together. +// Pop must be called if and only if the pointers were never visited. +// +// The pointers vx and vy must be a reflect.Ptr, reflect.Slice, or reflect.Map +// and be non-nil. +func (p pointerPath) Push(vx, vy reflect.Value) (equal, visited bool) { + px := value.PointerOf(vx) + py := value.PointerOf(vy) + _, ok1 := p.mx[px] + _, ok2 := p.my[py] + if ok1 || ok2 { + equal = p.mx[px] == py && p.my[py] == px // Pointers paired together + return equal, true + } + p.mx[px] = py + p.my[py] = px + return false, false +} + +// Pop ascends from pointers vx and vy. +func (p pointerPath) Pop(vx, vy reflect.Value) { + delete(p.mx, value.PointerOf(vx)) + delete(p.my, value.PointerOf(vy)) +} + +// isExported reports whether the identifier is exported. +func isExported(id string) bool { + r, _ := utf8.DecodeRuneInString(id) + return unicode.IsUpper(r) +} diff --git a/vendor/github.com/google/go-cmp/cmp/report.go b/vendor/github.com/google/go-cmp/cmp/report.go new file mode 100644 index 000000000..f43cd12eb --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/report.go @@ -0,0 +1,54 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cmp + +// defaultReporter implements the reporter interface. +// +// As Equal serially calls the PushStep, Report, and PopStep methods, the +// defaultReporter constructs a tree-based representation of the compared value +// and the result of each comparison (see valueNode). 
+// +// When the String method is called, the FormatDiff method transforms the +// valueNode tree into a textNode tree, which is a tree-based representation +// of the textual output (see textNode). +// +// Lastly, the textNode.String method produces the final report as a string. +type defaultReporter struct { + root *valueNode + curr *valueNode +} + +func (r *defaultReporter) PushStep(ps PathStep) { + r.curr = r.curr.PushStep(ps) + if r.root == nil { + r.root = r.curr + } +} +func (r *defaultReporter) Report(rs Result) { + r.curr.Report(rs) +} +func (r *defaultReporter) PopStep() { + r.curr = r.curr.PopStep() +} + +// String provides a full report of the differences detected as a structured +// literal in pseudo-Go syntax. String may only be called after the entire tree +// has been traversed. +func (r *defaultReporter) String() string { + assert(r.root != nil && r.curr == nil) + if r.root.NumDiff == 0 { + return "" + } + ptrs := new(pointerReferences) + text := formatOptions{}.FormatDiff(r.root, ptrs) + resolveReferences(text) + return text.String() +} + +func assert(ok bool) { + if !ok { + panic("assertion failure") + } +} diff --git a/vendor/github.com/google/go-cmp/cmp/report_compare.go b/vendor/github.com/google/go-cmp/cmp/report_compare.go new file mode 100644 index 000000000..2050bf6b4 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/report_compare.go @@ -0,0 +1,433 @@ +// Copyright 2019, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cmp + +import ( + "fmt" + "reflect" +) + +// numContextRecords is the number of surrounding equal records to print. +const numContextRecords = 2 + +type diffMode byte + +const ( + diffUnknown diffMode = 0 + diffIdentical diffMode = ' ' + diffRemoved diffMode = '-' + diffInserted diffMode = '+' +) + +type typeMode int + +const ( + // emitType always prints the type. + emitType typeMode = iota + // elideType never prints the type. + elideType + // autoType prints the type only for composite kinds + // (i.e., structs, slices, arrays, and maps). + autoType +) + +type formatOptions struct { + // DiffMode controls the output mode of FormatDiff. + // + // If diffUnknown, then produce a diff of the x and y values. + // If diffIdentical, then emit values as if they were equal. + // If diffRemoved, then only emit x values (ignoring y values). + // If diffInserted, then only emit y values (ignoring x values). + DiffMode diffMode + + // TypeMode controls whether to print the type for the current node. + // + // As a general rule of thumb, we always print the type of the next node + // after an interface, and always elide the type of the next node after + // a slice or map node. + TypeMode typeMode + + // formatValueOptions are options specific to printing reflect.Values. 
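
The PushStep/Report/PopStep calls that drive defaultReporter are the same contract exposed by go-cmp's exported cmp.Reporter option, so a custom reporter can be sketched the same way (pathReporter is an invented name; the pattern follows go-cmp's documented reporter example):

package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
)

// pathReporter records the path to each difference, mirroring the
// PushStep/Report/PopStep protocol implemented by defaultReporter.
type pathReporter struct {
	path  cmp.Path
	diffs []string
}

func (r *pathReporter) PushStep(ps cmp.PathStep) { r.path = append(r.path, ps) }
func (r *pathReporter) Report(rs cmp.Result) {
	if !rs.Equal() {
		vx, vy := r.path.Last().Values()
		r.diffs = append(r.diffs, fmt.Sprintf("%#v: %v != %v", r.path, vx, vy))
	}
}
func (r *pathReporter) PopStep() { r.path = r.path[:len(r.path)-1] }

func main() {
	var r pathReporter
	cmp.Equal([]int{1, 2, 3}, []int{1, 5, 3}, cmp.Reporter(&r))
	fmt.Println(r.diffs) // roughly: [{[]int}[1]: 2 != 5]
}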
+ formatValueOptions
+}
+
+func (opts formatOptions) WithDiffMode(d diffMode) formatOptions {
+ opts.DiffMode = d
+ return opts
+}
+func (opts formatOptions) WithTypeMode(t typeMode) formatOptions {
+ opts.TypeMode = t
+ return opts
+}
+func (opts formatOptions) WithVerbosity(level int) formatOptions {
+ opts.VerbosityLevel = level
+ opts.LimitVerbosity = true
+ return opts
+}
+func (opts formatOptions) verbosity() uint {
+ switch {
+ case opts.VerbosityLevel < 0:
+ return 0
+ case opts.VerbosityLevel > 16:
+ return 16 // some reasonable maximum to avoid shift overflow
+ default:
+ return uint(opts.VerbosityLevel)
+ }
+}
+
+const maxVerbosityPreset = 6
+
+// verbosityPreset modifies the verbosity settings given an index
+// between 0 and maxVerbosityPreset, inclusive.
+func verbosityPreset(opts formatOptions, i int) formatOptions {
+ opts.VerbosityLevel = int(opts.verbosity()) + 2*i
+ if i > 0 {
+ opts.AvoidStringer = true
+ }
+ if i >= maxVerbosityPreset {
+ opts.PrintAddresses = true
+ opts.QualifiedNames = true
+ }
+ return opts
+}
+
+// FormatDiff converts a valueNode tree into a textNode tree, where the latter
+// is a textual representation of the differences detected in the former.
+func (opts formatOptions) FormatDiff(v *valueNode, ptrs *pointerReferences) (out textNode) {
+ if opts.DiffMode == diffIdentical {
+ opts = opts.WithVerbosity(1)
+ } else if opts.verbosity() < 3 {
+ opts = opts.WithVerbosity(3)
+ }
+
+ // Check whether we have specialized formatting for this node.
+ // This is not necessary, but helpful for producing more readable outputs.
+ if opts.CanFormatDiffSlice(v) {
+ return opts.FormatDiffSlice(v)
+ }
+
+ var parentKind reflect.Kind
+ if v.parent != nil && v.parent.TransformerName == "" {
+ parentKind = v.parent.Type.Kind()
+ }
+
+ // For leaf nodes, format the value based on the reflect.Values alone.
+ // As a special case, treat equal []byte as leaf nodes.
+ isBytes := v.Type.Kind() == reflect.Slice && v.Type.Elem() == byteType
+ isEqualBytes := isBytes && v.NumDiff+v.NumIgnored+v.NumTransformed == 0
+ if v.MaxDepth == 0 || isEqualBytes {
+ switch opts.DiffMode {
+ case diffUnknown, diffIdentical:
+ // Format Equal.
+ if v.NumDiff == 0 {
+ outx := opts.FormatValue(v.ValueX, parentKind, ptrs)
+ outy := opts.FormatValue(v.ValueY, parentKind, ptrs)
+ if v.NumIgnored > 0 && v.NumSame == 0 {
+ return textEllipsis
+ } else if outx.Len() < outy.Len() {
+ return outx
+ } else {
+ return outy
+ }
+ }
+
+ // Format unequal.
+ assert(opts.DiffMode == diffUnknown)
+ var list textList
+ outx := opts.WithTypeMode(elideType).FormatValue(v.ValueX, parentKind, ptrs)
+ outy := opts.WithTypeMode(elideType).FormatValue(v.ValueY, parentKind, ptrs)
+ for i := 0; i <= maxVerbosityPreset && outx != nil && outy != nil && outx.Equal(outy); i++ {
+ opts2 := verbosityPreset(opts, i).WithTypeMode(elideType)
+ outx = opts2.FormatValue(v.ValueX, parentKind, ptrs)
+ outy = opts2.FormatValue(v.ValueY, parentKind, ptrs)
+ }
+ if outx != nil {
+ list = append(list, textRecord{Diff: '-', Value: outx})
+ }
+ if outy != nil {
+ list = append(list, textRecord{Diff: '+', Value: outy})
+ }
+ return opts.WithTypeMode(emitType).FormatType(v.Type, list)
+ case diffRemoved:
+ return opts.FormatValue(v.ValueX, parentKind, ptrs)
+ case diffInserted:
+ return opts.FormatValue(v.ValueY, parentKind, ptrs)
+ default:
+ panic("invalid diff mode")
+ }
+ }
+
+ // Register slice element to support cycle detection.
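
As an aside before the traversal continues: the verbosity() value computed above is turned into element budgets at several points later in this file. A sketch of that budget arithmetic, with invented variable names and the shift formulas copied from the code:

package main

import "fmt"

func main() {
	for v := uint(0); v <= 4; v++ {
		identicalBudget := ((1 << v) >> 1) << 2 // 0, 4, 8, 16, 32, ...
		unequalBudget := (1 << v) << 1          // 2, 4, 8, 16, 32, ...
		fmt.Printf("verbosity=%d identical=%d unequal=%d\n",
			v, identicalBudget, unequalBudget)
	}
}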
+ if parentKind == reflect.Slice { + ptrRefs := ptrs.PushPair(v.ValueX, v.ValueY, opts.DiffMode, true) + defer ptrs.Pop() + defer func() { out = wrapTrunkReferences(ptrRefs, out) }() + } + + // Descend into the child value node. + if v.TransformerName != "" { + out := opts.WithTypeMode(emitType).FormatDiff(v.Value, ptrs) + out = &textWrap{Prefix: "Inverse(" + v.TransformerName + ", ", Value: out, Suffix: ")"} + return opts.FormatType(v.Type, out) + } else { + switch k := v.Type.Kind(); k { + case reflect.Struct, reflect.Array, reflect.Slice: + out = opts.formatDiffList(v.Records, k, ptrs) + out = opts.FormatType(v.Type, out) + case reflect.Map: + // Register map to support cycle detection. + ptrRefs := ptrs.PushPair(v.ValueX, v.ValueY, opts.DiffMode, false) + defer ptrs.Pop() + + out = opts.formatDiffList(v.Records, k, ptrs) + out = wrapTrunkReferences(ptrRefs, out) + out = opts.FormatType(v.Type, out) + case reflect.Ptr: + // Register pointer to support cycle detection. + ptrRefs := ptrs.PushPair(v.ValueX, v.ValueY, opts.DiffMode, false) + defer ptrs.Pop() + + out = opts.FormatDiff(v.Value, ptrs) + out = wrapTrunkReferences(ptrRefs, out) + out = &textWrap{Prefix: "&", Value: out} + case reflect.Interface: + out = opts.WithTypeMode(emitType).FormatDiff(v.Value, ptrs) + default: + panic(fmt.Sprintf("%v cannot have children", k)) + } + return out + } +} + +func (opts formatOptions) formatDiffList(recs []reportRecord, k reflect.Kind, ptrs *pointerReferences) textNode { + // Derive record name based on the data structure kind. + var name string + var formatKey func(reflect.Value) string + switch k { + case reflect.Struct: + name = "field" + opts = opts.WithTypeMode(autoType) + formatKey = func(v reflect.Value) string { return v.String() } + case reflect.Slice, reflect.Array: + name = "element" + opts = opts.WithTypeMode(elideType) + formatKey = func(reflect.Value) string { return "" } + case reflect.Map: + name = "entry" + opts = opts.WithTypeMode(elideType) + formatKey = func(v reflect.Value) string { return formatMapKey(v, false, ptrs) } + } + + maxLen := -1 + if opts.LimitVerbosity { + if opts.DiffMode == diffIdentical { + maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc... + } else { + maxLen = (1 << opts.verbosity()) << 1 // 2, 4, 8, 16, 32, 64, etc... + } + opts.VerbosityLevel-- + } + + // Handle unification. + switch opts.DiffMode { + case diffIdentical, diffRemoved, diffInserted: + var list textList + var deferredEllipsis bool // Add final "..." to indicate records were dropped + for _, r := range recs { + if len(list) == maxLen { + deferredEllipsis = true + break + } + + // Elide struct fields that are zero value. + if k == reflect.Struct { + var isZero bool + switch opts.DiffMode { + case diffIdentical: + isZero = r.Value.ValueX.IsZero() || r.Value.ValueY.IsZero() + case diffRemoved: + isZero = r.Value.ValueX.IsZero() + case diffInserted: + isZero = r.Value.ValueY.IsZero() + } + if isZero { + continue + } + } + // Elide ignored nodes. 
+ if r.Value.NumIgnored > 0 && r.Value.NumSame+r.Value.NumDiff == 0 { + deferredEllipsis = !(k == reflect.Slice || k == reflect.Array) + if !deferredEllipsis { + list.AppendEllipsis(diffStats{}) + } + continue + } + if out := opts.FormatDiff(r.Value, ptrs); out != nil { + list = append(list, textRecord{Key: formatKey(r.Key), Value: out}) + } + } + if deferredEllipsis { + list.AppendEllipsis(diffStats{}) + } + return &textWrap{Prefix: "{", Value: list, Suffix: "}"} + case diffUnknown: + default: + panic("invalid diff mode") + } + + // Handle differencing. + var numDiffs int + var list textList + var keys []reflect.Value // invariant: len(list) == len(keys) + groups := coalesceAdjacentRecords(name, recs) + maxGroup := diffStats{Name: name} + for i, ds := range groups { + if maxLen >= 0 && numDiffs >= maxLen { + maxGroup = maxGroup.Append(ds) + continue + } + + // Handle equal records. + if ds.NumDiff() == 0 { + // Compute the number of leading and trailing records to print. + var numLo, numHi int + numEqual := ds.NumIgnored + ds.NumIdentical + for numLo < numContextRecords && numLo+numHi < numEqual && i != 0 { + if r := recs[numLo].Value; r.NumIgnored > 0 && r.NumSame+r.NumDiff == 0 { + break + } + numLo++ + } + for numHi < numContextRecords && numLo+numHi < numEqual && i != len(groups)-1 { + if r := recs[numEqual-numHi-1].Value; r.NumIgnored > 0 && r.NumSame+r.NumDiff == 0 { + break + } + numHi++ + } + if numEqual-(numLo+numHi) == 1 && ds.NumIgnored == 0 { + numHi++ // Avoid pointless coalescing of a single equal record + } + + // Format the equal values. + for _, r := range recs[:numLo] { + out := opts.WithDiffMode(diffIdentical).FormatDiff(r.Value, ptrs) + list = append(list, textRecord{Key: formatKey(r.Key), Value: out}) + keys = append(keys, r.Key) + } + if numEqual > numLo+numHi { + ds.NumIdentical -= numLo + numHi + list.AppendEllipsis(ds) + for len(keys) < len(list) { + keys = append(keys, reflect.Value{}) + } + } + for _, r := range recs[numEqual-numHi : numEqual] { + out := opts.WithDiffMode(diffIdentical).FormatDiff(r.Value, ptrs) + list = append(list, textRecord{Key: formatKey(r.Key), Value: out}) + keys = append(keys, r.Key) + } + recs = recs[numEqual:] + continue + } + + // Handle unequal records. 
+ for _, r := range recs[:ds.NumDiff()] { + switch { + case opts.CanFormatDiffSlice(r.Value): + out := opts.FormatDiffSlice(r.Value) + list = append(list, textRecord{Key: formatKey(r.Key), Value: out}) + keys = append(keys, r.Key) + case r.Value.NumChildren == r.Value.MaxDepth: + outx := opts.WithDiffMode(diffRemoved).FormatDiff(r.Value, ptrs) + outy := opts.WithDiffMode(diffInserted).FormatDiff(r.Value, ptrs) + for i := 0; i <= maxVerbosityPreset && outx != nil && outy != nil && outx.Equal(outy); i++ { + opts2 := verbosityPreset(opts, i) + outx = opts2.WithDiffMode(diffRemoved).FormatDiff(r.Value, ptrs) + outy = opts2.WithDiffMode(diffInserted).FormatDiff(r.Value, ptrs) + } + if outx != nil { + list = append(list, textRecord{Diff: diffRemoved, Key: formatKey(r.Key), Value: outx}) + keys = append(keys, r.Key) + } + if outy != nil { + list = append(list, textRecord{Diff: diffInserted, Key: formatKey(r.Key), Value: outy}) + keys = append(keys, r.Key) + } + default: + out := opts.FormatDiff(r.Value, ptrs) + list = append(list, textRecord{Key: formatKey(r.Key), Value: out}) + keys = append(keys, r.Key) + } + } + recs = recs[ds.NumDiff():] + numDiffs += ds.NumDiff() + } + if maxGroup.IsZero() { + assert(len(recs) == 0) + } else { + list.AppendEllipsis(maxGroup) + for len(keys) < len(list) { + keys = append(keys, reflect.Value{}) + } + } + assert(len(list) == len(keys)) + + // For maps, the default formatting logic uses fmt.Stringer which may + // produce ambiguous output. Avoid calling String to disambiguate. + if k == reflect.Map { + var ambiguous bool + seenKeys := map[string]reflect.Value{} + for i, currKey := range keys { + if currKey.IsValid() { + strKey := list[i].Key + prevKey, seen := seenKeys[strKey] + if seen && prevKey.CanInterface() && currKey.CanInterface() { + ambiguous = prevKey.Interface() != currKey.Interface() + if ambiguous { + break + } + } + seenKeys[strKey] = currKey + } + } + if ambiguous { + for i, k := range keys { + if k.IsValid() { + list[i].Key = formatMapKey(k, true, ptrs) + } + } + } + } + + return &textWrap{Prefix: "{", Value: list, Suffix: "}"} +} + +// coalesceAdjacentRecords coalesces the list of records into groups of +// adjacent equal, or unequal counts. +func coalesceAdjacentRecords(name string, recs []reportRecord) (groups []diffStats) { + var prevCase int // Arbitrary index into which case last occurred + lastStats := func(i int) *diffStats { + if prevCase != i { + groups = append(groups, diffStats{Name: name}) + prevCase = i + } + return &groups[len(groups)-1] + } + for _, r := range recs { + switch rv := r.Value; { + case rv.NumIgnored > 0 && rv.NumSame+rv.NumDiff == 0: + lastStats(1).NumIgnored++ + case rv.NumDiff == 0: + lastStats(1).NumIdentical++ + case rv.NumDiff > 0 && !rv.ValueY.IsValid(): + lastStats(2).NumRemoved++ + case rv.NumDiff > 0 && !rv.ValueX.IsValid(): + lastStats(2).NumInserted++ + default: + lastStats(2).NumModified++ + } + } + return groups +} diff --git a/vendor/github.com/google/go-cmp/cmp/report_references.go b/vendor/github.com/google/go-cmp/cmp/report_references.go new file mode 100644 index 000000000..be31b33a9 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/report_references.go @@ -0,0 +1,264 @@ +// Copyright 2020, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package cmp + +import ( + "fmt" + "reflect" + "strings" + + "github.com/google/go-cmp/cmp/internal/flags" + "github.com/google/go-cmp/cmp/internal/value" +) + +const ( + pointerDelimPrefix = "⟪" + pointerDelimSuffix = "⟫" +) + +// formatPointer prints the address of the pointer. +func formatPointer(p value.Pointer, withDelims bool) string { + v := p.Uintptr() + if flags.Deterministic { + v = 0xdeadf00f // Only used for stable testing purposes + } + if withDelims { + return pointerDelimPrefix + formatHex(uint64(v)) + pointerDelimSuffix + } + return formatHex(uint64(v)) +} + +// pointerReferences is a stack of pointers visited so far. +type pointerReferences [][2]value.Pointer + +func (ps *pointerReferences) PushPair(vx, vy reflect.Value, d diffMode, deref bool) (pp [2]value.Pointer) { + if deref && vx.IsValid() { + vx = vx.Addr() + } + if deref && vy.IsValid() { + vy = vy.Addr() + } + switch d { + case diffUnknown, diffIdentical: + pp = [2]value.Pointer{value.PointerOf(vx), value.PointerOf(vy)} + case diffRemoved: + pp = [2]value.Pointer{value.PointerOf(vx), value.Pointer{}} + case diffInserted: + pp = [2]value.Pointer{value.Pointer{}, value.PointerOf(vy)} + } + *ps = append(*ps, pp) + return pp +} + +func (ps *pointerReferences) Push(v reflect.Value) (p value.Pointer, seen bool) { + p = value.PointerOf(v) + for _, pp := range *ps { + if p == pp[0] || p == pp[1] { + return p, true + } + } + *ps = append(*ps, [2]value.Pointer{p, p}) + return p, false +} + +func (ps *pointerReferences) Pop() { + *ps = (*ps)[:len(*ps)-1] +} + +// trunkReferences is metadata for a textNode indicating that the sub-tree +// represents the value for either pointer in a pair of references. +type trunkReferences struct{ pp [2]value.Pointer } + +// trunkReference is metadata for a textNode indicating that the sub-tree +// represents the value for the given pointer reference. +type trunkReference struct{ p value.Pointer } + +// leafReference is metadata for a textNode indicating that the value is +// truncated as it refers to another part of the tree (i.e., a trunk). +type leafReference struct{ p value.Pointer } + +func wrapTrunkReferences(pp [2]value.Pointer, s textNode) textNode { + switch { + case pp[0].IsNil(): + return &textWrap{Value: s, Metadata: trunkReference{pp[1]}} + case pp[1].IsNil(): + return &textWrap{Value: s, Metadata: trunkReference{pp[0]}} + case pp[0] == pp[1]: + return &textWrap{Value: s, Metadata: trunkReference{pp[0]}} + default: + return &textWrap{Value: s, Metadata: trunkReferences{pp}} + } +} +func wrapTrunkReference(p value.Pointer, printAddress bool, s textNode) textNode { + var prefix string + if printAddress { + prefix = formatPointer(p, true) + } + return &textWrap{Prefix: prefix, Value: s, Metadata: trunkReference{p}} +} +func makeLeafReference(p value.Pointer, printAddress bool) textNode { + out := &textWrap{Prefix: "(", Value: textEllipsis, Suffix: ")"} + var prefix string + if printAddress { + prefix = formatPointer(p, true) + } + return &textWrap{Prefix: prefix, Value: out, Metadata: leafReference{p}} +} + +// resolveReferences walks the textNode tree searching for any leaf reference +// metadata and resolves each against the corresponding trunk references. +// Since pointer addresses in memory are not particularly readable to the user, +// it replaces each pointer value with an arbitrary and unique reference ID. 
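
Before the function itself: the reference prefixes that resolveReferences installs are built by formatReference and updateReferencePrefix, defined at the end of this file. A hedged standalone demo that re-implements both helpers so it runs on its own:

package main

import (
	"fmt"
	"strings"
)

const delimPrefix, delimSuffix = "⟪", "⟫"

func formatReference(id uint) string { return fmt.Sprintf("ref#%d", id) }

func updateReferencePrefix(prefix, ref string) string {
	if prefix == "" {
		return delimPrefix + ref + delimSuffix
	}
	suffix := strings.TrimPrefix(prefix, delimPrefix)
	return delimPrefix + ref + ": " + suffix
}

func main() {
	// A trunk with no prior prefix gets just the ID.
	fmt.Println(updateReferencePrefix("", formatReference(0)))
	// ⟪ref#0⟫
	// A trunk already prefixed with an address keeps it after the ID.
	fmt.Println(updateReferencePrefix("⟪0xdeadf00f⟫", formatReference(1)))
	// ⟪ref#1: 0xdeadf00f⟫
}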
+func resolveReferences(s textNode) { + var walkNodes func(textNode, func(textNode)) + walkNodes = func(s textNode, f func(textNode)) { + f(s) + switch s := s.(type) { + case *textWrap: + walkNodes(s.Value, f) + case textList: + for _, r := range s { + walkNodes(r.Value, f) + } + } + } + + // Collect all trunks and leaves with reference metadata. + var trunks, leaves []*textWrap + walkNodes(s, func(s textNode) { + if s, ok := s.(*textWrap); ok { + switch s.Metadata.(type) { + case leafReference: + leaves = append(leaves, s) + case trunkReference, trunkReferences: + trunks = append(trunks, s) + } + } + }) + + // No leaf references to resolve. + if len(leaves) == 0 { + return + } + + // Collect the set of all leaf references to resolve. + leafPtrs := make(map[value.Pointer]bool) + for _, leaf := range leaves { + leafPtrs[leaf.Metadata.(leafReference).p] = true + } + + // Collect the set of trunk pointers that are always paired together. + // This allows us to assign a single ID to both pointers for brevity. + // If a pointer in a pair ever occurs by itself or as a different pair, + // then the pair is broken. + pairedTrunkPtrs := make(map[value.Pointer]value.Pointer) + unpair := func(p value.Pointer) { + if !pairedTrunkPtrs[p].IsNil() { + pairedTrunkPtrs[pairedTrunkPtrs[p]] = value.Pointer{} // invalidate other half + } + pairedTrunkPtrs[p] = value.Pointer{} // invalidate this half + } + for _, trunk := range trunks { + switch p := trunk.Metadata.(type) { + case trunkReference: + unpair(p.p) // standalone pointer cannot be part of a pair + case trunkReferences: + p0, ok0 := pairedTrunkPtrs[p.pp[0]] + p1, ok1 := pairedTrunkPtrs[p.pp[1]] + switch { + case !ok0 && !ok1: + // Register the newly seen pair. + pairedTrunkPtrs[p.pp[0]] = p.pp[1] + pairedTrunkPtrs[p.pp[1]] = p.pp[0] + case ok0 && ok1 && p0 == p.pp[1] && p1 == p.pp[0]: + // Exact pair already seen; do nothing. + default: + // Pair conflicts with some other pair; break all pairs. + unpair(p.pp[0]) + unpair(p.pp[1]) + } + } + } + + // Correlate each pointer referenced by leaves to a unique identifier, + // and print the IDs for each trunk that matches those pointers. 
+ var nextID uint + ptrIDs := make(map[value.Pointer]uint) + newID := func() uint { + id := nextID + nextID++ + return id + } + for _, trunk := range trunks { + switch p := trunk.Metadata.(type) { + case trunkReference: + if print := leafPtrs[p.p]; print { + id, ok := ptrIDs[p.p] + if !ok { + id = newID() + ptrIDs[p.p] = id + } + trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id)) + } + case trunkReferences: + print0 := leafPtrs[p.pp[0]] + print1 := leafPtrs[p.pp[1]] + if print0 || print1 { + id0, ok0 := ptrIDs[p.pp[0]] + id1, ok1 := ptrIDs[p.pp[1]] + isPair := pairedTrunkPtrs[p.pp[0]] == p.pp[1] && pairedTrunkPtrs[p.pp[1]] == p.pp[0] + if isPair { + var id uint + assert(ok0 == ok1) // must be seen together or not at all + if ok0 { + assert(id0 == id1) // must have the same ID + id = id0 + } else { + id = newID() + ptrIDs[p.pp[0]] = id + ptrIDs[p.pp[1]] = id + } + trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id)) + } else { + if print0 && !ok0 { + id0 = newID() + ptrIDs[p.pp[0]] = id0 + } + if print1 && !ok1 { + id1 = newID() + ptrIDs[p.pp[1]] = id1 + } + switch { + case print0 && print1: + trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id0)+","+formatReference(id1)) + case print0: + trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id0)) + case print1: + trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id1)) + } + } + } + } + } + + // Update all leaf references with the unique identifier. + for _, leaf := range leaves { + if id, ok := ptrIDs[leaf.Metadata.(leafReference).p]; ok { + leaf.Prefix = updateReferencePrefix(leaf.Prefix, formatReference(id)) + } + } +} + +func formatReference(id uint) string { + return fmt.Sprintf("ref#%d", id) +} + +func updateReferencePrefix(prefix, ref string) string { + if prefix == "" { + return pointerDelimPrefix + ref + pointerDelimSuffix + } + suffix := strings.TrimPrefix(prefix, pointerDelimPrefix) + return pointerDelimPrefix + ref + ": " + suffix +} diff --git a/vendor/github.com/google/go-cmp/cmp/report_reflect.go b/vendor/github.com/google/go-cmp/cmp/report_reflect.go new file mode 100644 index 000000000..e39f42284 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/report_reflect.go @@ -0,0 +1,414 @@ +// Copyright 2019, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cmp + +import ( + "bytes" + "fmt" + "reflect" + "strconv" + "strings" + "unicode" + "unicode/utf8" + + "github.com/google/go-cmp/cmp/internal/value" +) + +var ( + anyType = reflect.TypeOf((*interface{})(nil)).Elem() + stringType = reflect.TypeOf((*string)(nil)).Elem() + bytesType = reflect.TypeOf((*[]byte)(nil)).Elem() + byteType = reflect.TypeOf((*byte)(nil)).Elem() +) + +type formatValueOptions struct { + // AvoidStringer controls whether to avoid calling custom stringer + // methods like error.Error or fmt.Stringer.String. + AvoidStringer bool + + // PrintAddresses controls whether to print the address of all pointers, + // slice elements, and maps. + PrintAddresses bool + + // QualifiedNames controls whether FormatType uses the fully qualified name + // (including the full package path as opposed to just the package name). + QualifiedNames bool + + // VerbosityLevel controls the amount of output to produce. + // A higher value produces more output. A value of zero or lower produces + // no output (represented using an ellipsis). 
+ // If LimitVerbosity is false, then the level is treated as infinite.
+ VerbosityLevel int
+
+ // LimitVerbosity specifies that formatting should respect VerbosityLevel.
+ LimitVerbosity bool
+}
+
+// FormatType prints the type as if it were wrapping s.
+// This may return s as-is depending on the current type and TypeMode mode.
+func (opts formatOptions) FormatType(t reflect.Type, s textNode) textNode {
+ // Check whether to emit the type or not.
+ switch opts.TypeMode {
+ case autoType:
+ switch t.Kind() {
+ case reflect.Struct, reflect.Slice, reflect.Array, reflect.Map:
+ if s.Equal(textNil) {
+ return s
+ }
+ default:
+ return s
+ }
+ if opts.DiffMode == diffIdentical {
+ return s // elide type for identical nodes
+ }
+ case elideType:
+ return s
+ }
+
+ // Determine the type label, applying special handling for unnamed types.
+ typeName := value.TypeString(t, opts.QualifiedNames)
+ if t.Name() == "" {
+ // According to Go grammar, certain type literals contain symbols that
+ // do not strongly bind to the next lexicographical token (e.g., *T).
+ switch t.Kind() {
+ case reflect.Chan, reflect.Func, reflect.Ptr:
+ typeName = "(" + typeName + ")"
+ }
+ }
+ return &textWrap{Prefix: typeName, Value: wrapParens(s)}
+}
+
+// wrapParens wraps s with a set of parentheses, but avoids it if the
+// wrapped node itself is already surrounded by a pair of parentheses or braces.
+// It handles unwrapping one level of pointer-reference nodes.
+func wrapParens(s textNode) textNode {
+ var refNode *textWrap
+ if s2, ok := s.(*textWrap); ok {
+ // Unwrap a single pointer reference node.
+ switch s2.Metadata.(type) {
+ case leafReference, trunkReference, trunkReferences:
+ refNode = s2
+ if s3, ok := refNode.Value.(*textWrap); ok {
+ s2 = s3
+ }
+ }
+
+ // Already has delimiters that make parentheses unnecessary.
+ hasParens := strings.HasPrefix(s2.Prefix, "(") && strings.HasSuffix(s2.Suffix, ")")
+ hasBraces := strings.HasPrefix(s2.Prefix, "{") && strings.HasSuffix(s2.Suffix, "}")
+ if hasParens || hasBraces {
+ return s
+ }
+ }
+ if refNode != nil {
+ refNode.Value = &textWrap{Prefix: "(", Value: refNode.Value, Suffix: ")"}
+ return s
+ }
+ return &textWrap{Prefix: "(", Value: s, Suffix: ")"}
+}
+
+// FormatValue prints the reflect.Value, taking extra care to avoid descending
+// into pointers already in ptrs. As pointers are visited, ptrs is also updated.
+func (opts formatOptions) FormatValue(v reflect.Value, parentKind reflect.Kind, ptrs *pointerReferences) (out textNode) {
+ if !v.IsValid() {
+ return nil
+ }
+ t := v.Type()
+
+ // Check slice element for cycles.
+ if parentKind == reflect.Slice {
+ ptrRef, visited := ptrs.Push(v.Addr())
+ if visited {
+ return makeLeafReference(ptrRef, false)
+ }
+ defer ptrs.Pop()
+ defer func() { out = wrapTrunkReference(ptrRef, false, out) }()
+ }
+
+ // Check whether there is an Error or String method to call.
+ if !opts.AvoidStringer && v.CanInterface() {
+ // Avoid calling Error or String methods on nil receivers since many
+ // implementations crash when doing so.
+ if (t.Kind() != reflect.Ptr && t.Kind() != reflect.Interface) || !v.IsNil() {
+ var prefix, strVal string
+ func() {
+ // Swallow and ignore any panics from String or Error.
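
The recover idiom used in the next few lines, isolated into a runnable sketch (bomb is a hypothetical type whose String method panics):

package main

import "fmt"

type bomb struct{}

func (bomb) String() string { panic("boom") }

func main() {
	var s string
	func() {
		// Swallow any panic from String, as FormatValue does below.
		defer func() { recover() }()
		s = bomb{}.String()
	}()
	fmt.Printf("recovered; s=%q\n", s) // recovered; s=""
}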
+ defer func() { recover() }() + switch v := v.Interface().(type) { + case error: + strVal = v.Error() + prefix = "e" + case fmt.Stringer: + strVal = v.String() + prefix = "s" + } + }() + if prefix != "" { + return opts.formatString(prefix, strVal) + } + } + } + + // Check whether to explicitly wrap the result with the type. + var skipType bool + defer func() { + if !skipType { + out = opts.FormatType(t, out) + } + }() + + switch t.Kind() { + case reflect.Bool: + return textLine(fmt.Sprint(v.Bool())) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return textLine(fmt.Sprint(v.Int())) + case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return textLine(fmt.Sprint(v.Uint())) + case reflect.Uint8: + if parentKind == reflect.Slice || parentKind == reflect.Array { + return textLine(formatHex(v.Uint())) + } + return textLine(fmt.Sprint(v.Uint())) + case reflect.Uintptr: + return textLine(formatHex(v.Uint())) + case reflect.Float32, reflect.Float64: + return textLine(fmt.Sprint(v.Float())) + case reflect.Complex64, reflect.Complex128: + return textLine(fmt.Sprint(v.Complex())) + case reflect.String: + return opts.formatString("", v.String()) + case reflect.UnsafePointer, reflect.Chan, reflect.Func: + return textLine(formatPointer(value.PointerOf(v), true)) + case reflect.Struct: + var list textList + v := makeAddressable(v) // needed for retrieveUnexportedField + maxLen := v.NumField() + if opts.LimitVerbosity { + maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc... + opts.VerbosityLevel-- + } + for i := 0; i < v.NumField(); i++ { + vv := v.Field(i) + if vv.IsZero() { + continue // Elide fields with zero values + } + if len(list) == maxLen { + list.AppendEllipsis(diffStats{}) + break + } + sf := t.Field(i) + if !isExported(sf.Name) { + vv = retrieveUnexportedField(v, sf, true) + } + s := opts.WithTypeMode(autoType).FormatValue(vv, t.Kind(), ptrs) + list = append(list, textRecord{Key: sf.Name, Value: s}) + } + return &textWrap{Prefix: "{", Value: list, Suffix: "}"} + case reflect.Slice: + if v.IsNil() { + return textNil + } + + // Check whether this is a []byte of text data. + if t.Elem() == byteType { + b := v.Bytes() + isPrintSpace := func(r rune) bool { return unicode.IsPrint(r) || unicode.IsSpace(r) } + if len(b) > 0 && utf8.Valid(b) && len(bytes.TrimFunc(b, isPrintSpace)) == 0 { + out = opts.formatString("", string(b)) + skipType = true + return opts.FormatType(t, out) + } + } + + fallthrough + case reflect.Array: + maxLen := v.Len() + if opts.LimitVerbosity { + maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc... + opts.VerbosityLevel-- + } + var list textList + for i := 0; i < v.Len(); i++ { + if len(list) == maxLen { + list.AppendEllipsis(diffStats{}) + break + } + s := opts.WithTypeMode(elideType).FormatValue(v.Index(i), t.Kind(), ptrs) + list = append(list, textRecord{Value: s}) + } + + out = &textWrap{Prefix: "{", Value: list, Suffix: "}"} + if t.Kind() == reflect.Slice && opts.PrintAddresses { + header := fmt.Sprintf("ptr:%v, len:%d, cap:%d", formatPointer(value.PointerOf(v), false), v.Len(), v.Cap()) + out = &textWrap{Prefix: pointerDelimPrefix + header + pointerDelimSuffix, Value: out} + } + return out + case reflect.Map: + if v.IsNil() { + return textNil + } + + // Check pointer for cycles. 
+ ptrRef, visited := ptrs.Push(v) + if visited { + return makeLeafReference(ptrRef, opts.PrintAddresses) + } + defer ptrs.Pop() + + maxLen := v.Len() + if opts.LimitVerbosity { + maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc... + opts.VerbosityLevel-- + } + var list textList + for _, k := range value.SortKeys(v.MapKeys()) { + if len(list) == maxLen { + list.AppendEllipsis(diffStats{}) + break + } + sk := formatMapKey(k, false, ptrs) + sv := opts.WithTypeMode(elideType).FormatValue(v.MapIndex(k), t.Kind(), ptrs) + list = append(list, textRecord{Key: sk, Value: sv}) + } + + out = &textWrap{Prefix: "{", Value: list, Suffix: "}"} + out = wrapTrunkReference(ptrRef, opts.PrintAddresses, out) + return out + case reflect.Ptr: + if v.IsNil() { + return textNil + } + + // Check pointer for cycles. + ptrRef, visited := ptrs.Push(v) + if visited { + out = makeLeafReference(ptrRef, opts.PrintAddresses) + return &textWrap{Prefix: "&", Value: out} + } + defer ptrs.Pop() + + // Skip the name only if this is an unnamed pointer type. + // Otherwise taking the address of a value does not reproduce + // the named pointer type. + if v.Type().Name() == "" { + skipType = true // Let the underlying value print the type instead + } + out = opts.FormatValue(v.Elem(), t.Kind(), ptrs) + out = wrapTrunkReference(ptrRef, opts.PrintAddresses, out) + out = &textWrap{Prefix: "&", Value: out} + return out + case reflect.Interface: + if v.IsNil() { + return textNil + } + // Interfaces accept different concrete types, + // so configure the underlying value to explicitly print the type. + return opts.WithTypeMode(emitType).FormatValue(v.Elem(), t.Kind(), ptrs) + default: + panic(fmt.Sprintf("%v kind not handled", v.Kind())) + } +} + +func (opts formatOptions) formatString(prefix, s string) textNode { + maxLen := len(s) + maxLines := strings.Count(s, "\n") + 1 + if opts.LimitVerbosity { + maxLen = (1 << opts.verbosity()) << 5 // 32, 64, 128, 256, etc... + maxLines = (1 << opts.verbosity()) << 2 // 4, 8, 16, 32, 64, etc... + } + + // For multiline strings, use the triple-quote syntax, + // but only use it when printing removed or inserted nodes since + // we only want the extra verbosity for those cases. 
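
End to end, the triple-quote syntax implemented below is what cmp.Diff tends to emit for multiline strings of four or more lines; a hedged example (the exact output is intentionally unstable, see report_text.go):

package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
)

func main() {
	x := "alpha\nbravo\ncharlie\ndelta\nfoxtrot\n"
	y := "alpha\nbravo\nCHARLIE\ndelta\nfoxtrot\n"
	fmt.Println(cmp.Diff(x, y))
	// Roughly:
	//   (
	//   	"""
	//   	alpha
	//   	bravo
	//   -	charlie
	//   +	CHARLIE
	//   	delta
	//   	foxtrot
	//   	"""
	//   )
}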
+ lines := strings.Split(strings.TrimSuffix(s, "\n"), "\n")
+ isTripleQuoted := len(lines) >= 4 && (opts.DiffMode == '-' || opts.DiffMode == '+')
+ for i := 0; i < len(lines) && isTripleQuoted; i++ {
+ lines[i] = strings.TrimPrefix(strings.TrimSuffix(lines[i], "\r"), "\r") // trim leading/trailing carriage returns for legacy Windows endline support
+ isPrintable := func(r rune) bool {
+ return unicode.IsPrint(r) || r == '\t' // specially treat tab as printable
+ }
+ line := lines[i]
+ isTripleQuoted = !strings.HasPrefix(strings.TrimPrefix(line, prefix), `"""`) && !strings.HasPrefix(line, "...") && strings.TrimFunc(line, isPrintable) == "" && len(line) <= maxLen
+ }
+ if isTripleQuoted {
+ var list textList
+ list = append(list, textRecord{Diff: opts.DiffMode, Value: textLine(prefix + `"""`), ElideComma: true})
+ for i, line := range lines {
+ if numElided := len(lines) - i; i == maxLines-1 && numElided > 1 {
+ comment := commentString(fmt.Sprintf("%d elided lines", numElided))
+ list = append(list, textRecord{Diff: opts.DiffMode, Value: textEllipsis, ElideComma: true, Comment: comment})
+ break
+ }
+ list = append(list, textRecord{Diff: opts.DiffMode, Value: textLine(line), ElideComma: true})
+ }
+ list = append(list, textRecord{Diff: opts.DiffMode, Value: textLine(prefix + `"""`), ElideComma: true})
+ return &textWrap{Prefix: "(", Value: list, Suffix: ")"}
+ }
+
+ // Format the string as a single-line quoted string.
+ if len(s) > maxLen+len(textEllipsis) {
+ return textLine(prefix + formatString(s[:maxLen]) + string(textEllipsis))
+ }
+ return textLine(prefix + formatString(s))
+}
+
+// formatMapKey formats v as if it were a map key.
+// The result is guaranteed to be a single line.
+func formatMapKey(v reflect.Value, disambiguate bool, ptrs *pointerReferences) string {
+ var opts formatOptions
+ opts.DiffMode = diffIdentical
+ opts.TypeMode = elideType
+ opts.PrintAddresses = disambiguate
+ opts.AvoidStringer = disambiguate
+ opts.QualifiedNames = disambiguate
+ opts.VerbosityLevel = maxVerbosityPreset
+ opts.LimitVerbosity = true
+ s := opts.FormatValue(v, reflect.Map, ptrs).String()
+ return strings.TrimSpace(s)
+}
+
+// formatString prints s as a double-quoted or backtick-quoted string.
+func formatString(s string) string {
+ // Use the quoted string if it is the same length as a raw string literal.
+ // Otherwise, attempt to use the raw string form.
+ qs := strconv.Quote(s)
+ if len(qs) == 1+len(s)+1 {
+ return qs
+ }
+
+ // Disallow newlines to ensure output is a single line.
+ // Only allow printable runes for readability purposes.
+ rawInvalid := func(r rune) bool {
+ return r == '`' || r == '\n' || !(unicode.IsPrint(r) || r == '\t')
+ }
+ if utf8.ValidString(s) && strings.IndexFunc(s, rawInvalid) < 0 {
+ return "`" + s + "`"
+ }
+ return qs
+}
+
+// formatHex prints u as a hexadecimal integer in Go notation.
+func formatHex(u uint64) string { + var f string + switch { + case u <= 0xff: + f = "0x%02x" + case u <= 0xffff: + f = "0x%04x" + case u <= 0xffffff: + f = "0x%06x" + case u <= 0xffffffff: + f = "0x%08x" + case u <= 0xffffffffff: + f = "0x%010x" + case u <= 0xffffffffffff: + f = "0x%012x" + case u <= 0xffffffffffffff: + f = "0x%014x" + case u <= 0xffffffffffffffff: + f = "0x%016x" + } + return fmt.Sprintf(f, u) +} diff --git a/vendor/github.com/google/go-cmp/cmp/report_slices.go b/vendor/github.com/google/go-cmp/cmp/report_slices.go new file mode 100644 index 000000000..23e444f62 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/report_slices.go @@ -0,0 +1,614 @@ +// Copyright 2019, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cmp + +import ( + "bytes" + "fmt" + "math" + "reflect" + "strconv" + "strings" + "unicode" + "unicode/utf8" + + "github.com/google/go-cmp/cmp/internal/diff" +) + +// CanFormatDiffSlice reports whether we support custom formatting for nodes +// that are slices of primitive kinds or strings. +func (opts formatOptions) CanFormatDiffSlice(v *valueNode) bool { + switch { + case opts.DiffMode != diffUnknown: + return false // Must be formatting in diff mode + case v.NumDiff == 0: + return false // No differences detected + case !v.ValueX.IsValid() || !v.ValueY.IsValid(): + return false // Both values must be valid + case v.NumIgnored > 0: + return false // Some ignore option was used + case v.NumTransformed > 0: + return false // Some transform option was used + case v.NumCompared > 1: + return false // More than one comparison was used + case v.NumCompared == 1 && v.Type.Name() != "": + // The need for cmp to check applicability of options on every element + // in a slice is a significant performance detriment for large []byte. + // The workaround is to specify Comparer(bytes.Equal), + // which enables cmp to compare []byte more efficiently. + // If they differ, we still want to provide batched diffing. + // The logic disallows named types since they tend to have their own + // String method, with nicer formatting than what this provides. + return false + } + + // Check whether this is an interface with the same concrete types. + t := v.Type + vx, vy := v.ValueX, v.ValueY + if t.Kind() == reflect.Interface && !vx.IsNil() && !vy.IsNil() && vx.Elem().Type() == vy.Elem().Type() { + vx, vy = vx.Elem(), vy.Elem() + t = vx.Type() + } + + // Check whether we provide specialized diffing for this type. + switch t.Kind() { + case reflect.String: + case reflect.Array, reflect.Slice: + // Only slices of primitive types have specialized handling. + switch t.Elem().Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, + reflect.Bool, reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128: + default: + return false + } + + // Both slice values have to be non-empty. + if t.Kind() == reflect.Slice && (vx.Len() == 0 || vy.Len() == 0) { + return false + } + + // If a sufficient number of elements already differ, + // use specialized formatting even if length requirement is not met. + if v.NumDiff > v.NumSame { + return true + } + default: + return false + } + + // Use specialized string diffing for longer slices or strings. 
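
A quick standalone check of formatHex's width selection above; the loop below is an assumed equivalent of the switch, padding to an even number of hex digits:

package main

import "fmt"

// formatHexDemo mirrors formatHex: pad to an even digit count so that
// byte boundaries remain visible.
func formatHexDemo(u uint64) string {
	n := 2
	for x := u; x > 0xff; x >>= 8 {
		n += 2
	}
	return fmt.Sprintf("0x%0*x", n, u)
}

func main() {
	fmt.Println(formatHexDemo(0x1))      // 0x01
	fmt.Println(formatHexDemo(0x1234))   // 0x1234
	fmt.Println(formatHexDemo(0x123456)) // 0x123456
}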
+ const minLength = 32 + return vx.Len() >= minLength && vy.Len() >= minLength +} + +// FormatDiffSlice prints a diff for the slices (or strings) represented by v. +// This provides custom-tailored logic to make printing of differences in +// textual strings and slices of primitive kinds more readable. +func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { + assert(opts.DiffMode == diffUnknown) + t, vx, vy := v.Type, v.ValueX, v.ValueY + if t.Kind() == reflect.Interface { + vx, vy = vx.Elem(), vy.Elem() + t = vx.Type() + opts = opts.WithTypeMode(emitType) + } + + // Auto-detect the type of the data. + var sx, sy string + var ssx, ssy []string + var isString, isMostlyText, isPureLinedText, isBinary bool + switch { + case t.Kind() == reflect.String: + sx, sy = vx.String(), vy.String() + isString = true + case t.Kind() == reflect.Slice && t.Elem() == byteType: + sx, sy = string(vx.Bytes()), string(vy.Bytes()) + isString = true + case t.Kind() == reflect.Array: + // Arrays need to be addressable for slice operations to work. + vx2, vy2 := reflect.New(t).Elem(), reflect.New(t).Elem() + vx2.Set(vx) + vy2.Set(vy) + vx, vy = vx2, vy2 + } + if isString { + var numTotalRunes, numValidRunes, numLines, lastLineIdx, maxLineLen int + for i, r := range sx + sy { + numTotalRunes++ + if (unicode.IsPrint(r) || unicode.IsSpace(r)) && r != utf8.RuneError { + numValidRunes++ + } + if r == '\n' { + if maxLineLen < i-lastLineIdx { + maxLineLen = i - lastLineIdx + } + lastLineIdx = i + 1 + numLines++ + } + } + isPureText := numValidRunes == numTotalRunes + isMostlyText = float64(numValidRunes) > math.Floor(0.90*float64(numTotalRunes)) + isPureLinedText = isPureText && numLines >= 4 && maxLineLen <= 1024 + isBinary = !isMostlyText + + // Avoid diffing by lines if it produces a significantly more complex + // edit script than diffing by bytes. + if isPureLinedText { + ssx = strings.Split(sx, "\n") + ssy = strings.Split(sy, "\n") + esLines := diff.Difference(len(ssx), len(ssy), func(ix, iy int) diff.Result { + return diff.BoolResult(ssx[ix] == ssy[iy]) + }) + esBytes := diff.Difference(len(sx), len(sy), func(ix, iy int) diff.Result { + return diff.BoolResult(sx[ix] == sy[iy]) + }) + efficiencyLines := float64(esLines.Dist()) / float64(len(esLines)) + efficiencyBytes := float64(esBytes.Dist()) / float64(len(esBytes)) + quotedLength := len(strconv.Quote(sx + sy)) + unquotedLength := len(sx) + len(sy) + escapeExpansionRatio := float64(quotedLength) / float64(unquotedLength) + isPureLinedText = efficiencyLines < 4*efficiencyBytes || escapeExpansionRatio > 1.1 + } + } + + // Format the string into printable records. + var list textList + var delim string + switch { + // If the text appears to be multi-lined text, + // then perform differencing across individual lines. + case isPureLinedText: + list = opts.formatDiffSlice( + reflect.ValueOf(ssx), reflect.ValueOf(ssy), 1, "line", + func(v reflect.Value, d diffMode) textRecord { + s := formatString(v.Index(0).String()) + return textRecord{Diff: d, Value: textLine(s)} + }, + ) + delim = "\n" + + // If possible, use a custom triple-quote (""") syntax for printing + // differences in a string literal. This format is more readable, + // but has edge-cases where differences are visually indistinguishable. + // This format is avoided under the following conditions: + // - A line starts with `"""` + // - A line starts with "..." 
+ // - A line contains non-printable characters + // - Adjacent different lines differ only by whitespace + // + // For example: + // + // """ + // ... // 3 identical lines + // foo + // bar + // - baz + // + BAZ + // """ + isTripleQuoted := true + prevRemoveLines := map[string]bool{} + prevInsertLines := map[string]bool{} + var list2 textList + list2 = append(list2, textRecord{Value: textLine(`"""`), ElideComma: true}) + for _, r := range list { + if !r.Value.Equal(textEllipsis) { + line, _ := strconv.Unquote(string(r.Value.(textLine))) + line = strings.TrimPrefix(strings.TrimSuffix(line, "\r"), "\r") // trim leading/trailing carriage returns for legacy Windows endline support + normLine := strings.Map(func(r rune) rune { + if unicode.IsSpace(r) { + return -1 // drop whitespace to avoid visually indistinguishable output + } + return r + }, line) + isPrintable := func(r rune) bool { + return unicode.IsPrint(r) || r == '\t' // specially treat tab as printable + } + isTripleQuoted = !strings.HasPrefix(line, `"""`) && !strings.HasPrefix(line, "...") && strings.TrimFunc(line, isPrintable) == "" + switch r.Diff { + case diffRemoved: + isTripleQuoted = isTripleQuoted && !prevInsertLines[normLine] + prevRemoveLines[normLine] = true + case diffInserted: + isTripleQuoted = isTripleQuoted && !prevRemoveLines[normLine] + prevInsertLines[normLine] = true + } + if !isTripleQuoted { + break + } + r.Value = textLine(line) + r.ElideComma = true + } + if !(r.Diff == diffRemoved || r.Diff == diffInserted) { // start a new non-adjacent difference group + prevRemoveLines = map[string]bool{} + prevInsertLines = map[string]bool{} + } + list2 = append(list2, r) + } + if r := list2[len(list2)-1]; r.Diff == diffIdentical && len(r.Value.(textLine)) == 0 { + list2 = list2[:len(list2)-1] // elide single empty line at the end + } + list2 = append(list2, textRecord{Value: textLine(`"""`), ElideComma: true}) + if isTripleQuoted { + var out textNode = &textWrap{Prefix: "(", Value: list2, Suffix: ")"} + switch t.Kind() { + case reflect.String: + if t != stringType { + out = opts.FormatType(t, out) + } + case reflect.Slice: + // Always emit type for slices since the triple-quote syntax + // looks like a string (not a slice). + opts = opts.WithTypeMode(emitType) + out = opts.FormatType(t, out) + } + return out + } + + // If the text appears to be single-lined text, + // then perform differencing in approximately fixed-sized chunks. + // The output is printed as quoted strings. + case isMostlyText: + list = opts.formatDiffSlice( + reflect.ValueOf(sx), reflect.ValueOf(sy), 64, "byte", + func(v reflect.Value, d diffMode) textRecord { + s := formatString(v.String()) + return textRecord{Diff: d, Value: textLine(s)} + }, + ) + + // If the text appears to be binary data, + // then perform differencing in approximately fixed-sized chunks. + // The output is inspired by hexdump. + case isBinary: + list = opts.formatDiffSlice( + reflect.ValueOf(sx), reflect.ValueOf(sy), 16, "byte", + func(v reflect.Value, d diffMode) textRecord { + var ss []string + for i := 0; i < v.Len(); i++ { + ss = append(ss, formatHex(v.Index(i).Uint())) + } + s := strings.Join(ss, ", ") + comment := commentString(fmt.Sprintf("%c|%v|", d, formatASCII(v.String()))) + return textRecord{Diff: d, Value: textLine(s), Comment: comment} + }, + ) + + // For all other slices of primitive types, + // then perform differencing in approximately fixed-sized chunks. + // The size of each chunk depends on the width of the element kind. 
+ default: + var chunkSize int + if t.Elem().Kind() == reflect.Bool { + chunkSize = 16 + } else { + switch t.Elem().Bits() { + case 8: + chunkSize = 16 + case 16: + chunkSize = 12 + case 32: + chunkSize = 8 + default: + chunkSize = 8 + } + } + list = opts.formatDiffSlice( + vx, vy, chunkSize, t.Elem().Kind().String(), + func(v reflect.Value, d diffMode) textRecord { + var ss []string + for i := 0; i < v.Len(); i++ { + switch t.Elem().Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + ss = append(ss, fmt.Sprint(v.Index(i).Int())) + case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64: + ss = append(ss, fmt.Sprint(v.Index(i).Uint())) + case reflect.Uint8, reflect.Uintptr: + ss = append(ss, formatHex(v.Index(i).Uint())) + case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128: + ss = append(ss, fmt.Sprint(v.Index(i).Interface())) + } + } + s := strings.Join(ss, ", ") + return textRecord{Diff: d, Value: textLine(s)} + }, + ) + } + + // Wrap the output with appropriate type information. + var out textNode = &textWrap{Prefix: "{", Value: list, Suffix: "}"} + if !isMostlyText { + // The "{...}" byte-sequence literal is not valid Go syntax for strings. + // Emit the type for extra clarity (e.g. "string{...}"). + if t.Kind() == reflect.String { + opts = opts.WithTypeMode(emitType) + } + return opts.FormatType(t, out) + } + switch t.Kind() { + case reflect.String: + out = &textWrap{Prefix: "strings.Join(", Value: out, Suffix: fmt.Sprintf(", %q)", delim)} + if t != stringType { + out = opts.FormatType(t, out) + } + case reflect.Slice: + out = &textWrap{Prefix: "bytes.Join(", Value: out, Suffix: fmt.Sprintf(", %q)", delim)} + if t != bytesType { + out = opts.FormatType(t, out) + } + } + return out +} + +// formatASCII formats s as an ASCII string. +// This is useful for printing binary strings in a semi-legible way. +func formatASCII(s string) string { + b := bytes.Repeat([]byte{'.'}, len(s)) + for i := 0; i < len(s); i++ { + if ' ' <= s[i] && s[i] <= '~' { + b[i] = s[i] + } + } + return string(b) +} + +func (opts formatOptions) formatDiffSlice( + vx, vy reflect.Value, chunkSize int, name string, + makeRec func(reflect.Value, diffMode) textRecord, +) (list textList) { + eq := func(ix, iy int) bool { + return vx.Index(ix).Interface() == vy.Index(iy).Interface() + } + es := diff.Difference(vx.Len(), vy.Len(), func(ix, iy int) diff.Result { + return diff.BoolResult(eq(ix, iy)) + }) + + appendChunks := func(v reflect.Value, d diffMode) int { + n0 := v.Len() + for v.Len() > 0 { + n := chunkSize + if n > v.Len() { + n = v.Len() + } + list = append(list, makeRec(v.Slice(0, n), d)) + v = v.Slice(n, v.Len()) + } + return n0 - v.Len() + } + + var numDiffs int + maxLen := -1 + if opts.LimitVerbosity { + maxLen = (1 << opts.verbosity()) << 2 // 4, 8, 16, 32, 64, etc... + opts.VerbosityLevel-- + } + + groups := coalesceAdjacentEdits(name, es) + groups = coalesceInterveningIdentical(groups, chunkSize/4) + groups = cleanupSurroundingIdentical(groups, eq) + maxGroup := diffStats{Name: name} + for i, ds := range groups { + if maxLen >= 0 && numDiffs >= maxLen { + maxGroup = maxGroup.Append(ds) + continue + } + + // Print equal. + if ds.NumDiff() == 0 { + // Compute the number of leading and trailing equal bytes to print. 
+ var numLo, numHi int + numEqual := ds.NumIgnored + ds.NumIdentical + for numLo < chunkSize*numContextRecords && numLo+numHi < numEqual && i != 0 { + numLo++ + } + for numHi < chunkSize*numContextRecords && numLo+numHi < numEqual && i != len(groups)-1 { + numHi++ + } + if numEqual-(numLo+numHi) <= chunkSize && ds.NumIgnored == 0 { + numHi = numEqual - numLo // Avoid pointless coalescing of single equal row + } + + // Print the equal bytes. + appendChunks(vx.Slice(0, numLo), diffIdentical) + if numEqual > numLo+numHi { + ds.NumIdentical -= numLo + numHi + list.AppendEllipsis(ds) + } + appendChunks(vx.Slice(numEqual-numHi, numEqual), diffIdentical) + vx = vx.Slice(numEqual, vx.Len()) + vy = vy.Slice(numEqual, vy.Len()) + continue + } + + // Print unequal. + len0 := len(list) + nx := appendChunks(vx.Slice(0, ds.NumIdentical+ds.NumRemoved+ds.NumModified), diffRemoved) + vx = vx.Slice(nx, vx.Len()) + ny := appendChunks(vy.Slice(0, ds.NumIdentical+ds.NumInserted+ds.NumModified), diffInserted) + vy = vy.Slice(ny, vy.Len()) + numDiffs += len(list) - len0 + } + if maxGroup.IsZero() { + assert(vx.Len() == 0 && vy.Len() == 0) + } else { + list.AppendEllipsis(maxGroup) + } + return list +} + +// coalesceAdjacentEdits coalesces the list of edits into groups of adjacent +// equal or unequal counts. +// +// Example: +// +// Input: "..XXY...Y" +// Output: [ +// {NumIdentical: 2}, +// {NumRemoved: 2, NumInserted 1}, +// {NumIdentical: 3}, +// {NumInserted: 1}, +// ] +func coalesceAdjacentEdits(name string, es diff.EditScript) (groups []diffStats) { + var prevMode byte + lastStats := func(mode byte) *diffStats { + if prevMode != mode { + groups = append(groups, diffStats{Name: name}) + prevMode = mode + } + return &groups[len(groups)-1] + } + for _, e := range es { + switch e { + case diff.Identity: + lastStats('=').NumIdentical++ + case diff.UniqueX: + lastStats('!').NumRemoved++ + case diff.UniqueY: + lastStats('!').NumInserted++ + case diff.Modified: + lastStats('!').NumModified++ + } + } + return groups +} + +// coalesceInterveningIdentical coalesces sufficiently short (<= windowSize) +// equal groups into adjacent unequal groups that currently result in a +// dual inserted/removed printout. This acts as a high-pass filter to smooth +// out high-frequency changes within the windowSize. 
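
The "..XXY...Y" example documented for coalesceAdjacentEdits above can be replayed as a runnable paraphrase; the stats struct is a simplified stand-in for diffStats:

package main

import "fmt"

type stats struct{ Identical, Removed, Inserted int }

func main() {
	var groups []stats
	var prevMode byte
	lastStats := func(mode byte) *stats {
		if prevMode != mode {
			groups = append(groups, stats{})
			prevMode = mode
		}
		return &groups[len(groups)-1]
	}
	// '.' = identity, 'X' = unique to x, 'Y' = unique to y.
	for _, e := range "..XXY...Y" {
		switch e {
		case '.':
			lastStats('=').Identical++
		case 'X':
			lastStats('!').Removed++
		case 'Y':
			lastStats('!').Inserted++
		}
	}
	fmt.Printf("%+v\n", groups)
	// [{Identical:2 Removed:0 Inserted:0} {Identical:0 Removed:2 Inserted:1}
	//  {Identical:3 Removed:0 Inserted:0} {Identical:0 Removed:0 Inserted:1}]
}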
+//
+// Example:
+//
+// WindowSize: 16,
+// Input: [
+// {NumIdentical: 61}, // group 0
+// {NumRemoved: 3, NumInserted: 1}, // group 1
+// {NumIdentical: 6}, // ├── coalesce
+// {NumInserted: 2}, // ├── coalesce
+// {NumIdentical: 1}, // ├── coalesce
+// {NumRemoved: 9}, // └── coalesce
+// {NumIdentical: 64}, // group 2
+// {NumRemoved: 3, NumInserted: 1}, // group 3
+// {NumIdentical: 6}, // ├── coalesce
+// {NumInserted: 2}, // ├── coalesce
+// {NumIdentical: 1}, // ├── coalesce
+// {NumRemoved: 7}, // ├── coalesce
+// {NumIdentical: 1}, // ├── coalesce
+// {NumRemoved: 2}, // └── coalesce
+// {NumIdentical: 63}, // group 4
+// ]
+// Output: [
+// {NumIdentical: 61},
+// {NumIdentical: 7, NumRemoved: 12, NumInserted: 3},
+// {NumIdentical: 64},
+// {NumIdentical: 8, NumRemoved: 12, NumInserted: 3},
+// {NumIdentical: 63},
+// ]
+func coalesceInterveningIdentical(groups []diffStats, windowSize int) []diffStats {
+ groups, groupsOrig := groups[:0], groups
+ for i, ds := range groupsOrig {
+ if len(groups) >= 2 && ds.NumDiff() > 0 {
+ prev := &groups[len(groups)-2] // Unequal group
+ curr := &groups[len(groups)-1] // Equal group
+ next := &groupsOrig[i] // Unequal group
+ hadX, hadY := prev.NumRemoved > 0, prev.NumInserted > 0
+ hasX, hasY := next.NumRemoved > 0, next.NumInserted > 0
+ if ((hadX || hasX) && (hadY || hasY)) && curr.NumIdentical <= windowSize {
+ *prev = prev.Append(*curr).Append(*next)
+ groups = groups[:len(groups)-1] // Truncate off equal group
+ continue
+ }
+ }
+ groups = append(groups, ds)
+ }
+ return groups
+}
+
+// cleanupSurroundingIdentical scans through all unequal groups, and
+// moves any leading sequence of equal elements to the preceding equal group and
+// moves any trailing sequence of equal elements to the succeeding equal group.
+//
+// This is necessary since coalesceInterveningIdentical may coalesce edit groups
+// together such that leading/trailing spans of equal elements become possible.
+// Note that this can occur even with an optimal diffing algorithm.
+//
+// Example:
+//
+// Input: [
+// {NumIdentical: 61},
+// {NumIdentical: 1, NumRemoved: 11, NumInserted: 2}, // assume 3 leading identical elements
+// {NumIdentical: 67},
+// {NumIdentical: 7, NumRemoved: 12, NumInserted: 3}, // assume 10 trailing identical elements
+// {NumIdentical: 54},
+// ]
+// Output: [
+// {NumIdentical: 64}, // incremented by 3
+// {NumRemoved: 9},
+// {NumIdentical: 67},
+// {NumRemoved: 9},
+// {NumIdentical: 64}, // incremented by 10
+// ]
+func cleanupSurroundingIdentical(groups []diffStats, eq func(i, j int) bool) []diffStats {
+ var ix, iy int // indexes into sequence x and y
+ for i, ds := range groups {
+ // Handle equal group.
+ if ds.NumDiff() == 0 {
+ ix += ds.NumIdentical
+ iy += ds.NumIdentical
+ continue
+ }
+
+ // Handle unequal group.
+ nx := ds.NumIdentical + ds.NumRemoved + ds.NumModified
+ ny := ds.NumIdentical + ds.NumInserted + ds.NumModified
+ var numLeadingIdentical, numTrailingIdentical int
+ for j := 0; j < nx && j < ny && eq(ix+j, iy+j); j++ {
+ numLeadingIdentical++
+ }
+ for j := 0; j < nx && j < ny && eq(ix+nx-1-j, iy+ny-1-j); j++ {
+ numTrailingIdentical++
+ }
+ if numIdentical := numLeadingIdentical + numTrailingIdentical; numIdentical > 0 {
+ if numLeadingIdentical > 0 {
+ // Remove leading identical span from this group and
+ // insert it into the preceding group.
+ if i-1 >= 0 { + groups[i-1].NumIdentical += numLeadingIdentical + } else { + // No preceding group exists, so prepend a new group, + // but do so after we finish iterating over all groups. + defer func() { + groups = append([]diffStats{{Name: groups[0].Name, NumIdentical: numLeadingIdentical}}, groups...) + }() + } + // Increment indexes since the preceding group would have handled this. + ix += numLeadingIdentical + iy += numLeadingIdentical + } + if numTrailingIdentical > 0 { + // Remove trailing identical span from this group and + // insert it into the succeeding group. + if i+1 < len(groups) { + groups[i+1].NumIdentical += numTrailingIdentical + } else { + // No succeeding group exists, so append a new group, + // but do so after we finish iterating over all groups. + defer func() { + groups = append(groups, diffStats{Name: groups[len(groups)-1].Name, NumIdentical: numTrailingIdentical}) + }() + } + // Do not increment indexes since the succeeding group will handle this. + } + + // Update this group since some identical elements were removed. + nx -= numIdentical + ny -= numIdentical + groups[i] = diffStats{Name: ds.Name, NumRemoved: nx, NumInserted: ny} + } + ix += nx + iy += ny + } + return groups +} diff --git a/vendor/github.com/google/go-cmp/cmp/report_text.go b/vendor/github.com/google/go-cmp/cmp/report_text.go new file mode 100644 index 000000000..388fcf571 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/report_text.go @@ -0,0 +1,432 @@ +// Copyright 2019, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cmp + +import ( + "bytes" + "fmt" + "math/rand" + "strings" + "time" + "unicode/utf8" + + "github.com/google/go-cmp/cmp/internal/flags" +) + +var randBool = rand.New(rand.NewSource(time.Now().Unix())).Intn(2) == 0 + +const maxColumnLength = 80 + +type indentMode int + +func (n indentMode) appendIndent(b []byte, d diffMode) []byte { + // The output of Diff is documented as being unstable to provide future + // flexibility in changing the output for more humanly readable reports. + // This logic intentionally introduces instability to the exact output + // so that users can detect accidental reliance on stability early on, + // rather than much later when an actual change to the format occurs. + if flags.Deterministic || randBool { + // Use regular spaces (U+0020). + switch d { + case diffUnknown, diffIdentical: + b = append(b, " "...) + case diffRemoved: + b = append(b, "- "...) + case diffInserted: + b = append(b, "+ "...) + } + } else { + // Use non-breaking spaces (U+00a0). + switch d { + case diffUnknown, diffIdentical: + b = append(b, "  "...) + case diffRemoved: + b = append(b, "- "...) + case diffInserted: + b = append(b, "+ "...) + } + } + return repeatCount(n).appendChar(b, '\t') +} + +type repeatCount int + +func (n repeatCount) appendChar(b []byte, c byte) []byte { + for ; n > 0; n-- { + b = append(b, c) + } + return b +} + +// textNode is a simplified tree-based representation of structured text. +// Possible node types are textWrap, textList, or textLine. +type textNode interface { + // Len reports the length in bytes of a single-line version of the tree. + // Nested textRecord.Diff and textRecord.Comment fields are ignored. + Len() int + // Equal reports whether the two trees are structurally identical. + // Nested textRecord.Diff and textRecord.Comment fields are compared. 
+ Equal(textNode) bool + // String returns the string representation of the text tree. + // It is not guaranteed that len(x.String()) == x.Len(), + // nor that x.String() == y.String() implies that x.Equal(y). + String() string + + // formatCompactTo formats the contents of the tree as a single-line string + // to the provided buffer. Any nested textRecord.Diff and textRecord.Comment + // fields are ignored. + // + // However, not all nodes in the tree should be collapsed as a single-line. + // If a node can be collapsed as a single-line, it is replaced by a textLine + // node. Since the top-level node cannot replace itself, this also returns + // the current node itself. + // + // This does not mutate the receiver. + formatCompactTo([]byte, diffMode) ([]byte, textNode) + // formatExpandedTo formats the contents of the tree as a multi-line string + // to the provided buffer. In order for column alignment to operate well, + // formatCompactTo must be called before calling formatExpandedTo. + formatExpandedTo([]byte, diffMode, indentMode) []byte +} + +// textWrap is a wrapper that concatenates a prefix and/or a suffix +// to the underlying node. +type textWrap struct { + Prefix string // e.g., "bytes.Buffer{" + Value textNode // textWrap | textList | textLine + Suffix string // e.g., "}" + Metadata interface{} // arbitrary metadata; has no effect on formatting +} + +func (s *textWrap) Len() int { + return len(s.Prefix) + s.Value.Len() + len(s.Suffix) +} +func (s1 *textWrap) Equal(s2 textNode) bool { + if s2, ok := s2.(*textWrap); ok { + return s1.Prefix == s2.Prefix && s1.Value.Equal(s2.Value) && s1.Suffix == s2.Suffix + } + return false +} +func (s *textWrap) String() string { + var d diffMode + var n indentMode + _, s2 := s.formatCompactTo(nil, d) + b := n.appendIndent(nil, d) // Leading indent + b = s2.formatExpandedTo(b, d, n) // Main body + b = append(b, '\n') // Trailing newline + return string(b) +} +func (s *textWrap) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) { + n0 := len(b) // Original buffer length + b = append(b, s.Prefix...) + b, s.Value = s.Value.formatCompactTo(b, d) + b = append(b, s.Suffix...) + if _, ok := s.Value.(textLine); ok { + return b, textLine(b[n0:]) + } + return b, s +} +func (s *textWrap) formatExpandedTo(b []byte, d diffMode, n indentMode) []byte { + b = append(b, s.Prefix...) + b = s.Value.formatExpandedTo(b, d, n) + b = append(b, s.Suffix...) + return b +} + +// textList is a comma-separated list of textWrap or textLine nodes. +// The list may be formatted as multi-lines or single-line at the discretion +// of the textList.formatCompactTo method. +type textList []textRecord +type textRecord struct { + Diff diffMode // e.g., 0 or '-' or '+' + Key string // e.g., "MyField" + Value textNode // textWrap | textLine + ElideComma bool // avoid trailing comma + Comment fmt.Stringer // e.g., "6 identical fields" +} + +// AppendEllipsis appends a new ellipsis node to the list if none already +// exists at the end. If cs is non-zero it coalesces the statistics with the +// previous diffStats. 
+func (s *textList) AppendEllipsis(ds diffStats) { + hasStats := !ds.IsZero() + if len(*s) == 0 || !(*s)[len(*s)-1].Value.Equal(textEllipsis) { + if hasStats { + *s = append(*s, textRecord{Value: textEllipsis, ElideComma: true, Comment: ds}) + } else { + *s = append(*s, textRecord{Value: textEllipsis, ElideComma: true}) + } + return + } + if hasStats { + (*s)[len(*s)-1].Comment = (*s)[len(*s)-1].Comment.(diffStats).Append(ds) + } +} + +func (s textList) Len() (n int) { + for i, r := range s { + n += len(r.Key) + if r.Key != "" { + n += len(": ") + } + n += r.Value.Len() + if i < len(s)-1 { + n += len(", ") + } + } + return n +} + +func (s1 textList) Equal(s2 textNode) bool { + if s2, ok := s2.(textList); ok { + if len(s1) != len(s2) { + return false + } + for i := range s1 { + r1, r2 := s1[i], s2[i] + if !(r1.Diff == r2.Diff && r1.Key == r2.Key && r1.Value.Equal(r2.Value) && r1.Comment == r2.Comment) { + return false + } + } + return true + } + return false +} + +func (s textList) String() string { + return (&textWrap{Prefix: "{", Value: s, Suffix: "}"}).String() +} + +func (s textList) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) { + s = append(textList(nil), s...) // Avoid mutating original + + // Determine whether we can collapse this list as a single line. + n0 := len(b) // Original buffer length + var multiLine bool + for i, r := range s { + if r.Diff == diffInserted || r.Diff == diffRemoved { + multiLine = true + } + b = append(b, r.Key...) + if r.Key != "" { + b = append(b, ": "...) + } + b, s[i].Value = r.Value.formatCompactTo(b, d|r.Diff) + if _, ok := s[i].Value.(textLine); !ok { + multiLine = true + } + if r.Comment != nil { + multiLine = true + } + if i < len(s)-1 { + b = append(b, ", "...) + } + } + // Force multi-lined output when printing a removed/inserted node that + // is sufficiently long. + if (d == diffInserted || d == diffRemoved) && len(b[n0:]) > maxColumnLength { + multiLine = true + } + if !multiLine { + return b, textLine(b[n0:]) + } + return b, s +} + +func (s textList) formatExpandedTo(b []byte, d diffMode, n indentMode) []byte { + alignKeyLens := s.alignLens( + func(r textRecord) bool { + _, isLine := r.Value.(textLine) + return r.Key == "" || !isLine + }, + func(r textRecord) int { return utf8.RuneCountInString(r.Key) }, + ) + alignValueLens := s.alignLens( + func(r textRecord) bool { + _, isLine := r.Value.(textLine) + return !isLine || r.Value.Equal(textEllipsis) || r.Comment == nil + }, + func(r textRecord) int { return utf8.RuneCount(r.Value.(textLine)) }, + ) + + // Format lists of simple lists in a batched form. + // If the list is sequence of only textLine values, + // then batch multiple values on a single line. + var isSimple bool + for _, r := range s { + _, isLine := r.Value.(textLine) + isSimple = r.Diff == 0 && r.Key == "" && isLine && r.Comment == nil + if !isSimple { + break + } + } + if isSimple { + n++ + var batch []byte + emitBatch := func() { + if len(batch) > 0 { + b = n.appendIndent(append(b, '\n'), d) + b = append(b, bytes.TrimRight(batch, " ")...) + batch = batch[:0] + } + } + for _, r := range s { + line := r.Value.(textLine) + if len(batch)+len(line)+len(", ") > maxColumnLength { + emitBatch() + } + batch = append(batch, line...) + batch = append(batch, ", "...) + } + emitBatch() + n-- + return n.appendIndent(append(b, '\n'), d) + } + + // Format the list as a multi-lined output. + n++ + for i, r := range s { + b = n.appendIndent(append(b, '\n'), d|r.Diff) + if r.Key != "" { + b = append(b, r.Key+": "...) 
+ } + b = alignKeyLens[i].appendChar(b, ' ') + + b = r.Value.formatExpandedTo(b, d|r.Diff, n) + if !r.ElideComma { + b = append(b, ',') + } + b = alignValueLens[i].appendChar(b, ' ') + + if r.Comment != nil { + b = append(b, " // "+r.Comment.String()...) + } + } + n-- + + return n.appendIndent(append(b, '\n'), d) +} + +func (s textList) alignLens( + skipFunc func(textRecord) bool, + lenFunc func(textRecord) int, +) []repeatCount { + var startIdx, endIdx, maxLen int + lens := make([]repeatCount, len(s)) + for i, r := range s { + if skipFunc(r) { + for j := startIdx; j < endIdx && j < len(s); j++ { + lens[j] = repeatCount(maxLen - lenFunc(s[j])) + } + startIdx, endIdx, maxLen = i+1, i+1, 0 + } else { + if maxLen < lenFunc(r) { + maxLen = lenFunc(r) + } + endIdx = i + 1 + } + } + for j := startIdx; j < endIdx && j < len(s); j++ { + lens[j] = repeatCount(maxLen - lenFunc(s[j])) + } + return lens +} + +// textLine is a single-line segment of text and is always a leaf node +// in the textNode tree. +type textLine []byte + +var ( + textNil = textLine("nil") + textEllipsis = textLine("...") +) + +func (s textLine) Len() int { + return len(s) +} +func (s1 textLine) Equal(s2 textNode) bool { + if s2, ok := s2.(textLine); ok { + return bytes.Equal([]byte(s1), []byte(s2)) + } + return false +} +func (s textLine) String() string { + return string(s) +} +func (s textLine) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) { + return append(b, s...), s +} +func (s textLine) formatExpandedTo(b []byte, _ diffMode, _ indentMode) []byte { + return append(b, s...) +} + +type diffStats struct { + Name string + NumIgnored int + NumIdentical int + NumRemoved int + NumInserted int + NumModified int +} + +func (s diffStats) IsZero() bool { + s.Name = "" + return s == diffStats{} +} + +func (s diffStats) NumDiff() int { + return s.NumRemoved + s.NumInserted + s.NumModified +} + +func (s diffStats) Append(ds diffStats) diffStats { + assert(s.Name == ds.Name) + s.NumIgnored += ds.NumIgnored + s.NumIdentical += ds.NumIdentical + s.NumRemoved += ds.NumRemoved + s.NumInserted += ds.NumInserted + s.NumModified += ds.NumModified + return s +} + +// String prints a humanly-readable summary of coalesced records. +// +// Example: +// +// diffStats{Name: "Field", NumIgnored: 5}.String() => "5 ignored fields" +func (s diffStats) String() string { + var ss []string + var sum int + labels := [...]string{"ignored", "identical", "removed", "inserted", "modified"} + counts := [...]int{s.NumIgnored, s.NumIdentical, s.NumRemoved, s.NumInserted, s.NumModified} + for i, n := range counts { + if n > 0 { + ss = append(ss, fmt.Sprintf("%d %v", n, labels[i])) + } + sum += n + } + + // Pluralize the name (adjusting for some obscure English grammar rules). + name := s.Name + if sum > 1 { + name += "s" + if strings.HasSuffix(name, "ys") { + name = name[:len(name)-2] + "ies" // e.g., "entrys" => "entries" + } + } + + // Format the list according to English grammar (with Oxford comma). 
+ switch n := len(ss); n { + case 0: + return "" + case 1, 2: + return strings.Join(ss, " and ") + " " + name + default: + return strings.Join(ss[:n-1], ", ") + ", and " + ss[n-1] + " " + name + } +} + +type commentString string + +func (s commentString) String() string { return string(s) } diff --git a/vendor/github.com/google/go-cmp/cmp/report_value.go b/vendor/github.com/google/go-cmp/cmp/report_value.go new file mode 100644 index 000000000..668d470fd --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/report_value.go @@ -0,0 +1,121 @@ +// Copyright 2019, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cmp + +import "reflect" + +// valueNode represents a single node within a report, which is a +// structured representation of the value tree, containing information +// regarding which nodes are equal or not. +type valueNode struct { + parent *valueNode + + Type reflect.Type + ValueX reflect.Value + ValueY reflect.Value + + // NumSame is the number of leaf nodes that are equal. + // All descendants are equal only if NumDiff is 0. + NumSame int + // NumDiff is the number of leaf nodes that are not equal. + NumDiff int + // NumIgnored is the number of leaf nodes that are ignored. + NumIgnored int + // NumCompared is the number of leaf nodes that were compared + // using an Equal method or Comparer function. + NumCompared int + // NumTransformed is the number of non-leaf nodes that were transformed. + NumTransformed int + // NumChildren is the number of transitive descendants of this node. + // This counts from zero; thus, leaf nodes have no descendants. + NumChildren int + // MaxDepth is the maximum depth of the tree. This counts from zero; + // thus, leaf nodes have a depth of zero. + MaxDepth int + + // Records is a list of struct fields, slice elements, or map entries. + Records []reportRecord // If populated, implies Value is not populated + + // Value is the result of a transformation, pointer indirect, of + // type assertion. + Value *valueNode // If populated, implies Records is not populated + + // TransformerName is the name of the transformer. 
+ TransformerName string // If non-empty, implies Value is populated +} +type reportRecord struct { + Key reflect.Value // Invalid for slice element + Value *valueNode +} + +func (parent *valueNode) PushStep(ps PathStep) (child *valueNode) { + vx, vy := ps.Values() + child = &valueNode{parent: parent, Type: ps.Type(), ValueX: vx, ValueY: vy} + switch s := ps.(type) { + case StructField: + assert(parent.Value == nil) + parent.Records = append(parent.Records, reportRecord{Key: reflect.ValueOf(s.Name()), Value: child}) + case SliceIndex: + assert(parent.Value == nil) + parent.Records = append(parent.Records, reportRecord{Value: child}) + case MapIndex: + assert(parent.Value == nil) + parent.Records = append(parent.Records, reportRecord{Key: s.Key(), Value: child}) + case Indirect: + assert(parent.Value == nil && parent.Records == nil) + parent.Value = child + case TypeAssertion: + assert(parent.Value == nil && parent.Records == nil) + parent.Value = child + case Transform: + assert(parent.Value == nil && parent.Records == nil) + parent.Value = child + parent.TransformerName = s.Name() + parent.NumTransformed++ + default: + assert(parent == nil) // Must be the root step + } + return child +} + +func (r *valueNode) Report(rs Result) { + assert(r.MaxDepth == 0) // May only be called on leaf nodes + + if rs.ByIgnore() { + r.NumIgnored++ + } else { + if rs.Equal() { + r.NumSame++ + } else { + r.NumDiff++ + } + } + assert(r.NumSame+r.NumDiff+r.NumIgnored == 1) + + if rs.ByMethod() { + r.NumCompared++ + } + if rs.ByFunc() { + r.NumCompared++ + } + assert(r.NumCompared <= 1) +} + +func (child *valueNode) PopStep() (parent *valueNode) { + if child.parent == nil { + return nil + } + parent = child.parent + parent.NumSame += child.NumSame + parent.NumDiff += child.NumDiff + parent.NumIgnored += child.NumIgnored + parent.NumCompared += child.NumCompared + parent.NumTransformed += child.NumTransformed + parent.NumChildren += child.NumChildren + 1 + if parent.MaxDepth < child.MaxDepth+1 { + parent.MaxDepth = child.MaxDepth + 1 + } + return parent +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 8736cd652..32d4dabc8 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -425,6 +425,13 @@ github.com/goccy/go-json/internal/runtime # github.com/golang/snappy v1.0.0 ## explicit github.com/golang/snappy +# github.com/google/go-cmp v0.7.0 +## explicit; go 1.21 +github.com/google/go-cmp/cmp +github.com/google/go-cmp/cmp/internal/diff +github.com/google/go-cmp/cmp/internal/flags +github.com/google/go-cmp/cmp/internal/function +github.com/google/go-cmp/cmp/internal/value # github.com/google/s2a-go v0.1.9 ## explicit; go 1.20 github.com/google/s2a-go From dec28b9accd67e1236561fa9c27b27ee9090bbac Mon Sep 17 00:00:00 2001 From: Boris Ilijic Date: Mon, 29 Sep 2025 17:03:50 +0200 Subject: [PATCH 32/95] Add minio config required methods --- pbm/config/config_test.go | 53 +++++++++++++++++ pbm/storage/mio/config.go | 47 +++++++++++++++ pbm/storage/mio/config_test.go | 101 +++++++++++++++++++++++++++++++++ 3 files changed, 201 insertions(+) create mode 100644 pbm/storage/mio/config_test.go diff --git a/pbm/config/config_test.go b/pbm/config/config_test.go index 4ed6b4205..30ab2e425 100644 --- a/pbm/config/config_test.go +++ b/pbm/config/config_test.go @@ -3,9 +3,11 @@ package config import ( "testing" + "github.com/google/go-cmp/cmp" "github.com/percona/percona-backup-mongodb/pbm/storage/azure" "github.com/percona/percona-backup-mongodb/pbm/storage/fs" 
"github.com/percona/percona-backup-mongodb/pbm/storage/gcs" + "github.com/percona/percona-backup-mongodb/pbm/storage/mio" "github.com/percona/percona-backup-mongodb/pbm/storage/s3" ) @@ -151,6 +153,57 @@ func TestIsSameStorage(t *testing.T) { t.Errorf("storage instances has different bucket: cfg=%+v, eq=%+v", cfg, neq) } }) + + t.Run("minio", func(t *testing.T) { + cfg := &mio.Config{ + Region: "eu", + EndpointURL: "ep.com", + Bucket: "b1", + Prefix: "p1", + Credentials: mio.Credentials{ + AccessKeyID: "k1", + SecretAccessKey: "k2", + SessionToken: "sess", + }, + Secure: true, + ChunkSize: 6 << 20, + Retryer: &mio.Retryer{}, + } + eq := &mio.Config{ + Region: "eu", + EndpointURL: "ep.com", + Bucket: "b1", + Prefix: "p1", + } + if !cfg.IsSameStorage(eq) { + t.Errorf("config storage should identify the same instance: cfg=%+v, eq=%+v, diff=%s", + cfg, eq, cmp.Diff(*cfg, *eq)) + } + + neq := cfg.Clone() + neq.Region = "us" + if cfg.IsSameStorage(neq) { + t.Errorf("storage instances has different region: cfg=%+v, eq=%+v", cfg, neq) + } + + neq = cfg.Clone() + neq.EndpointURL = "ep2.com" + if cfg.IsSameStorage(neq) { + t.Errorf("storage instances has different EndpointURL: cfg=%+v, eq=%+v", cfg, neq) + } + + neq = cfg.Clone() + neq.Bucket = "b2" + if cfg.IsSameStorage(neq) { + t.Errorf("storage instances has different bucket: cfg=%+v, eq=%+v", cfg, neq) + } + + neq = cfg.Clone() + neq.Prefix = "p2" + if cfg.IsSameStorage(neq) { + t.Errorf("storage instances has different prefix: cfg=%+v, eq=%+v", cfg, neq) + } + }) } func boolPtr(b bool) *bool { diff --git a/pbm/storage/mio/config.go b/pbm/storage/mio/config.go index 823b9d9da..554e9cb44 100644 --- a/pbm/storage/mio/config.go +++ b/pbm/storage/mio/config.go @@ -2,6 +2,8 @@ package mio import ( "errors" + "maps" + "reflect" "time" ) @@ -38,7 +40,52 @@ type Retryer struct { MaxRetryDelay time.Duration `bson:"maxRetryDelay,omitempty" json:"maxRetryDelay,omitempty" yaml:"maxRetryDelay,omitempty"` } +func (cfg *Config) Clone() *Config { + if cfg == nil { + return nil + } + + c := *cfg + c.EndpointURLMap = maps.Clone(cfg.EndpointURLMap) + if cfg.MaxObjSizeGB != nil { + v := *cfg.MaxObjSizeGB + c.MaxObjSizeGB = &v + } + if cfg.Retryer != nil { + v := *cfg.Retryer + c.Retryer = &v + } + + return &c +} + +func (cfg *Config) Equal(other *Config) bool { + return reflect.DeepEqual(cfg, other) +} +// IsSameStorage identifies the same instance of the minio storage. 
+func (cfg *Config) IsSameStorage(other *Config) bool { + if cfg == nil || other == nil { + return cfg == other + } + + if cfg.Region != other.Region { + return false + } + if cfg.EndpointURL != other.EndpointURL { + return false + } + if !maps.Equal(cfg.EndpointURLMap, other.EndpointURLMap) { + return false + } + if cfg.Bucket != other.Bucket { + return false + } + if cfg.Prefix != other.Prefix { + return false + } + return true +} func (cfg *Config) Cast() error { if cfg.EndpointURL == "" { diff --git a/pbm/storage/mio/config_test.go b/pbm/storage/mio/config_test.go new file mode 100644 index 000000000..7925f0d27 --- /dev/null +++ b/pbm/storage/mio/config_test.go @@ -0,0 +1,101 @@ +package mio + +import ( + "reflect" + "testing" + "time" + + "github.com/google/go-cmp/cmp" +) + +func TestClone(t *testing.T) { + f := 1.1 + c1 := &Config{ + Region: "eu", + EndpointURL: "ep.com", + EndpointURLMap: map[string]string{"n1": "ep1", "n2": "ep2"}, + Bucket: "b1", + Prefix: "p1", + Credentials: Credentials{ + AccessKeyID: "k1", + SecretAccessKey: "k2", + SessionToken: "sess", + }, + Secure: true, + ChunkSize: 6 << 20, + MaxObjSizeGB: &f, + Retryer: &Retryer{ + NumMaxRetries: 1, + MinRetryDelay: time.Second, + MaxRetryDelay: time.Minute, + }, + } + + c2 := c1.Clone() + + if &c1.EndpointURLMap == &c2.EndpointURLMap || + c1.MaxObjSizeGB == c2.MaxObjSizeGB || + c1.Retryer == c2.Retryer { + t.Fatal("Deep copy of pointer fields is missing") + } + if !reflect.DeepEqual(c1, c2) { + t.Fatalf("Clone is not performed, diff=%s", cmp.Diff(*c1, *c2)) + } +} + +func TestEqual(t *testing.T) { + f := 1.1 + c1 := &Config{ + Region: "eu", + EndpointURL: "ep.com", + EndpointURLMap: map[string]string{"n1": "ep1", "n2": "ep2"}, + Bucket: "b1", + Prefix: "p1", + Credentials: Credentials{ + AccessKeyID: "k1", + SecretAccessKey: "k2", + SessionToken: "sess", + }, + Secure: true, + ChunkSize: 6 << 20, + MaxObjSizeGB: &f, + Retryer: &Retryer{ + NumMaxRetries: 1, + MinRetryDelay: time.Second, + MaxRetryDelay: time.Minute, + }, + } + + c2 := c1.Clone() + + if !c1.Equal(c2) { + t.Fatalf("cfg should be equal, diff=%s", cmp.Diff(*c1, *c2)) + } +} + +func TestCast(t *testing.T) { + c := &Config{} + + if err := c.Cast(); err == nil { + t.Fatal("want error when EndpointURL is not specified") + } + + c.EndpointURL = "url" + err := c.Cast() + if err != nil { + t.Fatalf("got error during Cast: %v", err) + } + want := &Config{ + EndpointURL: "url", + ChunkSize: defaultPartSize, + Retryer: &Retryer{ + NumMaxRetries: defaultMaxRetries, + MinRetryDelay: defaultRetryerMinRetryDelay, + MaxRetryDelay: defaultRetryerMaxRetryDelay, + }, + } + + if !c.Equal(want) { + t.Fatalf("wrong config after Cast, diff=%s", cmp.Diff(*c, *want)) + } +} From 9ab4a057d43feca604ef62233dac76ebd665f31f Mon Sep 17 00:00:00 2001 From: Boris Ilijic Date: Tue, 30 Sep 2025 10:25:22 +0200 Subject: [PATCH 33/95] Add minio storage config within pbm config --- pbm/config/config.go | 36 ++++++++++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) diff --git a/pbm/config/config.go b/pbm/config/config.go index 7fecb1c16..a0c0d0531 100644 --- a/pbm/config/config.go +++ b/pbm/config/config.go @@ -26,6 +26,7 @@ import ( "github.com/percona/percona-backup-mongodb/pbm/storage/azure" "github.com/percona/percona-backup-mongodb/pbm/storage/fs" "github.com/percona/percona-backup-mongodb/pbm/storage/gcs" + "github.com/percona/percona-backup-mongodb/pbm/storage/mio" "github.com/percona/percona-backup-mongodb/pbm/storage/oss" 
"github.com/percona/percona-backup-mongodb/pbm/storage/s3" "github.com/percona/percona-backup-mongodb/pbm/topo" @@ -142,6 +143,17 @@ func (c *Config) String() string { c.Storage.S3.ServerSideEncryption.SseCustomerKey = "***" } } + if c.Storage.Minio != nil { + if c.Storage.Minio.Credentials.AccessKeyID != "" { + c.Storage.Minio.Credentials.AccessKeyID = "***" + } + if c.Storage.Minio.Credentials.SecretAccessKey != "" { + c.Storage.Minio.Credentials.SecretAccessKey = "***" + } + if c.Storage.Minio.Credentials.SessionToken != "" { + c.Storage.Minio.Credentials.SessionToken = "***" + } + } if c.Storage.Azure != nil { if c.Storage.Azure.Credentials.Key != "" { c.Storage.Azure.Credentials.Key = "***" @@ -236,6 +248,7 @@ func (cfg *PITRConf) Clone() *PITRConf { type StorageConf struct { Type storage.Type `bson:"type" json:"type" yaml:"type"` S3 *s3.Config `bson:"s3,omitempty" json:"s3,omitempty" yaml:"s3,omitempty"` + Minio *mio.Config `bson:"minio,omitempty" json:"minio,omitempty" yaml:"minio,omitempty"` GCS *gcs.Config `bson:"gcs,omitempty" json:"gcs,omitempty" yaml:"gcs,omitempty"` Azure *azure.Config `bson:"azure,omitempty" json:"azure,omitempty" yaml:"azure,omitempty"` Filesystem *fs.Config `bson:"filesystem,omitempty" json:"filesystem,omitempty" yaml:"filesystem,omitempty"` @@ -256,6 +269,8 @@ func (s *StorageConf) Clone() *StorageConf { rv.Filesystem = s.Filesystem.Clone() case storage.S3: rv.S3 = s.S3.Clone() + case storage.Minio: + rv.Minio = s.Minio.Clone() case storage.Azure: rv.Azure = s.Azure.Clone() case storage.GCS: @@ -276,6 +291,8 @@ func (s *StorageConf) Equal(other *StorageConf) bool { switch s.Type { case storage.S3: return s.S3.Equal(other.S3) + case storage.Minio: + return s.Minio.Equal(other.Minio) case storage.Azure: return s.Azure.Equal(other.Azure) case storage.GCS: @@ -301,6 +318,8 @@ func (s *StorageConf) IsSameStorage(other *StorageConf) bool { switch s.Type { case storage.S3: return s.S3.IsSameStorage(other.S3) + case storage.Minio: + return s.Minio.IsSameStorage(other.Minio) case storage.Azure: return s.Azure.IsSameStorage(other.Azure) case storage.GCS: @@ -320,6 +339,8 @@ func (s *StorageConf) Cast() error { return s.Filesystem.Cast() case storage.S3: return s.S3.Cast() + case storage.Minio: + return s.Minio.Cast() case storage.OSS: return s.OSS.Cast() case storage.GCS: @@ -337,6 +358,8 @@ func (s *StorageConf) Typ() string { switch s.Type { case storage.S3: return "S3" + case storage.Minio: + return "Minio" case storage.Azure: return "Azure" case storage.GCS: @@ -368,6 +391,19 @@ func (s *StorageConf) Path() string { if s.S3.Prefix != "" { path += "/" + s.S3.Prefix } + case storage.Minio: + path = s.Minio.EndpointURL + if path == "" { + path = "minio://" + s.Minio.Bucket + } else { + if !strings.Contains(path, "://") { + path = "minio://" + path + } + path += "/" + s.Minio.Bucket + } + if s.Minio.Prefix != "" { + path += "/" + s.Minio.Prefix + } case storage.Azure: epURL := s.Azure.EndpointURL if epURL == "" { From e6b655728d2d8946c390c24ea3c4bfa0bde19f25 Mon Sep 17 00:00:00 2001 From: Boris Ilijic Date: Tue, 30 Sep 2025 10:28:01 +0200 Subject: [PATCH 34/95] Wire up minio storage with storage factory --- pbm/util/storage.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/pbm/util/storage.go b/pbm/util/storage.go index af66e82bf..b4517a99b 100644 --- a/pbm/util/storage.go +++ b/pbm/util/storage.go @@ -13,6 +13,7 @@ import ( "github.com/percona/percona-backup-mongodb/pbm/storage/blackhole" "github.com/percona/percona-backup-mongodb/pbm/storage/fs" 
"github.com/percona/percona-backup-mongodb/pbm/storage/gcs" + "github.com/percona/percona-backup-mongodb/pbm/storage/mio" "github.com/percona/percona-backup-mongodb/pbm/storage/oss" "github.com/percona/percona-backup-mongodb/pbm/storage/s3" "github.com/percona/percona-backup-mongodb/pbm/version" @@ -27,6 +28,8 @@ func StorageFromConfig(cfg *config.StorageConf, node string, l log.LogEvent) (st switch cfg.Type { case storage.S3: return s3.New(cfg.S3, node, l) + case storage.Minio: + return mio.New(cfg.Minio, node, l) case storage.Azure: return azure.New(cfg.Azure, node, l) case storage.Filesystem: @@ -54,6 +57,9 @@ func StorageWithDownloaderFromConfig( case storage.S3: return s3.NewWithDownloader(cfg.S3, node, l, rstCfg.NumDownloadWorkers, rstCfg.MaxDownloadBufferMb, rstCfg.DownloadChunkMb) + case storage.Minio: + return mio.NewWithDownloader(cfg.Minio, node, l, + rstCfg.NumDownloadWorkers, rstCfg.MaxDownloadBufferMb, rstCfg.DownloadChunkMb) case storage.GCS: return gcs.NewWithDownloader(cfg.GCS, node, l, rstCfg.NumDownloadWorkers, rstCfg.MaxDownloadBufferMb, rstCfg.DownloadChunkMb) From af47140664d56e982fe938c95e0225c21189f75c Mon Sep 17 00:00:00 2001 From: Boris Ilijic Date: Tue, 30 Sep 2025 19:21:04 +0200 Subject: [PATCH 35/95] Enable minio storage tests --- pbm/storage/mio/minio_test.go | 83 +++++++++++++++++++++++++++++++++++ 1 file changed, 83 insertions(+) create mode 100644 pbm/storage/mio/minio_test.go diff --git a/pbm/storage/mio/minio_test.go b/pbm/storage/mio/minio_test.go new file mode 100644 index 000000000..73311fd86 --- /dev/null +++ b/pbm/storage/mio/minio_test.go @@ -0,0 +1,83 @@ +package mio + +import ( + "context" + "net/url" + "testing" + + "github.com/minio/minio-go/v7" + "github.com/minio/minio-go/v7/pkg/credentials" + "github.com/testcontainers/testcontainers-go" + tcminio "github.com/testcontainers/testcontainers-go/modules/minio" + + "github.com/percona/percona-backup-mongodb/pbm/storage" +) + +func TestMinio(t *testing.T) { + ctx := context.Background() + + minioContainer, err := tcminio.Run(ctx, "minio/minio:RELEASE.2024-08-17T01-24-54Z") + defer func() { + if err := testcontainers.TerminateContainer(minioContainer); err != nil { + t.Fatalf("failed to terminate container: %s", err) + } + }() + if err != nil { + t.Fatalf("failed to start container: %s", err) + } + + bucketName := "test-bucket-mio" + //todo: add https test + epTC, err := minioContainer.Endpoint(ctx, "http") + if err != nil { + t.Fatalf("failed to get endpoint: %s", err) + } + u, err := url.Parse(epTC) + if err != nil { + t.Fatalf("parsing endpoint: %v", err) + } + epMinio := u.Host + + cfg := &Config{ + EndpointURL: epMinio, + Bucket: bucketName, + Prefix: "p1", + Secure: false, + Credentials: Credentials{ + SigVer: "V4", + AccessKeyID: "minioadmin", + SecretAccessKey: "minioadmin", + }, + } + minioCl, err := minio.New(cfg.EndpointURL, &minio.Options{ + Creds: credentials.NewStaticV4(cfg.Credentials.AccessKeyID, cfg.Credentials.SecretAccessKey, ""), + Secure: false, + }) + if err != nil { + t.Fatalf("minio client creation: %v", err) + } + err = minioCl.MakeBucket(ctx, cfg.Bucket, minio.MakeBucketOptions{}) + if err != nil { + t.Errorf("bucket creation: %v", err) + } + + stg, err := New(cfg, "", nil) + if err != nil { + t.Fatalf("storage creation: %v", err) + } + + storage.RunStorageBaseTests(t, stg, storage.Minio) + storage.RunStorageAPITests(t, stg) + storage.RunSplitMergeMWTests(t, stg) + + t.Run("with downloader", func(t *testing.T) { + stg, err := NewWithDownloader(cfg, "node", nil, 0, 0, 0) + 
if err != nil {
+			t.Fatalf("failed to create s3 storage: %s", err)
+		}
+
+		storage.RunStorageBaseTests(t, stg, storage.Minio)
+		storage.RunStorageAPITests(t, stg)
+		storage.RunSplitMergeMWTests(t, stg)
+	})
+}

From e88f00fec309b117226fde51b1965d2f59bccb3f Mon Sep 17 00:00:00 2001
From: Boris Ilijic
Date: Wed, 1 Oct 2025 09:43:27 +0200
Subject: [PATCH 36/95] Fix minio storage issues

- Add handling for ErrNotExist resolution during Copy and Delete.
- Handle the prefix during the List operation.

---
 pbm/storage/mio/minio.go | 19 +++++++++++++++----
 1 file changed, 15 insertions(+), 4 deletions(-)

diff --git a/pbm/storage/mio/minio.go b/pbm/storage/mio/minio.go
index 391d88508..d55afb96d 100644
--- a/pbm/storage/mio/minio.go
+++ b/pbm/storage/mio/minio.go
@@ -207,17 +207,21 @@ func (m *Minio) FileStat(name string) (storage.FileInfo, error) {
 
 func (m *Minio) List(prefix, suffix string) ([]storage.FileInfo, error) {
 	ctx := context.Background()
 
-	var files []storage.FileInfo
+	prfx := path.Join(m.cfg.Prefix, prefix)
+	if prfx != "" && !strings.HasSuffix(prfx, "/") {
+		prfx += "/"
+	}
 
+	var files []storage.FileInfo
 	for obj := range m.cl.ListObjects(ctx, m.cfg.Bucket, minio.ListObjectsOptions{
-		Prefix:    prefix,
+		Prefix:    prfx,
 		Recursive: true,
 	}) {
 		if obj.Err != nil {
 			return nil, errors.Wrap(obj.Err, "list using minio")
 		}
 
-		name := strings.TrimPrefix(obj.Key, prefix)
+		name := strings.TrimPrefix(obj.Key, prfx)
 		if len(name) > 0 && name[0] == '/' {
 			name = name[1:]
 		}
@@ -236,6 +240,10 @@ func (m *Minio) List(prefix, suffix string) ([]storage.FileInfo, error) {
 }
 
 func (m *Minio) Delete(name string) error {
+	if _, err := m.FileStat(name); err == storage.ErrNotExist {
+		return err
+	}
+
 	ctx := context.Background()
 
 	objName := path.Join(m.cfg.Prefix, name)
@@ -253,8 +261,11 @@ func (m *Minio) Delete(name string) error {
 }
 
 func (m *Minio) Copy(src, dst string) error {
-	ctx := context.Background()
+	if _, err := m.FileStat(src); err == storage.ErrNotExist {
+		return err
+	}
 
+	ctx := context.Background()
 	_, err := m.cl.CopyObject(ctx,
 		minio.CopyDestOptions{
 			Bucket: m.cfg.Bucket,

From 7f0ce901ff27fa52c0ccd4a66d96ded14f38526d Mon Sep 17 00:00:00 2001
From: Boris Ilijic
Date: Wed, 1 Oct 2025 15:22:30 +0200
Subject: [PATCH 37/95] Expand pbm logger to support custom loggers

PBM propagates LogEvent by injecting it into types and methods.
Custom logging typically requires a logger instance, so it is
now exposed via the LogEvent interface.

---
 pbm/log/discard.go |  1 +
 pbm/log/event.go   | 14 +++++++++-----
 pbm/log/log.go     |  1 +
 3 files changed, 11 insertions(+), 5 deletions(-)

diff --git a/pbm/log/discard.go b/pbm/log/discard.go
index 3ae438528..3119efb1b 100644
--- a/pbm/log/discard.go
+++ b/pbm/log/discard.go
@@ -66,3 +66,4 @@ func (discardEventImpl) Info(msg string, args ...any)    {}
 func (discardEventImpl) Warning(msg string, args ...any) {}
 func (discardEventImpl) Error(msg string, args ...any)   {}
 func (discardEventImpl) Fatal(msg string, args ...any)   {}
+func (discardEventImpl) GetLogger() Logger               { return &discardLoggerImpl{} }
diff --git a/pbm/log/event.go b/pbm/log/event.go
index 97f5ed3a3..595772445 100644
--- a/pbm/log/event.go
+++ b/pbm/log/event.go
@@ -13,22 +13,26 @@ type eventImpl struct {
 	opid string
 }
 
-func (e *eventImpl) Debug(msg string, args ...interface{}) {
+func (e *eventImpl) Debug(msg string, args ...any) {
 	e.l.Debug(e.typ, e.obj, e.opid, e.ep, msg, args...)
}
 
-func (e *eventImpl) Info(msg string, args ...interface{}) {
+func (e *eventImpl) Info(msg string, args ...any) {
 	e.l.Info(e.typ, e.obj, e.opid, e.ep, msg, args...)
 }
 
-func (e *eventImpl) Warning(msg string, args ...interface{}) {
+func (e *eventImpl) Warning(msg string, args ...any) {
 	e.l.Warning(e.typ, e.obj, e.opid, e.ep, msg, args...)
 }
 
-func (e *eventImpl) Error(msg string, args ...interface{}) {
+func (e *eventImpl) Error(msg string, args ...any) {
 	e.l.Error(e.typ, e.obj, e.opid, e.ep, msg, args...)
 }
 
-func (e *eventImpl) Fatal(msg string, args ...interface{}) {
+func (e *eventImpl) Fatal(msg string, args ...any) {
 	e.l.Fatal(e.typ, e.obj, e.opid, e.ep, msg, args...)
 }
+
+func (e *eventImpl) GetLogger() Logger {
+	return e.l
+}
diff --git a/pbm/log/log.go b/pbm/log/log.go
index 8ec891a85..ed22277bc 100644
--- a/pbm/log/log.go
+++ b/pbm/log/log.go
@@ -45,6 +45,7 @@ type LogEvent interface {
 	Warning(msg string, args ...any)
 	Error(msg string, args ...any)
 	Fatal(msg string, args ...any)
+	GetLogger() Logger
 }
 
 type Buffer interface {

From 2a7e734a21a15cfdc09f9df49d4ea9a68475e4a1 Mon Sep 17 00:00:00 2001
From: Boris Ilijic
Date: Wed, 1 Oct 2025 15:26:50 +0200
Subject: [PATCH 38/95] Expand minio config with debugTrace option

It allows turning debug trace on/off.

---
 pbm/storage/mio/config.go | 1 +
 pbm/storage/mio/minio.go  | 5 ++++-
 2 files changed, 5 insertions(+), 1 deletion(-)

diff --git a/pbm/storage/mio/config.go b/pbm/storage/mio/config.go
index 554e9cb44..a70641211 100644
--- a/pbm/storage/mio/config.go
+++ b/pbm/storage/mio/config.go
@@ -15,6 +15,7 @@ type Config struct {
 	Prefix      string      `bson:"prefix" json:"prefix" yaml:"prefix"`
 	Credentials Credentials `bson:"credentials" json:"-" yaml:"credentials"`
 	Secure      bool        `bson:"secure" json:"secure" yaml:"secure"`
+	DebugTrace  bool        `bson:"debugTrace,omitempty" json:"debugTrace,omitempty" yaml:"debugTrace,omitempty"`
 
 	ChunkSize    int64    `bson:"chunkSize,omitempty" json:"chunkSize,omitempty" yaml:"chunkSize,omitempty"`
 	MaxObjSizeGB *float64 `bson:"maxObjSizeGB,omitempty" json:"maxObjSizeGB,omitempty" yaml:"maxObjSizeGB,omitempty"`
diff --git a/pbm/storage/mio/minio.go b/pbm/storage/mio/minio.go
index d55afb96d..6bc42ef22 100644
--- a/pbm/storage/mio/minio.go
+++ b/pbm/storage/mio/minio.go
@@ -116,12 +116,15 @@ func new(cfg *Config, n string, l log.LogEvent) (*Minio, error) {
 		Secure:     cfg.Secure,
 		Region:     cfg.Region,
 		MaxRetries: cfg.Retryer.NumMaxRetries,
-		// Trace: *httptrace.ClientTrace,
 	})
 	if err != nil {
 		return nil, errors.Wrap(err, "minio session")
 	}
 
+	if cfg.DebugTrace {
+		cl.TraceOn(l.GetLogger())
+	}
+
 	return &Minio{
 		cfg:  cfg,
 		node: n,

From 460b160ec403e3382e61d0204fe3af3137ebcd32 Mon Sep 17 00:00:00 2001
From: Boris Ilijic
Date: Wed, 1 Oct 2025 17:15:38 +0200
Subject: [PATCH 39/95] Remove MinRetryDelay & MaxRetryDelay

The minio library supports changing these only at the global level.
For PBM that would mean data races or performance degradation.
MaxRetries should be enough.

---
 pbm/storage/mio/config.go      | 15 ---------------
 pbm/storage/mio/config_test.go |  7 -------
 pbm/storage/mio/minio.go       | 11 ++++-------
 3 files changed, 4 insertions(+), 29 deletions(-)

diff --git a/pbm/storage/mio/config.go b/pbm/storage/mio/config.go
index a70641211..c01dc4d5d 100644
--- a/pbm/storage/mio/config.go
+++ b/pbm/storage/mio/config.go
@@ -4,7 +4,6 @@ import (
 	"errors"
 	"maps"
 	"reflect"
-	"time"
 )
 
 type Config struct {
@@ -33,12 +32,6 @@ type Credentials struct {
 type Retryer struct {
 	// Num max Retries is the number of max retries that will be performed.
NumMaxRetries int `bson:"numMaxRetries,omitempty" json:"numMaxRetries,omitempty" yaml:"numMaxRetries,omitempty"` - - // MinRetryDelay is the minimum retry delay after which retry will be performed. - MinRetryDelay time.Duration `bson:"minRetryDelay,omitempty" json:"minRetryDelay,omitempty" yaml:"minRetryDelay,omitempty"` - - // MaxRetryDelay is the maximum retry delay before which retry must be performed. - MaxRetryDelay time.Duration `bson:"maxRetryDelay,omitempty" json:"maxRetryDelay,omitempty" yaml:"maxRetryDelay,omitempty"` } func (cfg *Config) Clone() *Config { @@ -100,19 +93,11 @@ func (cfg *Config) Cast() error { if cfg.Retryer == nil { cfg.Retryer = &Retryer{ NumMaxRetries: defaultMaxRetries, - MinRetryDelay: defaultRetryerMinRetryDelay, - MaxRetryDelay: defaultRetryerMaxRetryDelay, } } else { if cfg.Retryer.NumMaxRetries == 0 { cfg.Retryer.NumMaxRetries = defaultMaxRetries } - if cfg.Retryer.MinRetryDelay == 0 { - cfg.Retryer.MinRetryDelay = defaultRetryerMinRetryDelay - } - if cfg.Retryer.MaxRetryDelay == 0 { - cfg.Retryer.MaxRetryDelay = defaultRetryerMaxRetryDelay - } } return nil diff --git a/pbm/storage/mio/config_test.go b/pbm/storage/mio/config_test.go index 7925f0d27..fcd4dbb9a 100644 --- a/pbm/storage/mio/config_test.go +++ b/pbm/storage/mio/config_test.go @@ -3,7 +3,6 @@ package mio import ( "reflect" "testing" - "time" "github.com/google/go-cmp/cmp" ) @@ -26,8 +25,6 @@ func TestClone(t *testing.T) { MaxObjSizeGB: &f, Retryer: &Retryer{ NumMaxRetries: 1, - MinRetryDelay: time.Second, - MaxRetryDelay: time.Minute, }, } @@ -61,8 +58,6 @@ func TestEqual(t *testing.T) { MaxObjSizeGB: &f, Retryer: &Retryer{ NumMaxRetries: 1, - MinRetryDelay: time.Second, - MaxRetryDelay: time.Minute, }, } @@ -90,8 +85,6 @@ func TestCast(t *testing.T) { ChunkSize: defaultPartSize, Retryer: &Retryer{ NumMaxRetries: defaultMaxRetries, - MinRetryDelay: defaultRetryerMinRetryDelay, - MaxRetryDelay: defaultRetryerMaxRetryDelay, }, } diff --git a/pbm/storage/mio/minio.go b/pbm/storage/mio/minio.go index 6bc42ef22..ce3ac3d62 100644 --- a/pbm/storage/mio/minio.go +++ b/pbm/storage/mio/minio.go @@ -6,7 +6,6 @@ import ( "path" "runtime" "strings" - "time" "github.com/minio/minio-go/v7" "github.com/minio/minio-go/v7/pkg/credentials" @@ -25,11 +24,12 @@ const ( // minio allows 50TiB, sensibile default is aligned with S3 defaultMaxObjSizeGB = 5018 // 4.9 TB - defaultMaxRetries = 10 - defaultRetryerMinRetryDelay = 200 * time.Millisecond - defaultRetryerMaxRetryDelay = 1 * time.Second + defaultMaxRetries = 10 ) +//todo: +// update docs + type Minio struct { cfg *Config node string @@ -108,9 +108,6 @@ func new(cfg *Config, n string, l log.LogEvent) (*Minio, error) { ) } - minio.DefaultRetryUnit = cfg.Retryer.MinRetryDelay - minio.DefaultRetryCap = cfg.Retryer.MaxRetryDelay - cl, err := minio.New(cfg.resolveEndpointURL(n), &minio.Options{ Creds: creds, Secure: cfg.Secure, From d081a3ad9417a4fc61a24908d20fbc1332c02115 Mon Sep 17 00:00:00 2001 From: Boris Ilijic Date: Wed, 1 Oct 2025 17:26:12 +0200 Subject: [PATCH 40/95] Fix review comments --- pbm/config/config_test.go | 1 + pbm/storage/mio/config.go | 1 + pbm/storage/mio/download.go | 7 ++++--- pbm/storage/mio/minio.go | 9 +++------ pbm/storage/mio/minio_test.go | 1 - 5 files changed, 9 insertions(+), 10 deletions(-) diff --git a/pbm/config/config_test.go b/pbm/config/config_test.go index 30ab2e425..08bcf9465 100644 --- a/pbm/config/config_test.go +++ b/pbm/config/config_test.go @@ -4,6 +4,7 @@ import ( "testing" "github.com/google/go-cmp/cmp" + 
"github.com/percona/percona-backup-mongodb/pbm/storage/azure" "github.com/percona/percona-backup-mongodb/pbm/storage/fs" "github.com/percona/percona-backup-mongodb/pbm/storage/gcs" diff --git a/pbm/storage/mio/config.go b/pbm/storage/mio/config.go index c01dc4d5d..94f036b2b 100644 --- a/pbm/storage/mio/config.go +++ b/pbm/storage/mio/config.go @@ -6,6 +6,7 @@ import ( "reflect" ) +//nolint:lll type Config struct { Region string `bson:"region" json:"region" yaml:"region"` EndpointURL string `bson:"endpointUrl,omitempty" json:"endpointUrl" yaml:"endpointUrl,omitempty"` diff --git a/pbm/storage/mio/download.go b/pbm/storage/mio/download.go index 51c173ec4..21bfb2a71 100644 --- a/pbm/storage/mio/download.go +++ b/pbm/storage/mio/download.go @@ -8,6 +8,7 @@ import ( "time" "github.com/minio/minio-go/v7" + "github.com/percona/percona-backup-mongodb/pbm/errors" "github.com/percona/percona-backup-mongodb/pbm/storage" ) @@ -35,7 +36,7 @@ func (m *Minio) SourceReader(name string) (io.ReadCloser, error) { func (m *Minio) sourceReader(fname string, arenas []*storage.Arena, cc, downloadChuckSize int) (io.ReadCloser, error) { if cc < 1 { - return nil, errors.Errorf("num of workers shuld be at least 1 (got %d)", cc) + return nil, errors.Errorf("num of workers should be at least 1 (got %d)", cc) } if len(arenas) < cc { return nil, errors.Errorf("num of arenas (%d) less then workers (%d)", len(arenas), cc) @@ -77,7 +78,7 @@ func (m *Minio) sourceReader(fname string, arenas []*storage.Arena, cc, download err := pr.WriteChunk(&rs, w) if err != nil { - exitErr = errors.Wrapf(err, "SourceReader: copy bytes %d-%d from resoponse", rs.Meta.Start, rs.Meta.End) + exitErr = errors.Wrapf(err, "SourceReader: copy bytes %d-%d from response", rs.Meta.Start, rs.Meta.End) return } @@ -86,7 +87,7 @@ func (m *Minio) sourceReader(fname string, arenas []*storage.Arena, cc, download r := heap.Pop(cqueue).(*storage.Chunk) err := pr.WriteChunk(r, w) if err != nil { - exitErr = errors.Wrapf(err, "SourceReader: copy bytes %d-%d from resoponse buffer", r.Meta.Start, r.Meta.End) + exitErr = errors.Wrapf(err, "SourceReader: copy bytes %d-%d from response buffer", r.Meta.Start, r.Meta.End) return } } diff --git a/pbm/storage/mio/minio.go b/pbm/storage/mio/minio.go index ce3ac3d62..8e43837d2 100644 --- a/pbm/storage/mio/minio.go +++ b/pbm/storage/mio/minio.go @@ -27,9 +27,6 @@ const ( defaultMaxRetries = 10 ) -//todo: -// update docs - type Minio struct { cfg *Config node string @@ -40,7 +37,7 @@ type Minio struct { } func New(cfg *Config, node string, l log.LogEvent) (storage.Storage, error) { - m, err := new(cfg, node, l) + m, err := newMinio(cfg, node, l) if err != nil { return nil, err } @@ -61,7 +58,7 @@ func NewWithDownloader( cfg *Config, node string, l log.LogEvent, cc, bufSizeMb, spanSizeMb int, ) (storage.Storage, error) { - m, err := new(cfg, node, l) + m, err := newMinio(cfg, node, l) if err != nil { return nil, err } @@ -85,7 +82,7 @@ func NewWithDownloader( return storage.NewSplitMergeMW(m, cfg.GetMaxObjSizeGB()), nil } -func new(cfg *Config, n string, l log.LogEvent) (*Minio, error) { +func newMinio(cfg *Config, n string, l log.LogEvent) (*Minio, error) { if err := cfg.Cast(); err != nil { return nil, errors.Wrap(err, "set defaults") } diff --git a/pbm/storage/mio/minio_test.go b/pbm/storage/mio/minio_test.go index 73311fd86..120920d50 100644 --- a/pbm/storage/mio/minio_test.go +++ b/pbm/storage/mio/minio_test.go @@ -27,7 +27,6 @@ func TestMinio(t *testing.T) { } bucketName := "test-bucket-mio" - //todo: add https test 
epTC, err := minioContainer.Endpoint(ctx, "http")
 	if err != nil {
 		t.Fatalf("failed to get endpoint: %s", err)
 	}

From e37c3814477310a90d819a7d64f2382ca9205206 Mon Sep 17 00:00:00 2001
From: Boris Ilijic
Date: Wed, 1 Oct 2025 17:43:28 +0200
Subject: [PATCH 41/95] Add reference config for minio storage

---
 packaging/conf/pbm-conf-reference.yml | 51 ++++++++++++++++++++++++++-
 1 file changed, 50 insertions(+), 1 deletion(-)

diff --git a/packaging/conf/pbm-conf-reference.yml b/packaging/conf/pbm-conf-reference.yml
index b01b6472d..32a9f86ee 100644
--- a/packaging/conf/pbm-conf-reference.yml
+++ b/packaging/conf/pbm-conf-reference.yml
@@ -10,7 +10,12 @@
 
 #storage:
 
-## Remote backup storage type. Supported types: S3, GCS, shared filesystem, Azure
+## Remote backup storage type. Supported types:
+## - S3,
+## - GCS,
+## - Minio (for S3 compatible storage),
+## - shared filesystem,
+## - Azure
 
 
 #---------------------S3 Storage Configuration--------------------------
 
@@ -69,6 +74,50 @@
 #
 ## The maximum object size that will be stored on the storage
 # maxObjSizeGB: 5018
+#
+
+#---------------------S3 Storage Configuration--------------------------
+# type:
+#   minio:
+
+## Specify the location and name of the bucket that you have configured on the storage
+# region:
+# bucket:
+
+## The data directory to store backups in.
+## When undefined, backups are saved at the root of the bucket.
+# prefix:
+
+## An optional custom URL to access the bucket. Useful for S3-compatible storage (e.g. MinIO)
+# endpointUrl:
+
+# add url map
+#
+## Use https
+# secure: true
+
+## Access credentials
+# credentials:
+#   access-key-id:
+#   secret-access-key:
+#   session-token:
+#   signature-ver: V4
+
+## The size of data chinks (in MB) to upload to the bucket.
+# uploadPartSize: 10
+
+## Data upload configuration
+# maxUploadParts: 10,000
+
+## Enable debug trace of HTTP communication
+# debugTrace: true
+
+## Retry upload configuration options.
+# retryer:
+#   numMaxRetries: 3
+#
+## The maximum object size that will be stored on the storage
+# maxObjSizeGB: 5018
 
 #--------------------Google Cloud Storage Configuration-----------------

From 8d4573c1744aa576e12e7f124e3dce5a3152c3f1 Mon Sep 17 00:00:00 2001
From: Boris Ilijic
Date: Thu, 2 Oct 2025 09:52:50 +0200
Subject: [PATCH 42/95] Add region when displaying storage info

The Region info was displayed only for the S3 storage type.
The storage config is now expanded with a method that resolves
the Region field for any storage type.
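
For illustration, a minimal sketch of how a caller can now render the
storage info without type-switching on the backend (hypothetical snippet,
not part of this change; Region() returns an empty string for backends
that have no region, so the field can simply be omitted when empty):

    line := cfg.Storage.Typ() + " " + cfg.Storage.Path()
    if region := cfg.Storage.Region(); region != "" {
        line += ", region: " + region
    }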
---
 cmd/pbm/status.go    |  5 +----
 pbm/config/config.go | 13 +++++++++++++
 2 files changed, 14 insertions(+), 4 deletions(-)

diff --git a/cmd/pbm/status.go b/cmd/pbm/status.go
index ba4d26594..010e27fef 100644
--- a/cmd/pbm/status.go
+++ b/cmd/pbm/status.go
@@ -540,10 +540,7 @@ func getStorageStat(
 	}
 
 	s.Type = cfg.Storage.Typ()
-
-	if cfg.Storage.Type == storage.S3 {
-		s.Region = cfg.Storage.S3.Region
-	}
+	s.Region = cfg.Storage.Region()
 	s.Path = cfg.Storage.Path()
 
 	bcps, err := pbm.GetAllBackups(ctx)
diff --git a/pbm/config/config.go b/pbm/config/config.go
index a0c0d0531..e41ee0407 100644
--- a/pbm/config/config.go
+++ b/pbm/config/config.go
@@ -425,6 +425,19 @@ func (s *StorageConf) Path() string {
 	return path
 }
 
+func (s *StorageConf) Region() string {
+	region := ""
+
+	switch s.Type {
+	case storage.S3:
+		region = s.S3.Region
+	case storage.Minio:
+		region = s.Minio.Region
+	}
+
+	return region
+}
+
 // RestoreConf is config options for the restore
 //
 //nolint:lll

From dff8e0eb4db39aed4887eec42cacfd3a86a7cb94 Mon Sep 17 00:00:00 2001
From: Boris Ilijic
Date: Thu, 2 Oct 2025 11:57:26 +0200
Subject: [PATCH 43/95] Fix review suggestions

---
 packaging/conf/pbm-conf-reference.yml | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/packaging/conf/pbm-conf-reference.yml b/packaging/conf/pbm-conf-reference.yml
index 32a9f86ee..7d320dd25 100644
--- a/packaging/conf/pbm-conf-reference.yml
+++ b/packaging/conf/pbm-conf-reference.yml
@@ -76,7 +76,7 @@
 # maxObjSizeGB: 5018
 #
 
-#---------------------S3 Storage Configuration--------------------------
+#---------------------Minio Storage Configuration--------------------------
 # type:
 #   minio:
 
@@ -103,8 +103,8 @@
 #   session-token:
 #   signature-ver: V4
 
-## The size of data chinks (in MB) to upload to the bucket.
-# uploadPartSize: 10
+## The size of data chunks in bytes to be uploaded to the storage bucket in a single request.
+# chunkSize: 10485760
 
 ## Data upload configuration
 # maxUploadParts: 10,000

From 8bdddda42a7ef362de14f2945bbd53c2f5be4131 Mon Sep 17 00:00:00 2001
From: Boris Ilijic
Date: Thu, 2 Oct 2025 12:57:09 +0200
Subject: [PATCH 44/95] Rename chunkSize to partSize for minio storage

---
 packaging/conf/pbm-conf-reference.yml | 2 +-
 pbm/config/config_test.go             | 2 +-
 pbm/storage/mio/config.go             | 6 +++---
 pbm/storage/mio/config_test.go        | 6 +++---
 pbm/storage/mio/minio.go              | 2 +-
 5 files changed, 9 insertions(+), 9 deletions(-)

diff --git a/packaging/conf/pbm-conf-reference.yml b/packaging/conf/pbm-conf-reference.yml
index 7d320dd25..9f01a91ff 100644
--- a/packaging/conf/pbm-conf-reference.yml
+++ b/packaging/conf/pbm-conf-reference.yml
@@ -104,7 +104,7 @@
 #   signature-ver: V4
 
 ## The size of data chunks in bytes to be uploaded to the storage bucket in a single request.
-# chunkSize: 10485760 +# partSize: 10485760 ## Data upload configuration # maxUploadParts: 10,000 diff --git a/pbm/config/config_test.go b/pbm/config/config_test.go index 08bcf9465..a48fe0f76 100644 --- a/pbm/config/config_test.go +++ b/pbm/config/config_test.go @@ -167,7 +167,7 @@ func TestIsSameStorage(t *testing.T) { SessionToken: "sess", }, Secure: true, - ChunkSize: 6 << 20, + PartSize: 6 << 20, Retryer: &mio.Retryer{}, } eq := &mio.Config{ diff --git a/pbm/storage/mio/config.go b/pbm/storage/mio/config.go index 94f036b2b..e039ab881 100644 --- a/pbm/storage/mio/config.go +++ b/pbm/storage/mio/config.go @@ -17,7 +17,7 @@ type Config struct { Secure bool `bson:"secure" json:"secure" yaml:"secure"` DebugTrace bool `bson:"debugTrace,omitempty" json:"debugTrace,omitempty" yaml:"debugTrace,omitempty"` - ChunkSize int64 `bson:"chunkSize,omitempty" json:"chunkSize,omitempty" yaml:"chunkSize,omitempty"` + PartSize int64 `bson:"partSize,omitempty" json:"partSize,omitempty" yaml:"partSize,omitempty"` MaxObjSizeGB *float64 `bson:"maxObjSizeGB,omitempty" json:"maxObjSizeGB,omitempty" yaml:"maxObjSizeGB,omitempty"` Retryer *Retryer `bson:"retryer,omitempty" json:"retryer,omitempty" yaml:"retryer,omitempty"` @@ -87,8 +87,8 @@ func (cfg *Config) Cast() error { return errors.New("endpointURL cannot be empty") } - if cfg.ChunkSize == 0 { - cfg.ChunkSize = defaultPartSize + if cfg.PartSize == 0 { + cfg.PartSize = defaultPartSize } if cfg.Retryer == nil { diff --git a/pbm/storage/mio/config_test.go b/pbm/storage/mio/config_test.go index fcd4dbb9a..ce60168c8 100644 --- a/pbm/storage/mio/config_test.go +++ b/pbm/storage/mio/config_test.go @@ -21,7 +21,7 @@ func TestClone(t *testing.T) { SessionToken: "sess", }, Secure: true, - ChunkSize: 6 << 20, + PartSize: 6 << 20, MaxObjSizeGB: &f, Retryer: &Retryer{ NumMaxRetries: 1, @@ -54,7 +54,7 @@ func TestEqual(t *testing.T) { SessionToken: "sess", }, Secure: true, - ChunkSize: 6 << 20, + PartSize: 6 << 20, MaxObjSizeGB: &f, Retryer: &Retryer{ NumMaxRetries: 1, @@ -82,7 +82,7 @@ func TestCast(t *testing.T) { } want := &Config{ EndpointURL: "url", - ChunkSize: defaultPartSize, + PartSize: defaultPartSize, Retryer: &Retryer{ NumMaxRetries: defaultMaxRetries, }, diff --git a/pbm/storage/mio/minio.go b/pbm/storage/mio/minio.go index 8e43837d2..d6b2064eb 100644 --- a/pbm/storage/mio/minio.go +++ b/pbm/storage/mio/minio.go @@ -144,7 +144,7 @@ func (m *Minio) Save(name string, data io.Reader, options ...storage.Option) err defaultPartSize, minPartSize, maxUploadParts, - m.cfg.ChunkSize, + m.cfg.PartSize, ) if m.log != nil && opts.UseLogger { From b0bc8ab082f1e8dc060826219acb4d6524439a76 Mon Sep 17 00:00:00 2001 From: Boris Ilijic Date: Thu, 2 Oct 2025 21:13:18 +0200 Subject: [PATCH 45/95] Add insecureSkipTLSVerify config opt for minio --- pbm/storage/mio/config.go | 4 ++++ pbm/storage/mio/minio.go | 16 +++++++++++++++- 2 files changed, 19 insertions(+), 1 deletion(-) diff --git a/pbm/storage/mio/config.go b/pbm/storage/mio/config.go index e039ab881..d630067e3 100644 --- a/pbm/storage/mio/config.go +++ b/pbm/storage/mio/config.go @@ -21,6 +21,10 @@ type Config struct { MaxObjSizeGB *float64 `bson:"maxObjSizeGB,omitempty" json:"maxObjSizeGB,omitempty" yaml:"maxObjSizeGB,omitempty"` Retryer *Retryer `bson:"retryer,omitempty" json:"retryer,omitempty" yaml:"retryer,omitempty"` + + // InsecureSkipTLSVerify disables client verification of the server's + // certificate chain and host name + InsecureSkipTLSVerify bool `bson:"insecureSkipTLSVerify" json:"insecureSkipTLSVerify" 
yaml:"insecureSkipTLSVerify"` } type Credentials struct { diff --git a/pbm/storage/mio/minio.go b/pbm/storage/mio/minio.go index d6b2064eb..22c1433a6 100644 --- a/pbm/storage/mio/minio.go +++ b/pbm/storage/mio/minio.go @@ -2,7 +2,9 @@ package mio import ( "context" + "crypto/tls" "io" + "net/http" "path" "runtime" "strings" @@ -83,7 +85,8 @@ func NewWithDownloader( } func newMinio(cfg *Config, n string, l log.LogEvent) (*Minio, error) { - if err := cfg.Cast(); err != nil { + err := cfg.Cast() + if err != nil { return nil, errors.Wrap(err, "set defaults") } if l == nil { @@ -105,11 +108,22 @@ func newMinio(cfg *Config, n string, l log.LogEvent) (*Minio, error) { ) } + var transport http.RoundTripper + if cfg.InsecureSkipTLSVerify { + tr, err := minio.DefaultTransport(cfg.Secure) + if err != nil { + return nil, errors.Wrap(err, "transport for InsecureSkipTLSVerify") + } + tr.TLSClientConfig = &tls.Config{InsecureSkipVerify: true} + transport = tr + } + cl, err := minio.New(cfg.resolveEndpointURL(n), &minio.Options{ Creds: creds, Secure: cfg.Secure, Region: cfg.Region, MaxRetries: cfg.Retryer.NumMaxRetries, + Transport: transport, }) if err != nil { return nil, errors.Wrap(err, "minio session") From 529eb1a16dabdfc7a68b93cf61c2c29e840fbae9 Mon Sep 17 00:00:00 2001 From: Boris Ilijic Date: Fri, 3 Oct 2025 09:46:45 +0200 Subject: [PATCH 46/95] Rename endpointUrl -> endpoint and fix typos --- pbm/config/config.go | 2 +- pbm/config/config_test.go | 22 +++++++++++----------- pbm/storage/mio/config.go | 30 +++++++++++++++--------------- pbm/storage/mio/config_test.go | 28 ++++++++++++++-------------- pbm/storage/mio/minio.go | 2 +- pbm/storage/mio/minio_test.go | 12 ++++++------ 6 files changed, 48 insertions(+), 48 deletions(-) diff --git a/pbm/config/config.go b/pbm/config/config.go index e41ee0407..f4b684515 100644 --- a/pbm/config/config.go +++ b/pbm/config/config.go @@ -392,7 +392,7 @@ func (s *StorageConf) Path() string { path += "/" + s.S3.Prefix } case storage.Minio: - path = s.Minio.EndpointURL + path = s.Minio.Endpoint if path == "" { path = "minio://" + s.Minio.Bucket } else { diff --git a/pbm/config/config_test.go b/pbm/config/config_test.go index a48fe0f76..4cc6167bf 100644 --- a/pbm/config/config_test.go +++ b/pbm/config/config_test.go @@ -157,24 +157,24 @@ func TestIsSameStorage(t *testing.T) { t.Run("minio", func(t *testing.T) { cfg := &mio.Config{ - Region: "eu", - EndpointURL: "ep.com", - Bucket: "b1", - Prefix: "p1", + Region: "eu", + Endpoint: "ep.com", + Bucket: "b1", + Prefix: "p1", Credentials: mio.Credentials{ AccessKeyID: "k1", SecretAccessKey: "k2", SessionToken: "sess", }, - Secure: true, + Secure: true, PartSize: 6 << 20, - Retryer: &mio.Retryer{}, + Retryer: &mio.Retryer{}, } eq := &mio.Config{ - Region: "eu", - EndpointURL: "ep.com", - Bucket: "b1", - Prefix: "p1", + Region: "eu", + Endpoint: "ep.com", + Bucket: "b1", + Prefix: "p1", } if !cfg.IsSameStorage(eq) { t.Errorf("config storage should identify the same instance: cfg=%+v, eq=%+v, diff=%s", @@ -188,7 +188,7 @@ func TestIsSameStorage(t *testing.T) { } neq = cfg.Clone() - neq.EndpointURL = "ep2.com" + neq.Endpoint = "ep2.com" if cfg.IsSameStorage(neq) { t.Errorf("storage instances has different EndpointURL: cfg=%+v, eq=%+v", cfg, neq) } diff --git a/pbm/storage/mio/config.go b/pbm/storage/mio/config.go index d630067e3..465b360e4 100644 --- a/pbm/storage/mio/config.go +++ b/pbm/storage/mio/config.go @@ -8,14 +8,14 @@ import ( //nolint:lll type Config struct { - Region string `bson:"region" json:"region" 
yaml:"region"` - EndpointURL string `bson:"endpointUrl,omitempty" json:"endpointUrl" yaml:"endpointUrl,omitempty"` - EndpointURLMap map[string]string `bson:"endpointUrlMap,omitempty" json:"endpointUrlMap,omitempty" yaml:"endpointUrlMap,omitempty"` - Bucket string `bson:"bucket" json:"bucket" yaml:"bucket"` - Prefix string `bson:"prefix" json:"prefix" yaml:"prefix"` - Credentials Credentials `bson:"credentials" json:"-" yaml:"credentials"` - Secure bool `bson:"secure" json:"secure" yaml:"secure"` - DebugTrace bool `bson:"debugTrace,omitempty" json:"debugTrace,omitempty" yaml:"debugTrace,omitempty"` + Region string `bson:"region" json:"region" yaml:"region"` + Endpoint string `bson:"endpoint,omitempty" json:"endpoint" yaml:"endpoint,omitempty"` + EndpointMap map[string]string `bson:"endpointMap,omitempty" json:"endpointMap,omitempty" yaml:"endpointMap,omitempty"` + Bucket string `bson:"bucket" json:"bucket" yaml:"bucket"` + Prefix string `bson:"prefix" json:"prefix" yaml:"prefix"` + Credentials Credentials `bson:"credentials" json:"-" yaml:"credentials"` + Secure bool `bson:"secure" json:"secure" yaml:"secure"` + DebugTrace bool `bson:"debugTrace,omitempty" json:"debugTrace,omitempty" yaml:"debugTrace,omitempty"` PartSize int64 `bson:"partSize,omitempty" json:"partSize,omitempty" yaml:"partSize,omitempty"` MaxObjSizeGB *float64 `bson:"maxObjSizeGB,omitempty" json:"maxObjSizeGB,omitempty" yaml:"maxObjSizeGB,omitempty"` @@ -35,7 +35,7 @@ type Credentials struct { } type Retryer struct { - // Num max Retries is the number of max retries that will be performed. + // NumMaxRetries is the number of max retries that will be performed. NumMaxRetries int `bson:"numMaxRetries,omitempty" json:"numMaxRetries,omitempty" yaml:"numMaxRetries,omitempty"` } @@ -45,7 +45,7 @@ func (cfg *Config) Clone() *Config { } c := *cfg - c.EndpointURLMap = maps.Clone(cfg.EndpointURLMap) + c.EndpointMap = maps.Clone(cfg.EndpointMap) if cfg.MaxObjSizeGB != nil { v := *cfg.MaxObjSizeGB c.MaxObjSizeGB = &v @@ -71,10 +71,10 @@ func (cfg *Config) IsSameStorage(other *Config) bool { if cfg.Region != other.Region { return false } - if cfg.EndpointURL != other.EndpointURL { + if cfg.Endpoint != other.Endpoint { return false } - if !maps.Equal(cfg.EndpointURLMap, other.EndpointURLMap) { + if !maps.Equal(cfg.EndpointMap, other.EndpointMap) { return false } if cfg.Bucket != other.Bucket { @@ -87,7 +87,7 @@ func (cfg *Config) IsSameStorage(other *Config) bool { } func (cfg *Config) Cast() error { - if cfg.EndpointURL == "" { + if cfg.Endpoint == "" { return errors.New("endpointURL cannot be empty") } @@ -112,8 +112,8 @@ func (cfg *Config) Cast() error { // EndpointURL or associated EndpointURLMap configuration fields. // If specified EndpointURLMap overrides EndpointURL field. 
func (cfg *Config) resolveEndpointURL(node string) string { - ep := cfg.EndpointURL - if epm, ok := cfg.EndpointURLMap[node]; ok { + ep := cfg.Endpoint + if epm, ok := cfg.EndpointMap[node]; ok { ep = epm } return ep diff --git a/pbm/storage/mio/config_test.go b/pbm/storage/mio/config_test.go index ce60168c8..e3215e289 100644 --- a/pbm/storage/mio/config_test.go +++ b/pbm/storage/mio/config_test.go @@ -10,11 +10,11 @@ import ( func TestClone(t *testing.T) { f := 1.1 c1 := &Config{ - Region: "eu", - EndpointURL: "ep.com", - EndpointURLMap: map[string]string{"n1": "ep1", "n2": "ep2"}, - Bucket: "b1", - Prefix: "p1", + Region: "eu", + Endpoint: "ep.com", + EndpointMap: map[string]string{"n1": "ep1", "n2": "ep2"}, + Bucket: "b1", + Prefix: "p1", Credentials: Credentials{ AccessKeyID: "k1", SecretAccessKey: "k2", @@ -30,7 +30,7 @@ func TestClone(t *testing.T) { c2 := c1.Clone() - if &c1.EndpointURLMap == &c2.EndpointURLMap || + if &c1.EndpointMap == &c2.EndpointMap || c1.MaxObjSizeGB == c2.MaxObjSizeGB || c1.Retryer == c2.Retryer { t.Fatal("Deep copy of pointer fields is missing") @@ -43,11 +43,11 @@ func TestClone(t *testing.T) { func TestEqual(t *testing.T) { f := 1.1 c1 := &Config{ - Region: "eu", - EndpointURL: "ep.com", - EndpointURLMap: map[string]string{"n1": "ep1", "n2": "ep2"}, - Bucket: "b1", - Prefix: "p1", + Region: "eu", + Endpoint: "ep.com", + EndpointMap: map[string]string{"n1": "ep1", "n2": "ep2"}, + Bucket: "b1", + Prefix: "p1", Credentials: Credentials{ AccessKeyID: "k1", SecretAccessKey: "k2", @@ -75,14 +75,14 @@ func TestCast(t *testing.T) { t.Fatal("want error when EndpointURL is not specified") } - c.EndpointURL = "url" + c.Endpoint = "url" err := c.Cast() if err != nil { t.Fatalf("got error during Cast: %v", err) } want := &Config{ - EndpointURL: "url", - PartSize: defaultPartSize, + Endpoint: "url", + PartSize: defaultPartSize, Retryer: &Retryer{ NumMaxRetries: defaultMaxRetries, }, diff --git a/pbm/storage/mio/minio.go b/pbm/storage/mio/minio.go index 22c1433a6..4466be547 100644 --- a/pbm/storage/mio/minio.go +++ b/pbm/storage/mio/minio.go @@ -23,7 +23,7 @@ const ( defaultPartSize int64 = 10 * 1024 * 1024 // 10Mb minPartSize int64 = 1024 * 1024 * 5 // 5Mb - // minio allows 50TiB, sensibile default is aligned with S3 + // minio allows 50TiB, sensible default is aligned with S3 defaultMaxObjSizeGB = 5018 // 4.9 TB defaultMaxRetries = 10 diff --git a/pbm/storage/mio/minio_test.go b/pbm/storage/mio/minio_test.go index 120920d50..cf83964d4 100644 --- a/pbm/storage/mio/minio_test.go +++ b/pbm/storage/mio/minio_test.go @@ -38,17 +38,17 @@ func TestMinio(t *testing.T) { epMinio := u.Host cfg := &Config{ - EndpointURL: epMinio, - Bucket: bucketName, - Prefix: "p1", - Secure: false, + Endpoint: epMinio, + Bucket: bucketName, + Prefix: "p1", + Secure: false, Credentials: Credentials{ SigVer: "V4", AccessKeyID: "minioadmin", SecretAccessKey: "minioadmin", }, } - minioCl, err := minio.New(cfg.EndpointURL, &minio.Options{ + minioCl, err := minio.New(cfg.Endpoint, &minio.Options{ Creds: credentials.NewStaticV4(cfg.Credentials.AccessKeyID, cfg.Credentials.SecretAccessKey, ""), Secure: false, }) @@ -72,7 +72,7 @@ func TestMinio(t *testing.T) { t.Run("with downloader", func(t *testing.T) { stg, err := NewWithDownloader(cfg, "node", nil, 0, 0, 0) if err != nil { - t.Fatalf("failed to create s3 storage: %s", err) + t.Fatalf("failed to create minio storage: %s", err) } storage.RunStorageBaseTests(t, stg, storage.Minio) From 36dee1072bcb641add247f8ca5e2f633fa4dd4ea Mon Sep 17 00:00:00 2001 
From: Boris Ilijic Date: Fri, 3 Oct 2025 11:14:11 +0200 Subject: [PATCH 47/95] Add forcePathStyle config option for Minio storage --- pbm/storage/mio/config.go | 7 ++++++- pbm/storage/mio/minio.go | 20 +++++++++++++++----- 2 files changed, 21 insertions(+), 6 deletions(-) diff --git a/pbm/storage/mio/config.go b/pbm/storage/mio/config.go index 465b360e4..716c4c8ed 100644 --- a/pbm/storage/mio/config.go +++ b/pbm/storage/mio/config.go @@ -24,7 +24,8 @@ type Config struct { // InsecureSkipTLSVerify disables client verification of the server's // certificate chain and host name - InsecureSkipTLSVerify bool `bson:"insecureSkipTLSVerify" json:"insecureSkipTLSVerify" yaml:"insecureSkipTLSVerify"` + InsecureSkipTLSVerify bool `bson:"insecureSkipTLSVerify" json:"insecureSkipTLSVerify" yaml:"insecureSkipTLSVerify"` + ForcePathStyle *bool `bson:"forcePathStyle,omitempty" json:"forcePathStyle,omitempty" yaml:"forcePathStyle,omitempty"` } type Credentials struct { @@ -50,6 +51,10 @@ func (cfg *Config) Clone() *Config { v := *cfg.MaxObjSizeGB c.MaxObjSizeGB = &v } + if cfg.ForcePathStyle != nil { + v := *cfg.ForcePathStyle + c.ForcePathStyle = &v + } if cfg.Retryer != nil { v := *cfg.Retryer c.Retryer = &v diff --git a/pbm/storage/mio/minio.go b/pbm/storage/mio/minio.go index 4466be547..1d592aa23 100644 --- a/pbm/storage/mio/minio.go +++ b/pbm/storage/mio/minio.go @@ -118,12 +118,22 @@ func newMinio(cfg *Config, n string, l log.LogEvent) (*Minio, error) { transport = tr } + bucketLookup := minio.BucketLookupAuto + if cfg.ForcePathStyle != nil { + if *cfg.ForcePathStyle { + bucketLookup = minio.BucketLookupPath + } else { + bucketLookup = minio.BucketLookupDNS + } + } + cl, err := minio.New(cfg.resolveEndpointURL(n), &minio.Options{ - Creds: creds, - Secure: cfg.Secure, - Region: cfg.Region, - MaxRetries: cfg.Retryer.NumMaxRetries, - Transport: transport, + Creds: creds, + Secure: cfg.Secure, + Region: cfg.Region, + MaxRetries: cfg.Retryer.NumMaxRetries, + Transport: transport, + BucketLookup: bucketLookup, }) if err != nil { return nil, errors.Wrap(err, "minio session") From 4df0ee6b8aec0eafa3f29c1b238eb184f8f6016b Mon Sep 17 00:00:00 2001 From: Boris Ilijic Date: Fri, 3 Oct 2025 11:21:15 +0200 Subject: [PATCH 48/95] Make gcsEndpointURL private --- pbm/storage/gcs/gcs.go | 2 +- pbm/storage/gcs/hmac_client.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pbm/storage/gcs/gcs.go b/pbm/storage/gcs/gcs.go index 63cc98540..69eba550a 100644 --- a/pbm/storage/gcs/gcs.go +++ b/pbm/storage/gcs/gcs.go @@ -13,7 +13,7 @@ import ( ) const ( - GCSEndpointURL = "storage.googleapis.com" + gcsEndpointURL = "storage.googleapis.com" defaultMaxObjSizeGB = 5018 // 4.9 TB ) diff --git a/pbm/storage/gcs/hmac_client.go b/pbm/storage/gcs/hmac_client.go index c47f69962..6d1950d98 100644 --- a/pbm/storage/gcs/hmac_client.go +++ b/pbm/storage/gcs/hmac_client.go @@ -36,7 +36,7 @@ func newHMACClient(opts *Config, l log.LogEvent) (*hmacClient, error) { } } - minioClient, err := minio.New(GCSEndpointURL, &minio.Options{ + minioClient, err := minio.New(gcsEndpointURL, &minio.Options{ Creds: credentials.NewStaticV2(opts.Credentials.HMACAccessKey, opts.Credentials.HMACSecret, ""), }) if err != nil { From dd3d3ff3cc5d3b64555d4e287fb6bc2795fbb51b Mon Sep 17 00:00:00 2001 From: Boris Ilijic Date: Fri, 3 Oct 2025 11:48:35 +0200 Subject: [PATCH 49/95] Cleanup config reference doc --- packaging/conf/pbm-conf-reference.yml | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git 
a/packaging/conf/pbm-conf-reference.yml b/packaging/conf/pbm-conf-reference.yml index 9f01a91ff..e69225716 100644 --- a/packaging/conf/pbm-conf-reference.yml +++ b/packaging/conf/pbm-conf-reference.yml @@ -91,11 +91,17 @@ ## An optional custom URL to access the bucket. Useful for S3-compatible storage (e.g. MinIO) # endpointUrl: -# add url map # ## Use https # secure: true +## Allow PBM to upload data to storage with self-signed TLS certificates. +## Use it with caution as it leaves the connection vulnerable to man-in-the-middle attacks. +# insecureSkipTLSVerify: false + +## Enable path-style URLs; by default, virtual-hosted style is used +# forcePathStyle: false + ## Access credentials # credentials: # access-key-id: @@ -106,9 +112,6 @@ ## The size of data chunks in bytes to be uploaded to the storage bucket in a single request. # partSize: 10485760 -## Data upload configuration -# maxUploadParts: 10,000 - ## Enable debug trace of HTTP communication # debugTrace: true From 8b174f95241ae2137a94409e57afc280a3796695 Mon Sep 17 00:00:00 2001 From: Ivan Groenewold <9805809+igroene@users.noreply.github.com> Date: Mon, 6 Oct 2025 06:46:16 -0300 Subject: [PATCH 50/95] PBM-1321 - pbm log says "waiting for the oplog" when it's actually waiting for the backup to finish (#1204) --- pbm/backup/logical.go | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/pbm/backup/logical.go b/pbm/backup/logical.go index 7821e725d..2721f05e3 100644 --- a/pbm/backup/logical.go +++ b/pbm/backup/logical.go @@ -200,7 +200,7 @@ func (b *Backup) doLogical( return errors.Wrap(err, "generate archive meta v1") } - l.Info("dump finished, waiting for the oplog") + l.Info("dump finished for RS: %s", b.brief.SetName) err = ChangeRSState(b.leadConn, bcp.Name, rsMeta.Name, defs.StatusDumpDone, "") if err != nil { @@ -208,28 +208,35 @@ } if inf.IsLeader() { + if b.brief.Sharded { + l.Info("checking status of dump on other nodes") + } err := b.reconcileStatus(ctx, bcp.Name, opid.String(), defs.StatusDumpDone, nil) if err != nil { return errors.Wrap(err, "check cluster for dump done") } } else { + l.Info("waiting for leader to validate dump done on all nodes") err = b.waitForStatus(ctx, bcp.Name, defs.StatusDumpDone, nil) if err != nil { return errors.Wrap(err, "waiting for dump done") } } + l.Info("stopping oplog slicer on this node") lastSavedTS, oplogSize, err := stopOplogSlicer() if err != nil { return errors.Wrap(err, "oplog") } + l.Info("setting last write timestamp on this node") err = SetRSLastWrite(b.leadConn, bcp.Name, rsMeta.Name, lastSavedTS) if err != nil { return errors.Wrap(err, "set shard's last write ts") } if inf.IsLeader() { + l.Info("setting last common write timestamp across all nodes") err = b.setClusterLastWrite(ctx, bcp.Name) if err != nil { return errors.Wrap(err, "set cluster last write ts") From f225781952aa9b8d5c6dd47e6b68ad45654fa73d Mon Sep 17 00:00:00 2001 From: Sandra Date: Mon, 6 Oct 2025 14:30:03 +0300 Subject: [PATCH 51/95] PBM_e2e_tests.
Remove HMAC and add MinIO storage type with AWS bucket --- e2e-tests/cmd/pbm-test/run.go | 2 +- e2e-tests/cmd/pbm-test/run_physical.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/e2e-tests/cmd/pbm-test/run.go b/e2e-tests/cmd/pbm-test/run.go index 57d7d4ee4..1e45159ae 100644 --- a/e2e-tests/cmd/pbm-test/run.go +++ b/e2e-tests/cmd/pbm-test/run.go @@ -26,7 +26,7 @@ func run(t *sharded.Cluster, typ testTyp) { }{ {"AWS", "/etc/pbm/aws.yaml"}, {"GCS", "/etc/pbm/gcs.yaml"}, - {"GCS_HMAC", "/etc/pbm/gcs_hmac.yaml"}, + {"AWS_MinIO", "/etc/pbm/aws_minio.yaml"}, {"Azure", "/etc/pbm/azure.yaml"}, {"FS", "/etc/pbm/fs.yaml"}, } diff --git a/e2e-tests/cmd/pbm-test/run_physical.go b/e2e-tests/cmd/pbm-test/run_physical.go index 7034b0d71..339389b96 100644 --- a/e2e-tests/cmd/pbm-test/run_physical.go +++ b/e2e-tests/cmd/pbm-test/run_physical.go @@ -17,7 +17,7 @@ func runPhysical(t *sharded.Cluster, typ testTyp) { }{ {"AWS", "/etc/pbm/aws.yaml"}, {"GCS", "/etc/pbm/gcs.yaml"}, - {"GCS_HMAC", "/etc/pbm/gcs_hmac.yaml"}, + {"AWS_MinIO", "/etc/pbm/aws_minio.yaml"}, {"Azure", "/etc/pbm/azure.yaml"}, {"FS", "/etc/pbm/fs.yaml"}, } From edb25baa5082c92c4ff351a8d60881b2c4ce659f Mon Sep 17 00:00:00 2001 From: Neha Oudin Date: Wed, 8 Oct 2025 09:46:33 +0200 Subject: [PATCH 52/95] fix: integrate review comments --- pbm/storage/fs/fs.go | 2 +- pbm/storage/s3/s3.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pbm/storage/fs/fs.go b/pbm/storage/fs/fs.go index c6fd2da22..199c2c796 100644 --- a/pbm/storage/fs/fs.go +++ b/pbm/storage/fs/fs.go @@ -65,7 +65,7 @@ func (cfg *Config) IsSameStorage(other *Config) bool { func (cfg *Config) Cast() error { if cfg == nil { - return errors.New("Missing blackhole configuration with blackhole storage type.") + return errors.New("missing filesystem configuration with filesystem storage type.") } if cfg.Path == "" { return errors.New("path can't be empty") diff --git a/pbm/storage/s3/s3.go b/pbm/storage/s3/s3.go index 88b97e770..c32f307d1 100644 --- a/pbm/storage/s3/s3.go +++ b/pbm/storage/s3/s3.go @@ -201,7 +201,7 @@ func (cfg *Config) IsSameStorage(other *Config) bool { func (cfg *Config) Cast() error { if cfg == nil { - return errors.New("Missing S3 configuration with S3 storage type.") + return errors.New("missing S3 configuration with S3 storage type") } if cfg.Region == "" { cfg.Region = defaultS3Region From 753d11dd0c8b2974062dde733fbc27c0e5b9e11b Mon Sep 17 00:00:00 2001 From: Neha Oudin Date: Wed, 8 Oct 2025 09:47:22 +0200 Subject: [PATCH 53/95] fix: final punctuation error --- pbm/storage/fs/fs.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pbm/storage/fs/fs.go b/pbm/storage/fs/fs.go index 199c2c796..e24ed5a19 100644 --- a/pbm/storage/fs/fs.go +++ b/pbm/storage/fs/fs.go @@ -65,7 +65,7 @@ func (cfg *Config) IsSameStorage(other *Config) bool { func (cfg *Config) Cast() error { if cfg == nil { - return errors.New("missing filesystem configuration with filesystem storage type.") + return errors.New("missing filesystem configuration with filesystem storage type") } if cfg.Path == "" { return errors.New("path can't be empty") From 1367ad547e2b3db31a203d7144a0178f6294eb59 Mon Sep 17 00:00:00 2001 From: Boris Ilijic Date: Wed, 8 Oct 2025 11:58:57 +0200 Subject: [PATCH 54/95] Add guards in storage config for minio and oss --- pbm/storage/mio/config.go | 4 ++++ pbm/storage/mio/config_test.go | 9
+++++++-- pbm/storage/oss/client.go | 4 ++++ 3 files changed, 15 insertions(+), 2 deletions(-) diff --git a/pbm/storage/mio/config.go b/pbm/storage/mio/config.go index 716c4c8ed..7685150ae 100644 --- a/pbm/storage/mio/config.go +++ b/pbm/storage/mio/config.go @@ -92,6 +92,10 @@ func (cfg *Config) IsSameStorage(other *Config) bool { } func (cfg *Config) Cast() error { + if cfg == nil { + return errors.New("missing minio configuration with minio storage type") + } + if cfg.Endpoint == "" { return errors.New("endpointURL cannot be empty") } diff --git a/pbm/storage/mio/config_test.go b/pbm/storage/mio/config_test.go index e3215e289..327696a3e 100644 --- a/pbm/storage/mio/config_test.go +++ b/pbm/storage/mio/config_test.go @@ -69,14 +69,19 @@ func TestEqual(t *testing.T) { } func TestCast(t *testing.T) { - c := &Config{} + var c *Config + err := c.Cast() + if err == nil { + t.Fatal("sigsegv should happened instead") + } + c = &Config{} if err := c.Cast(); err == nil { t.Fatal("want error when EndpointURL is not specified") } c.Endpoint = "url" - err := c.Cast() + err = c.Cast() if err != nil { t.Fatalf("got error during Cast: %v", err) } diff --git a/pbm/storage/oss/client.go b/pbm/storage/oss/client.go index 7b897edd5..aa29ed5aa 100644 --- a/pbm/storage/oss/client.go +++ b/pbm/storage/oss/client.go @@ -2,6 +2,7 @@ package oss import ( "context" + "errors" "fmt" "time" @@ -63,6 +64,9 @@ type Credentials struct { } func (cfg *Config) Cast() error { + if cfg == nil { + return errors.New("missing oss configuration with oss storage type") + } if cfg.Region == "" { cfg.Region = defaultOSSRegion } From c3a35a046cc8f8fb905c8739e01289e2a53f3440 Mon Sep 17 00:00:00 2001 From: Boris Ilijic Date: Wed, 8 Oct 2025 12:05:19 +0200 Subject: [PATCH 55/95] Update pbm/storage/mio/config_test.go Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- pbm/storage/mio/config_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pbm/storage/mio/config_test.go b/pbm/storage/mio/config_test.go index 327696a3e..b023ef98d 100644 --- a/pbm/storage/mio/config_test.go +++ b/pbm/storage/mio/config_test.go @@ -72,7 +72,7 @@ func TestCast(t *testing.T) { var c *Config err := c.Cast() if err == nil { - t.Fatal("sigsegv should happened instead") + t.Fatal("sigsegv should have happened instead") } c = &Config{} From 68bf4338ee064a5b3d9e390293299c6c0a001494 Mon Sep 17 00:00:00 2001 From: Boris Ilijic Date: Wed, 8 Oct 2025 15:53:15 +0200 Subject: [PATCH 56/95] Add CRC check when uploading to GCS with HMAC When uploading a file during a network interruption, it's possible to have corrupted content in the final upload. That can be mitigated with an additional check: calculating the CRC locally and comparing it with the one reported by GCS.
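A minimal sketch of the idea, assuming minio-go v7 and a backend that returns a composite CRC32C checksum for the finished object; the client cl, bucket, and key names are placeholders and error handling is trimmed:

    package main

    import (
    	"context"
    	"encoding/base64"
    	"encoding/binary"
    	"fmt"
    	"hash/crc32"
    	"io"

    	"github.com/minio/minio-go/v7"
    )

    // putWithCRC uploads data while computing CRC32C locally via a TeeReader,
    // then compares the local sum with the checksum the server reports.
    func putWithCRC(ctx context.Context, cl *minio.Client, bucket, key string, data io.Reader) error {
    	crc := crc32.New(crc32.MakeTable(crc32.Castagnoli))
    	info, err := cl.PutObject(ctx, bucket, key, io.TeeReader(data, crc), -1, minio.PutObjectOptions{})
    	if err != nil {
    		return err
    	}
    	// GCS represents CRC32C as base64 of the big-endian uint32.
    	sum := make([]byte, 4)
    	binary.BigEndian.PutUint32(sum, crc.Sum32())
    	if local := base64.StdEncoding.EncodeToString(sum); info.ChecksumCRC32C != local {
    		return fmt.Errorf("wrong CRC after uploading %s: remote %s, local %s", key, info.ChecksumCRC32C, local)
    	}
    	return nil
    }

The TeeReader keeps the check single-pass: the CRC is accumulated while the upload streams, so no extra read of the source is needed.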
--- pbm/storage/gcs/hmac_client.go | 23 ++++++++++++++++++++--- 1 file changed, 20 insertions(+), 3 deletions(-) diff --git a/pbm/storage/gcs/hmac_client.go b/pbm/storage/gcs/hmac_client.go index 6d1950d98..cfe00d1fd 100644 --- a/pbm/storage/gcs/hmac_client.go +++ b/pbm/storage/gcs/hmac_client.go @@ -2,6 +2,9 @@ package gcs import ( "context" + "encoding/base64" + "encoding/binary" + "hash/crc32" "io" "path" "runtime" @@ -73,22 +76,30 @@ func (h hmacClient) save(name string, data io.Reader, options ...storage.Option) partSize, storage.PrettySize(partSize)) } + crc := crc32.New(crc32.MakeTable(crc32.Castagnoli)) + dataWithCRC := io.TeeReader(data, crc) + putOpts := minio.PutObjectOptions{ PartSize: uint64(partSize), NumThreads: uint(max(runtime.NumCPU()/2, 1)), } - - _, err := h.client.PutObject( + putInfo, err := h.client.PutObject( context.Background(), h.opts.Bucket, path.Join(h.opts.Prefix, name), - data, + dataWithCRC, -1, putOpts, ) if err != nil { return errors.Wrap(err, "PutObject") } + + localCRC := crcToBase64(crc.Sum32()) + if putInfo.ChecksumCRC32C != localCRC { + return errors.Errorf("wrong CRC after uploading %s", name) + } + return nil } @@ -215,3 +226,9 @@ func (h hmacClient) getPartialObject(name string, buf *storage.Arena, start, len return ch, nil } + +func crcToBase64(v uint32) string { + buf := make([]byte, 4) + binary.BigEndian.PutUint32(buf, v) + return base64.StdEncoding.EncodeToString(buf) +} From 3161a312725e0fc8f652d77c86b8ecbbd586be0b Mon Sep 17 00:00:00 2001 From: Boris Ilijic Date: Thu, 9 Oct 2025 09:44:57 +0200 Subject: [PATCH 57/95] Add tests to provoke GCS upload inconsistency issue ... when uploading over an unstable network --- pbm/storage/mio/minio_test.go | 175 ++++++++++++++++++++++++++++++++++ 1 file changed, 175 insertions(+) diff --git a/pbm/storage/mio/minio_test.go b/pbm/storage/mio/minio_test.go index cf83964d4..c18bbfa49 100644 --- a/pbm/storage/mio/minio_test.go +++ b/pbm/storage/mio/minio_test.go @@ -2,8 +2,12 @@ package mio import ( "context" + "io" "net/url" + "path" + "runtime" "testing" + "time" "github.com/minio/minio-go/v7" "github.com/minio/minio-go/v7/pkg/credentials" @@ -80,3 +84,174 @@ func TestMinio(t *testing.T) { storage.RunSplitMergeMWTests(t, stg) }) } + +// TestUploadGCS shows how it's possible to upload a corrupted file +// without getting any error from the minio library.
+// +// To simulate network interruption use: +// tc qdisc add dev eth0 root netem loss 100% +// +// To revert it to normal use: +// tc qdisc del dev eth0 root netem +func TestUploadGCS(t *testing.T) { + t.Skip("for manual invocation, it will be deleted after GCS HMAC is deprecated") + + ep := "storage.googleapis.com" + bucket := "gcs-bucket" + prefix := "test-prefix" + accessKeyID := "key-id" + secretAccessKey := "secret-key" + + fname := time.Now().Format("2006-01-02T15:04:05") + + mc, err := minio.New(ep, &minio.Options{ + Creds: credentials.NewStaticV2(accessKeyID, secretAccessKey, ""), + Secure: true, + }) + if err != nil { + t.Fatalf("minio client creation for GCS: %v", err) + } + t.Log("minio client created") + + t.Logf("uploading file: %s", fname) + + infR := NewInfiniteCustomReader() + r := io.LimitReader(infR, targetSizeBytes) + + putOpts := minio.PutObjectOptions{ + PartSize: uint64(defaultPartSize), + NumThreads: uint(max(runtime.NumCPU()/2, 1)), + } + info, err := mc.PutObject( + context.Background(), + bucket, + path.Join(prefix, fname), + r, + -1, + putOpts, + ) + if err != nil { + t.Fatalf("put object: %v", err) + } + + t.Logf("upload info: %#v", info) +} + +func TestUploadAWSSigV2(t *testing.T) { + t.Skip("for manual invocation, it will be deleted after GCS HMAC is deprecated") + + ep := "s3.amazonaws.com" + region := "eu-central-1" + bucket := "aws-bucket" + prefix := "test-prefix" + accessKeyID := "key-id" + secretAccessKey := "secret-key" + + fname := time.Now().Format("2006-01-02T15:04:05") + + mc, err := minio.New(ep, &minio.Options{ + Region: region, + Creds: credentials.NewStaticV2(accessKeyID, secretAccessKey, ""), + Secure: true, + }) + if err != nil { + t.Fatalf("minio client creation for aws: %v", err) + } + t.Log("minio client created for aws with sigV2") + + t.Logf("uploading file: %s", fname) + + infR := NewInfiniteCustomReader() + r := io.LimitReader(infR, targetSizeBytes) + + putOpts := minio.PutObjectOptions{ + PartSize: uint64(defaultPartSize), + NumThreads: uint(max(runtime.NumCPU()/2, 1)), + } + info, err := mc.PutObject( + context.Background(), + bucket, + path.Join(prefix, fname), + r, + -1, + putOpts, + ) + if err != nil { + t.Fatalf("put object: %v", err) + } + + t.Logf("upload info: %#v", info) +} + +func TestUploadAWSSigV4(t *testing.T) { + t.Skip("for manual invocation, it will be deleted after GCS HMAC is deprecated") + + ep := "s3.amazonaws.com" + region := "eu-central-1" + bucket := "aws-bucket" + prefix := "test-prefix" + accessKeyID := "key-id" + secretAccessKey := "secret-key" + + fname := time.Now().Format("2006-01-02T15:04:05") + + mc, err := minio.New(ep, &minio.Options{ + Region: region, + Creds: credentials.NewStaticV4(accessKeyID, secretAccessKey, ""), + Secure: true, + }) + if err != nil { + t.Fatalf("minio client creation for aws: %v", err) + } + t.Log("minio client created for aws with sigV4") + + t.Logf("uploading file: %s ....", fname) + + infR := NewInfiniteCustomReader() + r := io.LimitReader(infR, targetSizeBytes) + + putOpts := minio.PutObjectOptions{ + PartSize: uint64(defaultPartSize), + NumThreads: uint(max(runtime.NumCPU()/2, 1)), + } + info, err := mc.PutObject( + context.Background(), + bucket, + path.Join(prefix, fname), + r, + -1, + putOpts, + ) + if err != nil { + t.Fatalf("put object: %v", err) + } + + t.Logf("upload info: %#v", info) +} + +const targetSizeBytes = 1000 * 1024 * 1024 + +type InfiniteCustomReader struct { + pattern []byte + patternIndex int +} + +func NewInfiniteCustomReader() *InfiniteCustomReader { 
+ pattern := []byte{0xAA, 0xBB, 0xCC, 0xDD, 0xEE, 0xFF, 0x11, 0x22} + + return &InfiniteCustomReader{ + pattern: pattern, + patternIndex: 0, + } +} + +func (r *InfiniteCustomReader) Read(p []byte) (int, error) { + readLen := len(p) + + for i := range readLen { + p[i] = r.pattern[r.patternIndex] + r.patternIndex = (r.patternIndex + 1) % len(r.pattern) + } + + return readLen, nil +} From 3e10ed2a574e04bdbca62ada3bc3c261cda5374a Mon Sep 17 00:00:00 2001 From: Sandra Date: Thu, 9 Oct 2025 12:03:22 +0300 Subject: [PATCH 58/95] PBM_e2e_tests. Return HMAC for tests --- e2e-tests/cmd/pbm-test/run.go | 1 + e2e-tests/cmd/pbm-test/run_physical.go | 1 + 2 files changed, 2 insertions(+) diff --git a/e2e-tests/cmd/pbm-test/run.go b/e2e-tests/cmd/pbm-test/run.go index 1e45159ae..36013b456 100644 --- a/e2e-tests/cmd/pbm-test/run.go +++ b/e2e-tests/cmd/pbm-test/run.go @@ -26,6 +26,7 @@ func run(t *sharded.Cluster, typ testTyp) { }{ {"AWS", "/etc/pbm/aws.yaml"}, {"GCS", "/etc/pbm/gcs.yaml"}, + {"GCS_HMAC", "/etc/pbm/gcs_hmac.yaml"}, {"AWS_MinIO", "/etc/pbm/aws_minio.yaml"}, {"Azure", "/etc/pbm/azure.yaml"}, {"FS", "/etc/pbm/fs.yaml"}, diff --git a/e2e-tests/cmd/pbm-test/run_physical.go b/e2e-tests/cmd/pbm-test/run_physical.go index 339389b96..b89a7d820 100644 --- a/e2e-tests/cmd/pbm-test/run_physical.go +++ b/e2e-tests/cmd/pbm-test/run_physical.go @@ -17,6 +17,7 @@ func runPhysical(t *sharded.Cluster, typ testTyp) { }{ {"AWS", "/etc/pbm/aws.yaml"}, {"GCS", "/etc/pbm/gcs.yaml"}, + {"GCS_HMAC", "/etc/pbm/gcs_hmac.yaml"}, {"AWS_MinIO", "/etc/pbm/aws_minio.yaml"}, {"Azure", "/etc/pbm/azure.yaml"}, {"FS", "/etc/pbm/fs.yaml"}, From e5441de5f61a11a223c044c97b08872174c8e9de Mon Sep 17 00:00:00 2001 From: Boris Ilijic Date: Thu, 9 Oct 2025 12:14:13 +0200 Subject: [PATCH 59/95] Log CRC in case of invalid upload for GCS HMAC --- pbm/storage/gcs/hmac_client.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pbm/storage/gcs/hmac_client.go b/pbm/storage/gcs/hmac_client.go index cfe00d1fd..fae5c1ce6 100644 --- a/pbm/storage/gcs/hmac_client.go +++ b/pbm/storage/gcs/hmac_client.go @@ -97,7 +97,8 @@ func (h hmacClient) save(name string, data io.Reader, options ...storage.Option) localCRC := crcToBase64(crc.Sum32()) if putInfo.ChecksumCRC32C != localCRC { - return errors.Errorf("wrong CRC after uploading %s", name) + return errors.Errorf("wrong CRC after uploading %s, GCS: %s, PBM: %s", + name, putInfo.ChecksumCRC32C, localCRC) } return nil From 1d6daf80ebf89c5a077a1c2563adf512191cabb8 Mon Sep 17 00:00:00 2001 From: Boris Ilijic Date: Fri, 10 Oct 2025 17:02:07 +0200 Subject: [PATCH 60/95] Cleanup existing Azure logic Remove unused constant: defaultUploadMaxBuff. 
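For reference, the removed zero-guard and its replacement are equivalent; a minimal sketch of the clamp using the builtin max (available since Go 1.21), with illustrative names:

    package main

    import (
    	"fmt"
    	"runtime"
    )

    func main() {
    	// before the cleanup: explicit guard against a zero worker count on single-core hosts
    	cc := runtime.NumCPU() / 2
    	if cc == 0 {
    		cc = 1
    	}

    	// after: the same clamp as a single expression
    	cc = max(runtime.NumCPU()/2, 1)

    	fmt.Println("upload concurrency:", cc)
    }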
--- pbm/storage/azure/azure.go | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/pbm/storage/azure/azure.go b/pbm/storage/azure/azure.go index b7fac735e..d4314d45d 100644 --- a/pbm/storage/azure/azure.go +++ b/pbm/storage/azure/azure.go @@ -25,8 +25,7 @@ import ( const ( BlobURL = "https://%s.blob.core.windows.net" - defaultUploadBuff = 10 << 20 // 10Mb - defaultUploadMaxBuff = 5 + defaultUploadBuff = 10 << 20 // 10Mb defaultRetries = 10 @@ -180,10 +179,7 @@ func (b *Blob) Save(name string, data io.Reader, options ...storage.Option) erro } } - cc := runtime.NumCPU() / 2 - if cc == 0 { - cc = 1 - } + cc := max(runtime.NumCPU()/2, 1) if b.log != nil && opts.UseLogger { b.log.Debug("BufferSize is set to %d (~%dMb) | %d", bufsz, bufsz>>20, opts.Size) From a04f9bb42e8280b7493ff3688277a89a355972ff Mon Sep 17 00:00:00 2001 From: Boris Ilijic Date: Mon, 13 Oct 2025 10:53:46 +0200 Subject: [PATCH 61/95] Remove creation logic for non-existing container ... when dealing with Azure storage. The other cloud providers (AWS, GCS) don't have that logic. It's a prerequisite that the container/bucket is created before PBM accesses the storage. --- pbm/storage/azure/azure.go | 18 +----------------- 1 file changed, 1 insertion(+), 17 deletions(-) diff --git a/pbm/storage/azure/azure.go b/pbm/storage/azure/azure.go index d4314d45d..235f16ab7 100644 --- a/pbm/storage/azure/azure.go +++ b/pbm/storage/azure/azure.go @@ -156,7 +156,7 @@ func New(opts *Config, node string, l log.LogEvent) (storage.Storage, error) { return nil, errors.Wrap(err, "init container") } - return storage.NewSplitMergeMW(b, opts.GetMaxObjSizeGB()), b.ensureContainer() + return storage.NewSplitMergeMW(b, opts.GetMaxObjSizeGB()), nil } func (*Blob) Type() storage.Type { @@ -342,22 +342,6 @@ func (b *Blob) Delete(name string) error { return nil } -func (b *Blob) ensureContainer() error { - _, err := b.c.ServiceClient().NewContainerClient(b.opts.Container).GetProperties(context.TODO(), nil) - // container already exists - if err == nil { - return nil - } - - var stgErr *azcore.ResponseError - if errors.As(err, &stgErr) && stgErr.StatusCode != http.StatusNotFound { - return errors.Wrap(err, "check container") - } - - _, err = b.c.CreateContainer(context.TODO(), b.opts.Container, nil) - return err -} - func (b *Blob) client() (*azblob.Client, error) { cred, err := azblob.NewSharedKeyCredential(b.opts.Account, b.opts.Credentials.Key) if err != nil { From 18eb1ae7538783da2f274f112fb168397328d598 Mon Sep 17 00:00:00 2001 From: Boris Ilijic Date: Mon, 13 Oct 2025 16:39:37 +0200 Subject: [PATCH 62/95] Extract Azure config --- pbm/storage/azure/azure.go | 132 ++++-------------------------------- pbm/storage/azure/config.go | 105 ++++++++++++++++++++++++++++ 2 files changed, 120 insertions(+), 117 deletions(-) create mode 100644 pbm/storage/azure/config.go diff --git a/pbm/storage/azure/azure.go b/pbm/storage/azure/azure.go index 235f16ab7..9044c6dd6 100644 --- a/pbm/storage/azure/azure.go +++ b/pbm/storage/azure/azure.go @@ -2,12 +2,9 @@ package azure import ( "context" - "fmt" "io" - "maps" "net/http" "path" - "reflect" "runtime" "strings" "time" @@ -34,110 +31,11 @@ const ( defaultMaxObjSizeGB = 194560 // 190 TB ) -//nolint:lll -type Config struct { - Account string `bson:"account" json:"account,omitempty" yaml:"account,omitempty"` - Container string `bson:"container" json:"container,omitempty" yaml:"container,omitempty"` - EndpointURL string `bson:"endpointUrl" json:"endpointUrl,omitempty" yaml:"endpointUrl,omitempty"` -
EndpointURLMap map[string]string `bson:"endpointUrlMap,omitempty" json:"endpointUrlMap,omitempty" yaml:"endpointUrlMap,omitempty"` - Prefix string `bson:"prefix" json:"prefix,omitempty" yaml:"prefix,omitempty"` - Credentials Credentials `bson:"credentials" json:"-" yaml:"credentials"` - MaxObjSizeGB *float64 `bson:"maxObjSizeGB,omitempty" json:"maxObjSizeGB,omitempty" yaml:"maxObjSizeGB,omitempty"` -} - -func (cfg *Config) Clone() *Config { - if cfg == nil { - return nil - } - - rv := *cfg - rv.EndpointURLMap = maps.Clone(cfg.EndpointURLMap) - if cfg.MaxObjSizeGB != nil { - v := *cfg.MaxObjSizeGB - rv.MaxObjSizeGB = &v - } - return &rv -} - -func (cfg *Config) Equal(other *Config) bool { - if cfg == nil || other == nil { - return cfg == other - } - - if cfg.Account != other.Account { - return false - } - if cfg.Container != other.Container { - return false - } - if cfg.EndpointURL != other.EndpointURL { - return false - } - if !maps.Equal(cfg.EndpointURLMap, other.EndpointURLMap) { - return false - } - if cfg.Prefix != other.Prefix { - return false - } - if cfg.Credentials.Key != other.Credentials.Key { - return false - } - if !reflect.DeepEqual(cfg.MaxObjSizeGB, other.MaxObjSizeGB) { - return false - } - - return true -} - -// IsSameStorage identifies the same instance of the Azure storage. -func (cfg *Config) IsSameStorage(other *Config) bool { - if cfg == nil || other == nil { - return cfg == other - } - - if cfg.Account != other.Account { - return false - } - if cfg.Container != other.Container { - return false - } - if cfg.Prefix != other.Prefix { - return false - } - return true -} - -// resolveEndpointURL returns endpoint url based on provided -// EndpointURL or associated EndpointURLMap configuration fields. -// If specified EndpointURLMap overrides EndpointURL field. 
-func (cfg *Config) resolveEndpointURL(node string) string { - ep := cfg.EndpointURL - if epm, ok := cfg.EndpointURLMap[node]; ok { - ep = epm - } - if ep == "" { - ep = fmt.Sprintf(BlobURL, cfg.Account) - } - return ep -} - -func (cfg *Config) GetMaxObjSizeGB() float64 { - if cfg.MaxObjSizeGB != nil && *cfg.MaxObjSizeGB > 0 { - return *cfg.MaxObjSizeGB - } - return defaultMaxObjSizeGB -} - -type Credentials struct { - Key string `bson:"key" json:"key,omitempty" yaml:"key,omitempty"` -} - type Blob struct { opts *Config node string log log.LogEvent - // url *url.URL - c *azblob.Client + c *azblob.Client } func New(opts *Config, node string, l log.LogEvent) (storage.Storage, error) { @@ -159,6 +57,20 @@ func New(opts *Config, node string, l log.LogEvent) (storage.Storage, error) { return storage.NewSplitMergeMW(b, opts.GetMaxObjSizeGB()), nil } +func (b *Blob) client() (*azblob.Client, error) { + cred, err := azblob.NewSharedKeyCredential(b.opts.Account, b.opts.Credentials.Key) + if err != nil { + return nil, errors.Wrap(err, "create credentials") + } + + opts := &azblob.ClientOptions{} + opts.Retry = policy.RetryOptions{ + MaxRetries: defaultRetries, + } + epURL := b.opts.resolveEndpointURL(b.node) + return azblob.NewClientWithSharedKeyCredential(epURL, cred, opts) +} + func (*Blob) Type() storage.Type { return storage.Azure } @@ -342,20 +254,6 @@ func (b *Blob) Delete(name string) error { return nil } -func (b *Blob) client() (*azblob.Client, error) { - cred, err := azblob.NewSharedKeyCredential(b.opts.Account, b.opts.Credentials.Key) - if err != nil { - return nil, errors.Wrap(err, "create credentials") - } - - opts := &azblob.ClientOptions{} - opts.Retry = policy.RetryOptions{ - MaxRetries: defaultRetries, - } - epURL := b.opts.resolveEndpointURL(b.node) - return azblob.NewClientWithSharedKeyCredential(epURL, cred, opts) -} - func isNotFound(err error) bool { var stgErr *azcore.ResponseError if errors.As(err, &stgErr) { diff --git a/pbm/storage/azure/config.go b/pbm/storage/azure/config.go new file mode 100644 index 000000000..a6cbc90a0 --- /dev/null +++ b/pbm/storage/azure/config.go @@ -0,0 +1,105 @@ +package azure + +import ( + "fmt" + "maps" + "reflect" +) + +//nolint:lll +type Config struct { + Account string `bson:"account" json:"account,omitempty" yaml:"account,omitempty"` + Container string `bson:"container" json:"container,omitempty" yaml:"container,omitempty"` + EndpointURL string `bson:"endpointUrl" json:"endpointUrl,omitempty" yaml:"endpointUrl,omitempty"` + EndpointURLMap map[string]string `bson:"endpointUrlMap,omitempty" json:"endpointUrlMap,omitempty" yaml:"endpointUrlMap,omitempty"` + Prefix string `bson:"prefix" json:"prefix,omitempty" yaml:"prefix,omitempty"` + Credentials Credentials `bson:"credentials" json:"-" yaml:"credentials"` + MaxObjSizeGB *float64 `bson:"maxObjSizeGB,omitempty" json:"maxObjSizeGB,omitempty" yaml:"maxObjSizeGB,omitempty"` +} + +type Credentials struct { + Key string `bson:"key" json:"key,omitempty" yaml:"key,omitempty"` +} + +func (cfg *Config) Clone() *Config { + if cfg == nil { + return nil + } + + rv := *cfg + rv.EndpointURLMap = maps.Clone(cfg.EndpointURLMap) + if cfg.MaxObjSizeGB != nil { + v := *cfg.MaxObjSizeGB + rv.MaxObjSizeGB = &v + } + return &rv +} + +func (cfg *Config) Equal(other *Config) bool { + if cfg == nil || other == nil { + return cfg == other + } + + if cfg.Account != other.Account { + return false + } + if cfg.Container != other.Container { + return false + } + if cfg.EndpointURL != other.EndpointURL { + return false + } + 
if !maps.Equal(cfg.EndpointURLMap, other.EndpointURLMap) { + return false + } + if cfg.Prefix != other.Prefix { + return false + } + if cfg.Credentials.Key != other.Credentials.Key { + return false + } + if !reflect.DeepEqual(cfg.MaxObjSizeGB, other.MaxObjSizeGB) { + return false + } + + return true +} + +// IsSameStorage identifies the same instance of the Azure storage. +func (cfg *Config) IsSameStorage(other *Config) bool { + if cfg == nil || other == nil { + return cfg == other + } + + if cfg.Account != other.Account { + return false + } + if cfg.Container != other.Container { + return false + } + if cfg.Prefix != other.Prefix { + return false + } + return true +} + +// resolveEndpointURL returns endpoint url based on provided +// EndpointURL or associated EndpointURLMap configuration fields. +// If specified EndpointURLMap overrides EndpointURL field. +func (cfg *Config) resolveEndpointURL(node string) string { + ep := cfg.EndpointURL + if epm, ok := cfg.EndpointURLMap[node]; ok { + ep = epm + } + if ep == "" { + ep = fmt.Sprintf(BlobURL, cfg.Account) + } + return ep +} + +func (cfg *Config) GetMaxObjSizeGB() float64 { + if cfg.MaxObjSizeGB != nil && *cfg.MaxObjSizeGB > 0 { + return *cfg.MaxObjSizeGB + } + return defaultMaxObjSizeGB +} From d595884ddb00c1cf8c905663749558e87a09b6ae Mon Sep 17 00:00:00 2001 From: Boris Ilijic Date: Mon, 13 Oct 2025 19:22:36 +0200 Subject: [PATCH 63/95] Add Retryer for Azure storage Following config parameters are available: - numMaxRetries - default: 3 - minRetryDelay - default: 800ms - maxRetryDelay - default: 60s --- pbm/config/config.go | 4 ++-- pbm/storage/azure/azure.go | 44 +++++++++++++++++++++---------------- pbm/storage/azure/config.go | 32 +++++++++++++++++++++++++++ 3 files changed, 59 insertions(+), 21 deletions(-) diff --git a/pbm/config/config.go b/pbm/config/config.go index f4b684515..7ad45d694 100644 --- a/pbm/config/config.go +++ b/pbm/config/config.go @@ -343,10 +343,10 @@ func (s *StorageConf) Cast() error { return s.Minio.Cast() case storage.OSS: return s.OSS.Cast() + case storage.Azure: + return s.Azure.Cast() case storage.GCS: return nil - case storage.Azure: // noop - return nil case storage.Blackhole: // noop return nil } diff --git a/pbm/storage/azure/azure.go b/pbm/storage/azure/azure.go index 9044c6dd6..bd2deec16 100644 --- a/pbm/storage/azure/azure.go +++ b/pbm/storage/azure/azure.go @@ -24,7 +24,8 @@ const ( defaultUploadBuff = 10 << 20 // 10Mb - defaultRetries = 10 + defaultMaxRetries = 3 + defaultMaxRetryDelay = 60 * time.Second maxBlocks = 50_000 @@ -32,42 +33,47 @@ const ( ) type Blob struct { - opts *Config + cfg *Config node string log log.LogEvent c *azblob.Client } -func New(opts *Config, node string, l log.LogEvent) (storage.Storage, error) { +func New(cfg *Config, node string, l log.LogEvent) (storage.Storage, error) { + err := cfg.Cast() + if err != nil { + return nil, errors.Wrap(err, "set defaults") + } if l == nil { l = log.DiscardEvent } b := &Blob{ - opts: opts, + cfg: cfg, node: node, log: l, } - var err error b.c, err = b.client() if err != nil { return nil, errors.Wrap(err, "init container") } - return storage.NewSplitMergeMW(b, opts.GetMaxObjSizeGB()), nil + return storage.NewSplitMergeMW(b, cfg.GetMaxObjSizeGB()), nil } func (b *Blob) client() (*azblob.Client, error) { - cred, err := azblob.NewSharedKeyCredential(b.opts.Account, b.opts.Credentials.Key) + cred, err := azblob.NewSharedKeyCredential(b.cfg.Account, b.cfg.Credentials.Key) if err != nil { return nil, errors.Wrap(err, "create credentials") } 
opts := &azblob.ClientOptions{} opts.Retry = policy.RetryOptions{ - MaxRetries: defaultRetries, + MaxRetries: b.cfg.Retryer.NumMaxRetries, + RetryDelay: b.cfg.Retryer.MinRetryDelay, + MaxRetryDelay: b.cfg.Retryer.MaxRetryDelay, } - epURL := b.opts.resolveEndpointURL(b.node) + epURL := b.cfg.resolveEndpointURL(b.node) return azblob.NewClientWithSharedKeyCredential(epURL, cred, opts) } @@ -98,8 +104,8 @@ func (b *Blob) Save(name string, data io.Reader, options ...storage.Option) erro } _, err := b.c.UploadStream(context.TODO(), - b.opts.Container, - path.Join(b.opts.Prefix, name), + b.cfg.Container, + path.Join(b.cfg.Prefix, name), data, &azblob.UploadStreamOptions{ BlockSize: int64(bufsz), @@ -110,13 +116,13 @@ func (b *Blob) Save(name string, data io.Reader, options ...storage.Option) erro } func (b *Blob) List(prefix, suffix string) ([]storage.FileInfo, error) { - prfx := path.Join(b.opts.Prefix, prefix) + prfx := path.Join(b.cfg.Prefix, prefix) if prfx != "" && !strings.HasSuffix(prfx, "/") { prfx += "/" } - pager := b.c.NewListBlobsFlatPager(b.opts.Container, &azblob.ListBlobsFlatOptions{ + pager := b.c.NewListBlobsFlatPager(b.cfg.Container, &azblob.ListBlobsFlatOptions{ Prefix: &prfx, }) @@ -160,8 +166,8 @@ func (b *Blob) FileStat(name string) (storage.FileInfo, error) { inf := storage.FileInfo{} p, err := b.c.ServiceClient(). - NewContainerClient(b.opts.Container). - NewBlockBlobClient(path.Join(b.opts.Prefix, name)). + NewContainerClient(b.cfg.Container). + NewBlockBlobClient(path.Join(b.cfg.Prefix, name)). GetProperties(context.TODO(), nil) if err != nil { if isNotFound(err) { @@ -183,8 +189,8 @@ func (b *Blob) FileStat(name string) (storage.FileInfo, error) { } func (b *Blob) Copy(src, dst string) error { - to := b.c.ServiceClient().NewContainerClient(b.opts.Container).NewBlockBlobClient(path.Join(b.opts.Prefix, dst)) - from := b.c.ServiceClient().NewContainerClient(b.opts.Container).NewBlockBlobClient(path.Join(b.opts.Prefix, src)) + to := b.c.ServiceClient().NewContainerClient(b.cfg.Container).NewBlockBlobClient(path.Join(b.cfg.Prefix, dst)) + from := b.c.ServiceClient().NewContainerClient(b.cfg.Container).NewBlockBlobClient(path.Join(b.cfg.Prefix, src)) r, err := to.StartCopyFromURL(context.TODO(), from.BlobClient().URL(), nil) if err != nil { return errors.Wrap(err, "start copy") @@ -224,7 +230,7 @@ func (b *Blob) DownloadStat() storage.DownloadStat { } func (b *Blob) SourceReader(name string) (io.ReadCloser, error) { - o, err := b.c.DownloadStream(context.TODO(), b.opts.Container, path.Join(b.opts.Prefix, name), nil) + o, err := b.c.DownloadStream(context.TODO(), b.cfg.Container, path.Join(b.cfg.Prefix, name), nil) if err != nil { if isNotFound(err) { return nil, storage.ErrNotExist @@ -243,7 +249,7 @@ func (b *Blob) SourceReader(name string) (io.ReadCloser, error) { } func (b *Blob) Delete(name string) error { - _, err := b.c.DeleteBlob(context.TODO(), b.opts.Container, path.Join(b.opts.Prefix, name), nil) + _, err := b.c.DeleteBlob(context.TODO(), b.cfg.Container, path.Join(b.cfg.Prefix, name), nil) if err != nil { if isNotFound(err) { return storage.ErrNotExist diff --git a/pbm/storage/azure/config.go b/pbm/storage/azure/config.go index a6cbc90a0..1fb2599b8 100644 --- a/pbm/storage/azure/config.go +++ b/pbm/storage/azure/config.go @@ -4,6 +4,9 @@ import ( "fmt" "maps" "reflect" + "time" + + "github.com/percona/percona-backup-mongodb/pbm/errors" ) //nolint:lll @@ -14,6 +17,7 @@ type Config struct { EndpointURLMap map[string]string `bson:"endpointUrlMap,omitempty" 
json:"endpointUrlMap,omitempty" yaml:"endpointUrlMap,omitempty"` Prefix string `bson:"prefix" json:"prefix,omitempty" yaml:"prefix,omitempty"` Credentials Credentials `bson:"credentials" json:"-" yaml:"credentials"` + Retryer *Retryer `bson:"retryer,omitempty" json:"retryer,omitempty" yaml:"retryer,omitempty"` MaxObjSizeGB *float64 `bson:"maxObjSizeGB,omitempty" json:"maxObjSizeGB,omitempty" yaml:"maxObjSizeGB,omitempty"` } @@ -21,6 +25,12 @@ type Credentials struct { Key string `bson:"key" json:"key,omitempty" yaml:"key,omitempty"` } +type Retryer struct { + NumMaxRetries int32 `bson:"numMaxRetries" json:"numMaxRetries" yaml:"numMaxRetries"` + MinRetryDelay time.Duration `bson:"minRetryDelay" json:"minRetryDelay" yaml:"minRetryDelay"` + MaxRetryDelay time.Duration `bson:"maxRetryDelay" json:"maxRetryDelay" yaml:"maxRetryDelay"` +} + func (cfg *Config) Clone() *Config { if cfg == nil { return nil @@ -83,6 +93,28 @@ func (cfg *Config) IsSameStorage(other *Config) bool { return true } +func (cfg *Config) Cast() error { + if cfg == nil { + return errors.New("missing azure configuration with azure storage type") + } + + if cfg.Retryer == nil { + cfg.Retryer = &Retryer{ + NumMaxRetries: defaultMaxRetries, + MaxRetryDelay: defaultMaxRetryDelay, + } + } else { + if cfg.Retryer.NumMaxRetries == 0 { + cfg.Retryer.NumMaxRetries = defaultMaxRetries + } + if cfg.Retryer.MaxRetryDelay == 0 { + cfg.Retryer.MaxRetryDelay = defaultMaxRetryDelay + } + } + + return nil +} + // resolveEndpointURL returns endpoint url based on provided // EndpointURL or associated EndpointURLMap configuration fields. // If specified EndpointURLMap overrides EndpointURL field. From 413ede99b17fb4cb234ddac297f477909cfac9be Mon Sep 17 00:00:00 2001 From: Boris Ilijic Date: Tue, 14 Oct 2025 10:16:27 +0200 Subject: [PATCH 64/95] Add Azure config methods and tests --- pbm/storage/azure/azure.go | 1 + pbm/storage/azure/config.go | 38 ++++--------- pbm/storage/azure/config_test.go | 91 ++++++++++++++++++++++++++++++++ 3 files changed, 103 insertions(+), 27 deletions(-) create mode 100644 pbm/storage/azure/config_test.go diff --git a/pbm/storage/azure/azure.go b/pbm/storage/azure/azure.go index bd2deec16..d554ea6fd 100644 --- a/pbm/storage/azure/azure.go +++ b/pbm/storage/azure/azure.go @@ -25,6 +25,7 @@ const ( defaultUploadBuff = 10 << 20 // 10Mb defaultMaxRetries = 3 + defaultMinRetryDelay = 800 * time.Millisecond defaultMaxRetryDelay = 60 * time.Second maxBlocks = 50_000 diff --git a/pbm/storage/azure/config.go b/pbm/storage/azure/config.go index 1fb2599b8..9d1df5bec 100644 --- a/pbm/storage/azure/config.go +++ b/pbm/storage/azure/config.go @@ -25,6 +25,8 @@ type Credentials struct { Key string `bson:"key" json:"key,omitempty" yaml:"key,omitempty"` } +// Retryer is configuration for retry behavior described: +// https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azcore@v1.19.1/policy#RetryOptions type Retryer struct { NumMaxRetries int32 `bson:"numMaxRetries" json:"numMaxRetries" yaml:"numMaxRetries"` MinRetryDelay time.Duration `bson:"minRetryDelay" json:"minRetryDelay" yaml:"minRetryDelay"` @@ -38,6 +40,10 @@ func (cfg *Config) Clone() *Config { rv := *cfg rv.EndpointURLMap = maps.Clone(cfg.EndpointURLMap) + if cfg.Retryer != nil { + v := *cfg.Retryer + rv.Retryer = &v + } if cfg.MaxObjSizeGB != nil { v := *cfg.MaxObjSizeGB rv.MaxObjSizeGB = &v @@ -46,33 +52,7 @@ func (cfg *Config) Clone() *Config { } func (cfg *Config) Equal(other *Config) bool { - if cfg == nil || other == nil { - return cfg == other - } - - if 
cfg.Account != other.Account { - return false - } - if cfg.Container != other.Container { - return false - } - if cfg.EndpointURL != other.EndpointURL { - return false - } - if !maps.Equal(cfg.EndpointURLMap, other.EndpointURLMap) { - return false - } - if cfg.Prefix != other.Prefix { - return false - } - if cfg.Credentials.Key != other.Credentials.Key { - return false - } - if !reflect.DeepEqual(cfg.MaxObjSizeGB, other.MaxObjSizeGB) { - return false - } - - return true + return reflect.DeepEqual(cfg, other) } // IsSameStorage identifies the same instance of the Azure storage. @@ -101,12 +81,16 @@ func (cfg *Config) Cast() error { if cfg.Retryer == nil { cfg.Retryer = &Retryer{ NumMaxRetries: defaultMaxRetries, + MinRetryDelay: defaultMinRetryDelay, MaxRetryDelay: defaultMaxRetryDelay, } } else { if cfg.Retryer.NumMaxRetries == 0 { cfg.Retryer.NumMaxRetries = defaultMaxRetries } + if cfg.Retryer.MinRetryDelay == 0 { + cfg.Retryer.MinRetryDelay = defaultMinRetryDelay + } if cfg.Retryer.MaxRetryDelay == 0 { cfg.Retryer.MaxRetryDelay = defaultMaxRetryDelay } diff --git a/pbm/storage/azure/config_test.go b/pbm/storage/azure/config_test.go new file mode 100644 index 000000000..153e24e38 --- /dev/null +++ b/pbm/storage/azure/config_test.go @@ -0,0 +1,91 @@ +package azure + +import ( + "reflect" + "testing" + "time" + + "github.com/google/go-cmp/cmp" +) + +func TestClone(t *testing.T) { + f := 1.1 + c1 := &Config{ + Account: "acc", + Container: "cnt", + EndpointURL: "ep.com", + EndpointURLMap: map[string]string{"n1": "ep1", "n2": "ep2"}, + Prefix: "p1", + Credentials: Credentials{ + Key: "k1", + }, + MaxObjSizeGB: &f, + Retryer: &Retryer{ + NumMaxRetries: 5, + MinRetryDelay: 10 * time.Second, + MaxRetryDelay: 20 * time.Second, + }, + } + + c2 := c1.Clone() + + if &c1.EndpointURLMap == &c2.EndpointURLMap || + c1.MaxObjSizeGB == c2.MaxObjSizeGB || + c1.Retryer == c2.Retryer { + t.Fatal("Deep copy of pointer fields is missing") + } + if !reflect.DeepEqual(c1, c2) { + t.Fatalf("Clone is not performed, diff=%s", cmp.Diff(*c1, *c2)) + } +} + +func TestEqual(t *testing.T) { + f := 1.1 + c1 := &Config{ + Account: "acc", + Container: "cnt", + EndpointURL: "ep.com", + EndpointURLMap: map[string]string{"n1": "ep1", "n2": "ep2"}, + Prefix: "p1", + Credentials: Credentials{ + Key: "k1", + }, + MaxObjSizeGB: &f, + Retryer: &Retryer{ + NumMaxRetries: 5, + MinRetryDelay: 10 * time.Second, + MaxRetryDelay: 20 * time.Second, + }, + } + + c2 := c1.Clone() + + if !c1.Equal(c2) { + t.Fatalf("cfg should be equal, diff=%s", cmp.Diff(*c1, *c2)) + } +} + +func TestCast(t *testing.T) { + var c *Config + err := c.Cast() + if err == nil { + t.Fatal("sigsegv should have happened instead") + } + + c = &Config{} + err = c.Cast() + if err != nil { + t.Fatalf("got error during Cast: %v", err) + } + want := &Config{ + Retryer: &Retryer{ + NumMaxRetries: defaultMaxRetries, + MinRetryDelay: defaultMinRetryDelay, + MaxRetryDelay: defaultMaxRetryDelay, + }, + } + + if !c.Equal(want) { + t.Fatalf("wrong config after Cast, diff=%s", cmp.Diff(*c, *want)) + } +} From fd2ca0057940a70bf7ec45ac81a73074895d9d17 Mon Sep 17 00:00:00 2001 From: Boris Ilijic Date: Tue, 14 Oct 2025 10:48:39 +0200 Subject: [PATCH 65/95] Add configuration reference for Azure retryer --- packaging/conf/pbm-conf-reference.yml | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/packaging/conf/pbm-conf-reference.yml b/packaging/conf/pbm-conf-reference.yml index e69225716..70a2bbc46 100644 --- a/packaging/conf/pbm-conf-reference.yml +++ 
b/packaging/conf/pbm-conf-reference.yml @@ -69,8 +69,8 @@ ## Retry upload configuration options. # retryer: # numMaxRetries: 3 -# minRetryDelay: 30 -# maxRetryDelay: 5 +# minRetryDelay: 30ms +# maxRetryDelay: 300s # ## The maximum object size that will be stored on the storage # maxObjSizeGB: 5018 @@ -183,6 +183,12 @@ # credentials: # key: # +## Retry upload configuration options. +# retryer: +# numMaxRetries: 3 +# minRetryDelay: 800ms +# maxRetryDelay: 60s +# ## The maximum object size that will be stored on the storage # maxObjSizeGB: 194560 From f3fac584d283c258326e479df4be47ba836f2bb9 Mon Sep 17 00:00:00 2001 From: Boris Ilijic Date: Wed, 15 Oct 2025 18:38:22 +0200 Subject: [PATCH 66/95] Bench for upload perf tests for S3 vs Minio client --- pbm/storage/mio/minio_test.go | 56 ++++++++++++++++++++++ pbm/storage/s3/s3_test.go | 89 +++++++++++++++++++++++++++++++++++ 2 files changed, 145 insertions(+) diff --git a/pbm/storage/mio/minio_test.go b/pbm/storage/mio/minio_test.go index c18bbfa49..2276a944a 100644 --- a/pbm/storage/mio/minio_test.go +++ b/pbm/storage/mio/minio_test.go @@ -255,3 +255,59 @@ func (r *InfiniteCustomReader) Read(p []byte) (int, error) { return readLen, nil } + +func BenchmarkMinioUpload(b *testing.B) { + fsize := int64(500 * 1024 * 1024) + numThreds := uint(max(runtime.GOMAXPROCS(0), 1)) + partSize := uint64(defaultPartSize) + // partSize := uint64(50 * 1024 * 1024) + // partSize := uint64(100 * 1024 * 1024) + + ep := "s3.amazonaws.com" + region := "eu-central-1" + bucket := "" + prefix := "" + accessKeyID := "" + secretAccessKey := "" + + mc, err := minio.New(ep, &minio.Options{ + Region: region, + Creds: credentials.NewStaticV4(accessKeyID, secretAccessKey, ""), + Secure: true, + }) + if err != nil { + b.Fatalf("minio client creation for aws: %v", err) + } + b.Logf("minio client: file size=%d bytes; part size=%d bytes; NumThreads=%d", + fsize, partSize, numThreds) + + b.ResetTimer() + b.SetBytes(fsize) + + for b.Loop() { + b.StopTimer() + infR := NewInfiniteCustomReader() + r := io.LimitReader(infR, fsize) + + fname := time.Now().Format("2006-01-02T15:04:05") + b.Logf("uploading file: %s ....", fname) + + putOpts := minio.PutObjectOptions{ + PartSize: partSize, + NumThreads: numThreds, + } + + b.StartTimer() + _, err = mc.PutObject( + context.Background(), + bucket, + path.Join(prefix, fname), + r, + -1, + putOpts, + ) + if err != nil { + b.Fatalf("put object: %v", err) + } + } +} diff --git a/pbm/storage/s3/s3_test.go b/pbm/storage/s3/s3_test.go index ffd5edf18..61623df05 100644 --- a/pbm/storage/s3/s3_test.go +++ b/pbm/storage/s3/s3_test.go @@ -2,13 +2,19 @@ package s3 import ( "context" + "io" + "net/http" + "path" + "runtime" "testing" "time" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/config" "github.com/aws/aws-sdk-go-v2/credentials" + "github.com/aws/aws-sdk-go-v2/feature/s3/manager" "github.com/aws/aws-sdk-go-v2/service/s3" + "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/testcontainers/testcontainers-go" "github.com/testcontainers/testcontainers-go/modules/minio" @@ -238,3 +244,86 @@ func TestToClientLogMode(t *testing.T) { }) } } + +func BenchmarkAWSUpload(b *testing.B) { + fsize := int64(500 * 1024 * 1024) + numThreds := max(runtime.GOMAXPROCS(0), 1) + partSize := defaultPartSize + // partSize := int64(50 * 1024 * 1024) + // partSize := int64(100 * 1024 * 1024) + + region := "eu-central-1" + bucket := "" + prefix := "" + accessKeyID := "" + secretAccessKey := "" + + cfgOpts := []func(*config.LoadOptions) error{ + 
config.WithRegion(region), + config.WithHTTPClient(&http.Client{}), + config.WithCredentialsProvider( + credentials.NewStaticCredentialsProvider(accessKeyID, secretAccessKey, ""), + ), + } + awsCfg, err := config.LoadDefaultConfig(context.Background(), cfgOpts...) + if err != nil { + b.Fatalf("load default aws config: %v", err) + } + s3Client := s3.NewFromConfig(awsCfg) + b.Logf("aws s3 client: file size=%d bytes; part size=%d bytes; NumThreads=%d", + fsize, partSize, numThreds) + + b.ResetTimer() + b.SetBytes(fsize) + + for b.Loop() { + b.StopTimer() + infR := NewInfiniteCustomReader() + r := io.LimitReader(infR, fsize) + + fname := time.Now().Format("2006-01-02T15:04:05") + b.Logf("uploading file: %s ....", fname) + + putInput := &s3.PutObjectInput{ + Bucket: aws.String(bucket), + Key: aws.String(path.Join(prefix, fname)), + Body: r, + StorageClass: types.StorageClass(types.StorageClassStandard), + } + + b.StartTimer() + _, err := manager.NewUploader(s3Client, func(u *manager.Uploader) { + u.PartSize = partSize + u.LeavePartsOnError = true + u.Concurrency = numThreds + }).Upload(context.Background(), putInput) + if err != nil { + b.Fatalf("put object: %v", err) + } + } +} + +type InfiniteCustomReader struct { + pattern []byte + patternIndex int +} + +func NewInfiniteCustomReader() *InfiniteCustomReader { + pattern := []byte{0xAA, 0xBB, 0xCC, 0xDD, 0xEE, 0xFF, 0x11, 0x22} + + return &InfiniteCustomReader{ + pattern: pattern, + patternIndex: 0, + } +} + +func (r *InfiniteCustomReader) Read(p []byte) (int, error) { + readLen := len(p) + + for i := range readLen { + p[i] = r.pattern[r.patternIndex] + r.patternIndex = (r.patternIndex + 1) % len(r.pattern) + } + + return readLen, nil +} From 8724d6b5ceaa8b00ef930d48391974e29ccb615e Mon Sep 17 00:00:00 2001 From: Boris Ilijic Date: Thu, 16 Oct 2025 11:38:33 +0200 Subject: [PATCH 67/95] Add bench for minio storage Save --- pbm/storage/mio/minio_test.go | 65 ++++++++++++++++++++++++++++++----- 1 file changed, 57 insertions(+), 8 deletions(-) diff --git a/pbm/storage/mio/minio_test.go b/pbm/storage/mio/minio_test.go index 2276a944a..4b28c0670 100644 --- a/pbm/storage/mio/minio_test.go +++ b/pbm/storage/mio/minio_test.go @@ -2,6 +2,7 @@ package mio import ( "context" + "flag" "io" "net/url" "path" @@ -14,6 +15,7 @@ import ( "github.com/testcontainers/testcontainers-go" tcminio "github.com/testcontainers/testcontainers-go/modules/minio" + "github.com/percona/percona-backup-mongodb/pbm/log" "github.com/percona/percona-backup-mongodb/pbm/storage" ) @@ -256,12 +258,15 @@ func (r *InfiniteCustomReader) Read(p []byte) (int, error) { return readLen, nil } -func BenchmarkMinioUpload(b *testing.B) { - fsize := int64(500 * 1024 * 1024) +var ( + fileSize = flag.Int64("file-size", 500, "file size in MB that will be uploaded") + partSize = flag.Int64("part-size", 10, "part size in MB that will be used to upload file") +) + +func BenchmarkMinioPutObject(b *testing.B) { numThreds := uint(max(runtime.GOMAXPROCS(0), 1)) - partSize := uint64(defaultPartSize) - // partSize := uint64(50 * 1024 * 1024) - // partSize := uint64(100 * 1024 * 1024) + fsize := *fileSize * 1024 * 1024 + pSize := *partSize * 1024 * 1024 ep := "s3.amazonaws.com" region := "eu-central-1" @@ -278,8 +283,8 @@ func BenchmarkMinioUpload(b *testing.B) { if err != nil { b.Fatalf("minio client creation for aws: %v", err) } - b.Logf("minio client: file size=%d bytes; part size=%d bytes; NumThreads=%d", - fsize, partSize, numThreds) + b.Logf("minio client: file size=%s; part size=%s; NumThreads=%d", + 
storage.PrettySize(fsize), storage.PrettySize(pSize), numThreds) b.ResetTimer() b.SetBytes(fsize) @@ -293,7 +298,7 @@ func BenchmarkMinioUpload(b *testing.B) { b.Logf("uploading file: %s ....", fname) putOpts := minio.PutObjectOptions{ - PartSize: partSize, + PartSize: uint64(pSize), NumThreads: numThreds, } @@ -311,3 +316,47 @@ func BenchmarkMinioUpload(b *testing.B) { } } } + +func BenchmarkMinioStorageSave(b *testing.B) { + numThreds := uint(max(runtime.GOMAXPROCS(0), 1)) + fsize := *fileSize * 1024 * 1024 + pSize := *partSize * 1024 * 1024 + + cfg := &Config{ + Endpoint: "s3.amazonaws.com", + Region: "eu-central-1", + Bucket: "", + Prefix: "", + Credentials: Credentials{ + AccessKeyID: "", + SecretAccessKey: "", + }, + PartSize: pSize, + } + + s, err := New(cfg, "", log.DiscardEvent) + if err != nil { + b.Fatalf("minio storage creation: %v", err) + } + b.Logf("minio client: file size=%s; part size=%s; NumThreads=%d", + storage.PrettySize(fsize), storage.PrettySize(pSize), numThreds) + + b.ResetTimer() + b.SetBytes(fsize) + + for b.Loop() { + b.StopTimer() + + infR := NewInfiniteCustomReader() + r := io.LimitReader(infR, fsize) + + fname := time.Now().Format("2006-01-02T15:04:05") + b.Logf("saving file: %s ....", fname) + + b.StartTimer() + err := s.Save(fname, r) + if err != nil { + b.Fatalf("save %s: %v", fname, err) + } + } +} From 1a13d0f017d777a95387218287ac9378a9b5b7b1 Mon Sep 17 00:00:00 2001 From: Boris Ilijic Date: Fri, 17 Oct 2025 11:40:37 +0200 Subject: [PATCH 68/95] Add bench for S3 storage Save --- pbm/storage/s3/s3_test.go | 63 ++++++++++++++++++++++++++++++++++----- 1 file changed, 55 insertions(+), 8 deletions(-) diff --git a/pbm/storage/s3/s3_test.go b/pbm/storage/s3/s3_test.go index 61623df05..10b1c9380 100644 --- a/pbm/storage/s3/s3_test.go +++ b/pbm/storage/s3/s3_test.go @@ -2,6 +2,7 @@ package s3 import ( "context" + "flag" "io" "net/http" "path" @@ -19,6 +20,7 @@ import ( "github.com/testcontainers/testcontainers-go/modules/minio" "github.com/percona/percona-backup-mongodb/pbm/errors" + "github.com/percona/percona-backup-mongodb/pbm/log" "github.com/percona/percona-backup-mongodb/pbm/storage" ) @@ -245,12 +247,15 @@ func TestToClientLogMode(t *testing.T) { } } -func BenchmarkAWSUpload(b *testing.B) { - fsize := int64(500 * 1024 * 1024) +var ( + fileSize = flag.Int64("file-size", 500, "file size that will be uploaded") + partSize = flag.Int64("part-size", 10, "part size that will be used to upload file") +) + +func BenchmarkS3Upload(b *testing.B) { numThreds := max(runtime.GOMAXPROCS(0), 1) - partSize := defaultPartSize - // partSize := int64(50 * 1024 * 1024) - // partSize := int64(100 * 1024 * 1024) + fsize := *fileSize * 1024 * 1024 + pSize := *partSize * 1024 * 1024 region := "eu-central-1" bucket := "" @@ -270,8 +275,8 @@ func BenchmarkAWSUpload(b *testing.B) { b.Fatalf("load default aws config: %v", err) } s3Client := s3.NewFromConfig(awsCfg) - b.Logf("aws s3 client: file size=%d bytes; part size=%d bytes; NumThreads=%d", - fsize, partSize, numThreds) + b.Logf("aws s3 client: file size=%s; part size=%s; NumThreads=%d", + storage.PrettySize(fsize), storage.PrettySize(pSize), numThreds) b.ResetTimer() b.SetBytes(fsize) @@ -293,7 +298,7 @@ func BenchmarkAWSUpload(b *testing.B) { b.StartTimer() _, err := manager.NewUploader(s3Client, func(u *manager.Uploader) { - u.PartSize = partSize + u.PartSize = pSize u.LeavePartsOnError = true u.Concurrency = numThreds }).Upload(context.Background(), putInput) @@ -303,6 +308,48 @@ func BenchmarkAWSUpload(b *testing.B) { } } 
+func BenchmarkS3StorageSave(b *testing.B) {
+	numThreds := max(runtime.GOMAXPROCS(0), 1)
+	fsize := *fileSize * 1024 * 1024
+	pSize := *partSize * 1024 * 1024
+
+	cfg := &Config{
+		Region: "eu-central-1",
+		Bucket: "",
+		Prefix: "",
+		Credentials: Credentials{
+			AccessKeyID:     "",
+			SecretAccessKey: "",
+		},
+		UploadPartSize: int(pSize),
+	}
+
+	s, err := New(cfg, "", log.DiscardEvent)
+	if err != nil {
+		b.Fatalf("s3 storage creation: %v", err)
+	}
+	b.Logf("aws s3 client: file size=%s; part size=%s; NumThreads=%d",
+		storage.PrettySize(fsize), storage.PrettySize(pSize), numThreds)
+
+	b.ResetTimer()
+	b.SetBytes(fsize)
+
+	for b.Loop() {
+		b.StopTimer()
+		infR := NewInfiniteCustomReader()
+		r := io.LimitReader(infR, fsize)
+
+		fname := time.Now().Format("2006-01-02T15:04:05")
+		b.Logf("uploading file: %s ....", fname)
+
+		b.StartTimer()
+		err := s.Save(fname, r)
+		if err != nil {
+			b.Fatalf("save %s: %v", fname, err)
+		}
+	}
+}
+
 type InfiniteCustomReader struct {
 	pattern      []byte
 	patternIndex int

From bc8f47795e96da798cf3ead3ac7c9a096b66c438 Mon Sep 17 00:00:00 2001
From: Boris Ilijic
Date: Mon, 20 Oct 2025 12:40:16 +0200
Subject: [PATCH 69/95] Add bench for List and FileStat for S3 & Minio

---
 pbm/storage/mio/minio_test.go | 63 +++++++++++++++++++++++++++++++++++
 pbm/storage/s3/s3_test.go     | 59 +++++++++++++++++++++++++++++
 2 files changed, 122 insertions(+)

diff --git a/pbm/storage/mio/minio_test.go b/pbm/storage/mio/minio_test.go
index 4b28c0670..5b2f046b6 100644
--- a/pbm/storage/mio/minio_test.go
+++ b/pbm/storage/mio/minio_test.go
@@ -360,3 +360,66 @@
 	}
 }
+
+func BenchmarkMinioStorageList(b *testing.B) {
+	cfg := &Config{
+		Endpoint: "s3.amazonaws.com",
+		Region:   "eu-central-1",
+		Bucket:   "",
+		Prefix:   "",
+		Credentials: Credentials{
+			AccessKeyID:     "",
+			SecretAccessKey: "",
+		},
+	}
+
+	s, err := New(cfg, "", log.DiscardEvent)
+	if err != nil {
+		b.Fatalf("minio storage creation: %v", err)
+	}
+
+	b.ResetTimer()
+
+	for b.Loop() {
+		fis, err := s.List("", "")
+		if err != nil {
+			b.Fatalf("list: %v", err)
+		}
+		b.Logf("got %d files", len(fis))
+
+		// NOTE: only listing is measured here; nothing is
+		// saved in this benchmark.
+	}
+}
+
+func BenchmarkMinioStorageFileStat(b *testing.B) {
+	cfg := &Config{
+		Endpoint: "s3.amazonaws.com",
+		Region:   "eu-central-1",
+		Bucket:   "",
+		Prefix:   "",
+		Credentials: Credentials{
+			AccessKeyID:     "",
+			SecretAccessKey: "",
+		},
+	}
+
+	s, err := New(cfg, "", log.DiscardEvent)
+	if err != nil {
+		b.Fatalf("minio storage creation: %v", err)
+	}
+
+	b.ResetTimer()
+
+	for b.Loop() {
+		fi, err := s.FileStat("2025-10-17T17:13:31")
+		if err != nil {
+			b.Fatalf("file stat: %v", err)
+		}
+		b.Logf("file stat: %s, %d", fi.Name, fi.Size)
+		_, err = s.FileStat("abc")
+		if err != storage.ErrNotExist {
+			b.Fatal("file should not exist")
+		}
+	}
+}
diff --git a/pbm/storage/s3/s3_test.go b/pbm/storage/s3/s3_test.go
index 10b1c9380..2b1ab9395 100644
--- a/pbm/storage/s3/s3_test.go
+++ b/pbm/storage/s3/s3_test.go
@@ -350,6 +350,65 @@
 	}
 }
 
+func BenchmarkS3StorageList(b *testing.B) {
+	cfg := &Config{
+		Region: "eu-central-1",
+		Bucket: "",
+		Prefix: "",
+		Credentials: Credentials{
+			AccessKeyID:     "",
+			SecretAccessKey: "",
+		},
+	}
+
+	s, err := New(cfg, "", log.DiscardEvent)
+	if err != nil {
+		b.Fatalf("s3 storage creation: %v", err)
+	}
+
+	b.ResetTimer()
+
+	for b.Loop() {
+		fis, err := s.List("", "")
+		if err != nil {
+			b.Fatalf("list: %v", err)
+		}
+
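+		// list every object stored under the configured bucket prefix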
b.Logf("got %d files", len(fis)) + } +} + +func BenchmarkS3StorageFileStat(b *testing.B) { + cfg := &Config{ + Region: "eu-central-1", + Bucket: "", + Prefix: "", + Credentials: Credentials{ + AccessKeyID: "", + SecretAccessKey: "", + }, + } + + s, err := New(cfg, "", log.DiscardEvent) + if err != nil { + b.Fatalf("s3 storage creation: %v", err) + } + + b.ResetTimer() + + for b.Loop() { + fi, err := s.FileStat("2025-10-17T17:05:18") + if err != nil { + b.Fatalf("file stat: %v", err) + } + b.Logf("file stat: %s, %d", fi.Name, fi.Size) + fi, err = s.FileStat("abc") + if err != storage.ErrNotExist { + b.Fatal("files should not exist") + } + } +} + type InfiniteCustomReader struct { pattern []byte patternIndex int From 725e71678e4b24a0500163551371309deac0a7ae Mon Sep 17 00:00:00 2001 From: Boris Ilijic Date: Tue, 21 Oct 2025 12:09:50 +0200 Subject: [PATCH 70/95] Add benchmark description for storage functions --- pbm/storage/mio/minio_test.go | 23 +++++++++++++++++++++++ pbm/storage/s3/s3_test.go | 23 +++++++++++++++++++++++ 2 files changed, 46 insertions(+) diff --git a/pbm/storage/mio/minio_test.go b/pbm/storage/mio/minio_test.go index 5b2f046b6..cca5da8e7 100644 --- a/pbm/storage/mio/minio_test.go +++ b/pbm/storage/mio/minio_test.go @@ -263,6 +263,17 @@ var ( partSize = flag.Int64("part-size", 10, "part size in MB that will be used to upload file") ) +// BenchmarkMinioPutObject measures the performance of uploading file on the Minio SDK level. +// It allows specifying --file-size and --part-size flags. +// Example that was used in the microbenchmarking tests: +/* +go test ./pbm/storage/mio -bench=BenchmarkMinioPutObject -run=^$ -v \ +-benchtime=5x \ +-cpu=1,2,4,8 \ +-benchmem \ +-file-size=500 \ +-part-size=100 +*/ func BenchmarkMinioPutObject(b *testing.B) { numThreds := uint(max(runtime.GOMAXPROCS(0), 1)) fsize := *fileSize * 1024 * 1024 @@ -317,6 +328,18 @@ func BenchmarkMinioPutObject(b *testing.B) { } } +// BenchmarkMinioStorageSave measures the performance of uploading file on the +// PBM's storage interface level. +// It allows specifying --file-size and --part-size flags. +// Example that was used in the microbenchmarking tests: +/* +go test ./pbm/storage/mio -bench=BenchmarkMinioStorageSave -run=^$ -v \ +-benchtime=5x \ +-cpu=1,2,4,8 \ +-benchmem \ +-file-size=500 \ +-part-size=100 +*/ func BenchmarkMinioStorageSave(b *testing.B) { numThreds := uint(max(runtime.GOMAXPROCS(0), 1)) fsize := *fileSize * 1024 * 1024 diff --git a/pbm/storage/s3/s3_test.go b/pbm/storage/s3/s3_test.go index 2b1ab9395..e5ace74ba 100644 --- a/pbm/storage/s3/s3_test.go +++ b/pbm/storage/s3/s3_test.go @@ -252,6 +252,17 @@ var ( partSize = flag.Int64("part-size", 10, "part size that will be used to upload file") ) +// BenchmarkS3Upload measures the performance of uploading file on the AWS S3 SDK level. +// It allows specifying --file-size and --part-size flags. +// Example that was used in the microbenchmarking tests: +/* +go test ./pbm/storage/s3 -bench=BenchmarkS3Upload -run=^$ -v \ +-benchtime=5x \ +-cpu=1,2,4,8 \ +-benchmem \ +-file-size=500 \ +-part-size=100 +*/ func BenchmarkS3Upload(b *testing.B) { numThreds := max(runtime.GOMAXPROCS(0), 1) fsize := *fileSize * 1024 * 1024 @@ -308,6 +319,18 @@ func BenchmarkS3Upload(b *testing.B) { } } +// BenchmarkS3StorageSave measures the performance of uploading file on the +// PBM's storage interface level. +// It allows specifying --file-size and --part-size flags. 
+// Example that was used in the microbenchmarking tests: +/* +go test ./pbm/storage/s3 -bench=BenchmarkS3StorageSave -run=^$ -v \ +-benchtime=5x \ +-cpu=1,2,4,8 \ +-benchmem \ +-file-size=500 \ +-part-size=100 +*/ func BenchmarkS3StorageSave(b *testing.B) { numThreds := max(runtime.GOMAXPROCS(0), 1) fsize := *fileSize * 1024 * 1024 From b7ddb15716355ee093e322770fa7dfe34ef43fe7 Mon Sep 17 00:00:00 2001 From: Boris Ilijic Date: Tue, 21 Oct 2025 13:46:13 +0200 Subject: [PATCH 71/95] Fix log msg while cleaning up storage after failed backup --- pbm/backup/backup.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pbm/backup/backup.go b/pbm/backup/backup.go index a4162af9b..1a3e3d263 100644 --- a/pbm/backup/backup.go +++ b/pbm/backup/backup.go @@ -317,7 +317,8 @@ func (b *Backup) Run(ctx context.Context, bcp *ctrl.BackupCmd, opid ctrl.OPID, l } if err := DeleteBackupFiles(stg, bcp.Name); err != nil { - l.Error("Failed to delete leftover files for canceled backup %q", bcpm.Name) + l.Error("failed to delete leftover files for canceled backup %q: %v", + bcpm.Name, err) } }() From 0e5bd63ed5e7a5a7274aa0eaf14a193eef639925 Mon Sep 17 00:00:00 2001 From: Boris Ilijic Date: Tue, 21 Oct 2025 19:50:39 +0200 Subject: [PATCH 72/95] Improve logging msg when fetching pbm file parts ... in split-merge mw. --- pbm/storage/split_merge_mw.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pbm/storage/split_merge_mw.go b/pbm/storage/split_merge_mw.go index ff30b6235..5caacacaf 100644 --- a/pbm/storage/split_merge_mw.go +++ b/pbm/storage/split_merge_mw.go @@ -245,7 +245,7 @@ func (sm *SplitMergeMiddleware) fileWithParts(name string) ([]FileInfo, error) { fi, err := sm.s.FileStat(name) if err != nil { - return res, errors.Wrap(err, "fetching pbm file parts base") + return res, errors.Wrapf(err, "fetching pbm file parts base for %s", name) } res = append(res, fi) From 445bfe98c08516ecbd9fba4970e5ec330ceaaec7 Mon Sep 17 00:00:00 2001 From: Oleksandr Havryliak <88387200+olexandr-havryliak@users.noreply.github.com> Date: Wed, 22 Oct 2025 11:36:44 +0300 Subject: [PATCH 73/95] PBM use pytest-shard for better parallelization, drop 6.0 support (#1211) --- .github/workflows/ci.yml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index a20e03729..c7671c456 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -41,8 +41,8 @@ jobs: strategy: fail-fast: false matrix: - psmdb: ["6.0", "7.0", "8.0"] - test: [logical, physical, incremental, external] + psmdb: ["7.0", "8.0"] + shard: [0, 1, 2, 3, 4, 5, 6] env: PBM_BRANCH: ${{ github.event.inputs.pbm_branch || github.ref_name }} GO_VER: ${{ github.event.inputs.go_ver || '1.25-bookworm' }} @@ -63,9 +63,9 @@ jobs: docker compose up -d working-directory: psmdb-testing/pbm-functional/pytest - - name: Test ${{ matrix.test }} backup/restore on PSMDB ${{ matrix.psmdb }} for PBM PR/branch ${{ github.event.pull_request.title || env.PR_NUMBER || env.PBM_BRANCH }} + - name: Run pytest shard number ${{ matrix.shard }} on PSMDB ${{ matrix.psmdb }} for PBM PR/branch ${{ github.event.pull_request.title || env.PR_NUMBER || env.PBM_BRANCH }} run: | - docker compose run test pytest -s --junitxml=junit.xml -k ${{ matrix.test }} + docker compose run test pytest -s --junitxml=junit.xml --shard-id=${{ matrix.shard }} --num-shards=7 -m 'not jenkins and not skip' working-directory: psmdb-testing/pbm-functional/pytest - name: Fetch coverage files @@ -78,7 +78,7 @@ jobs: - 
name: Upload coverage reports uses: actions/upload-artifact@v4 with: - name: reports-${{ matrix.test }}-${{ matrix.psmdb }} + name: reports-${{ matrix.shard }}-${{ matrix.psmdb }} path: psmdb-testing/pbm-functional/pytest/reports/ if: success() || failure() From e4263ab518ec0f1767d3b9c031b461aa39f171c7 Mon Sep 17 00:00:00 2001 From: Boris Ilijic Date: Thu, 23 Oct 2025 18:43:24 +0200 Subject: [PATCH 74/95] Bump testcontainers dependency github.com/testcontainers/testcontainers-go v0.34.0 -> v0.39.0 github.com/testcontainers/testcontainers-go/modules/mongodb v0.34.0 -> v0.39.0 --- go.mod | 13 +- go.sum | 36 +- vendor/dario.cat/mergo/.gitignore | 3 + vendor/dario.cat/mergo/FUNDING.json | 7 + vendor/dario.cat/mergo/README.md | 105 +- vendor/dario.cat/mergo/SECURITY.md | 4 +- vendor/dario.cat/mergo/map.go | 2 +- vendor/dario.cat/mergo/merge.go | 2 +- .../github.com/docker/docker/errdefs/defs.go | 69 - .../github.com/docker/docker/errdefs/doc.go | 8 - .../docker/docker/errdefs/helpers.go | 309 -- .../docker/docker/errdefs/http_helpers.go | 49 - vendor/github.com/docker/docker/errdefs/is.go | 78 - .../docker/pkg/archive/archive_deprecated.go | 259 -- .../docker/pkg/archive/changes_deprecated.go | 56 - .../docker/pkg/archive/copy_deprecated.go | 130 - .../docker/pkg/archive/diff_deprecated.go | 37 - .../docker/pkg/archive/path_deprecated.go | 10 - .../docker/docker/pkg/archive/utils.go | 42 - .../pkg/archive/whiteouts_deprecated.go | 10 - .../docker/pkg/archive/wrap_deprecated.go | 14 - .../docker/docker/pkg/idtools/idtools.go | 223 - .../docker/pkg/idtools/idtools_windows.go | 12 - .../github.com/ebitengine/purego/.gitignore | 1 + vendor/github.com/ebitengine/purego/LICENSE | 201 + vendor/github.com/ebitengine/purego/README.md | 97 + .../github.com/ebitengine/purego/abi_amd64.h | 99 + .../github.com/ebitengine/purego/abi_arm64.h | 39 + vendor/github.com/ebitengine/purego/cgo.go | 19 + .../github.com/ebitengine/purego/dlerror.go | 17 + vendor/github.com/ebitengine/purego/dlfcn.go | 99 + .../ebitengine/purego/dlfcn_android.go | 34 + .../ebitengine/purego/dlfcn_darwin.go | 19 + .../ebitengine/purego/dlfcn_freebsd.go | 14 + .../ebitengine/purego/dlfcn_linux.go | 16 + .../ebitengine/purego/dlfcn_nocgo_freebsd.go | 11 + .../ebitengine/purego/dlfcn_nocgo_linux.go | 19 + .../ebitengine/purego/dlfcn_playground.go | 24 + .../ebitengine/purego/dlfcn_stubs.s | 26 + vendor/github.com/ebitengine/purego/func.go | 436 ++ .../ebitengine/purego/go_runtime.go | 13 + .../purego/internal/cgo/dlfcn_cgo_unix.go | 56 + .../ebitengine/purego/internal/cgo/empty.go | 6 + .../purego/internal/cgo/syscall_cgo_unix.go | 55 + .../purego/internal/fakecgo/abi_amd64.h | 99 + .../purego/internal/fakecgo/abi_arm64.h | 39 + .../purego/internal/fakecgo/asm_amd64.s | 39 + .../purego/internal/fakecgo/asm_arm64.s | 36 + .../purego/internal/fakecgo/callbacks.go | 93 + .../ebitengine/purego/internal/fakecgo/doc.go | 32 + .../purego/internal/fakecgo/freebsd.go | 27 + .../internal/fakecgo/go_darwin_amd64.go | 73 + .../internal/fakecgo/go_darwin_arm64.go | 88 + .../internal/fakecgo/go_freebsd_amd64.go | 95 + .../internal/fakecgo/go_freebsd_arm64.go | 98 + .../purego/internal/fakecgo/go_libinit.go | 72 + .../purego/internal/fakecgo/go_linux_amd64.go | 95 + .../purego/internal/fakecgo/go_linux_arm64.go | 98 + .../purego/internal/fakecgo/go_setenv.go | 18 + .../purego/internal/fakecgo/go_util.go | 37 + .../purego/internal/fakecgo/iscgo.go | 19 + .../purego/internal/fakecgo/libcgo.go | 39 + .../purego/internal/fakecgo/libcgo_darwin.go | 22 + 
.../purego/internal/fakecgo/libcgo_freebsd.go | 16 + .../purego/internal/fakecgo/libcgo_linux.go | 16 + .../purego/internal/fakecgo/setenv.go | 19 + .../purego/internal/fakecgo/symbols.go | 221 + .../purego/internal/fakecgo/symbols_darwin.go | 29 + .../internal/fakecgo/symbols_freebsd.go | 29 + .../purego/internal/fakecgo/symbols_linux.go | 29 + .../internal/fakecgo/trampolines_amd64.s | 104 + .../internal/fakecgo/trampolines_arm64.s | 72 + .../internal/fakecgo/trampolines_stubs.s | 90 + .../purego/internal/strings/strings.go | 40 + vendor/github.com/ebitengine/purego/is_ios.go | 13 + vendor/github.com/ebitengine/purego/nocgo.go | 25 + .../ebitengine/purego/struct_amd64.go | 260 ++ .../ebitengine/purego/struct_arm64.go | 274 ++ .../ebitengine/purego/struct_other.go | 16 + .../github.com/ebitengine/purego/sys_amd64.s | 164 + .../github.com/ebitengine/purego/sys_arm64.s | 92 + .../ebitengine/purego/sys_unix_arm64.s | 70 + .../github.com/ebitengine/purego/syscall.go | 53 + .../ebitengine/purego/syscall_cgo_linux.go | 21 + .../ebitengine/purego/syscall_sysv.go | 223 + .../ebitengine/purego/syscall_windows.go | 46 + .../ebitengine/purego/zcallback_amd64.s | 2014 +++++++++ .../ebitengine/purego/zcallback_arm64.s | 4014 +++++++++++++++++ .../shirou/gopsutil/v3/cpu/cpu_aix_nocgo.go | 92 - .../shirou/gopsutil/v3/cpu/cpu_darwin.go | 117 - .../shirou/gopsutil/v3/cpu/cpu_darwin_cgo.go | 111 - .../gopsutil/v3/cpu/cpu_darwin_nocgo.go | 14 - .../v3/internal/common/common_darwin.go | 66 - .../shirou/gopsutil/v3/mem/mem_darwin.go | 72 - .../shirou/gopsutil/v3/mem/mem_darwin_cgo.go | 58 - .../gopsutil/v3/mem/mem_darwin_nocgo.go | 89 - .../shirou/gopsutil/v3/net/net_fallback.go | 93 - .../shirou/gopsutil/v3/net/net_linux_111.go | 12 - .../shirou/gopsutil/v3/net/net_linux_116.go | 12 - .../shirou/gopsutil/v3/process/process_bsd.go | 76 - .../gopsutil/v3/process/process_darwin.go | 325 -- .../gopsutil/v3/process/process_darwin_cgo.go | 222 - .../v3/process/process_darwin_nocgo.go | 127 - .../gopsutil/v3/process/process_fallback.go | 203 - .../v3/process/process_freebsd_amd64.go | 192 - .../gopsutil/v3/process/process_plan9.go | 203 - .../shirou/gopsutil/{v3 => v4}/LICENSE | 0 .../shirou/gopsutil/{v3 => v4}/common/env.go | 16 +- .../shirou/gopsutil/{v3 => v4}/cpu/cpu.go | 6 +- .../shirou/gopsutil/{v3 => v4}/cpu/cpu_aix.go | 2 +- .../gopsutil/{v3 => v4}/cpu/cpu_aix_cgo.go | 2 +- .../shirou/gopsutil/v4/cpu/cpu_aix_nocgo.go | 157 + .../shirou/gopsutil/v4/cpu/cpu_darwin.go | 203 + .../gopsutil/v4/cpu/cpu_darwin_arm64.go | 80 + .../gopsutil/v4/cpu/cpu_darwin_fallback.go | 13 + .../gopsutil/{v3 => v4}/cpu/cpu_dragonfly.go | 12 +- .../{v3 => v4}/cpu/cpu_dragonfly_amd64.go | 1 + .../gopsutil/{v3 => v4}/cpu/cpu_fallback.go | 4 +- .../gopsutil/{v3 => v4}/cpu/cpu_freebsd.go | 16 +- .../{v3 => v4}/cpu/cpu_freebsd_386.go | 1 + .../{v3 => v4}/cpu/cpu_freebsd_amd64.go | 1 + .../{v3 => v4}/cpu/cpu_freebsd_arm.go | 1 + .../{v3 => v4}/cpu/cpu_freebsd_arm64.go | 1 + .../gopsutil/{v3 => v4}/cpu/cpu_linux.go | 14 +- .../gopsutil/{v3 => v4}/cpu/cpu_netbsd.go | 13 +- .../{v3 => v4}/cpu/cpu_netbsd_amd64.go | 1 + .../shirou/gopsutil/v4/cpu/cpu_netbsd_arm.go | 10 + .../{v3 => v4}/cpu/cpu_netbsd_arm64.go | 1 + .../gopsutil/{v3 => v4}/cpu/cpu_openbsd.go | 13 +- .../{v3 => v4}/cpu/cpu_openbsd_386.go | 1 + .../{v3 => v4}/cpu/cpu_openbsd_amd64.go | 1 + .../{v3 => v4}/cpu/cpu_openbsd_arm.go | 1 + .../{v3 => v4}/cpu/cpu_openbsd_arm64.go | 1 + .../gopsutil/v4/cpu/cpu_openbsd_riscv64.go | 11 + .../gopsutil/{v3 => v4}/cpu/cpu_plan9.go | 11 +- 
.../gopsutil/{v3 => v4}/cpu/cpu_solaris.go | 46 +- .../gopsutil/{v3 => v4}/cpu/cpu_windows.go | 116 +- .../{v3 => v4}/internal/common/binary.go | 9 +- .../{v3 => v4}/internal/common/common.go | 37 +- .../v4/internal/common/common_darwin.go | 400 ++ .../internal/common/common_freebsd.go | 2 +- .../internal/common/common_linux.go | 67 +- .../internal/common/common_netbsd.go | 2 +- .../internal/common/common_openbsd.go | 2 +- .../v4/internal/common/common_testing.go | 14 + .../{v3 => v4}/internal/common/common_unix.go | 24 +- .../internal/common/common_windows.go | 24 +- .../{v3 => v4}/internal/common/endian.go | 1 + .../v4/internal/common/readlink_linux.go | 53 + .../{v3 => v4}/internal/common/sleep.go | 3 +- .../{v3 => v4}/internal/common/warnings.go | 1 + .../shirou/gopsutil/v4/mem/ex_linux.go | 40 + .../shirou/gopsutil/v4/mem/ex_windows.go | 62 + .../shirou/gopsutil/{v3 => v4}/mem/mem.go | 3 +- .../shirou/gopsutil/{v3 => v4}/mem/mem_aix.go | 8 +- .../gopsutil/{v3 => v4}/mem/mem_aix_cgo.go | 2 +- .../gopsutil/{v3 => v4}/mem/mem_aix_nocgo.go | 12 +- .../shirou/gopsutil/{v3 => v4}/mem/mem_bsd.go | 2 +- .../shirou/gopsutil/v4/mem/mem_darwin.go | 130 + .../gopsutil/{v3 => v4}/mem/mem_fallback.go | 10 +- .../gopsutil/{v3 => v4}/mem/mem_freebsd.go | 17 +- .../gopsutil/{v3 => v4}/mem/mem_linux.go | 38 +- .../gopsutil/{v3 => v4}/mem/mem_netbsd.go | 6 +- .../gopsutil/{v3 => v4}/mem/mem_openbsd.go | 9 +- .../{v3 => v4}/mem/mem_openbsd_386.go | 2 +- .../{v3 => v4}/mem/mem_openbsd_amd64.go | 1 + .../{v3 => v4}/mem/mem_openbsd_arm.go | 2 +- .../{v3 => v4}/mem/mem_openbsd_arm64.go | 2 +- .../gopsutil/v4/mem/mem_openbsd_riscv64.go | 38 + .../gopsutil/{v3 => v4}/mem/mem_plan9.go | 7 +- .../gopsutil/{v3 => v4}/mem/mem_solaris.go | 26 +- .../gopsutil/{v3 => v4}/mem/mem_windows.go | 65 +- .../shirou/gopsutil/{v3 => v4}/net/net.go | 93 +- .../shirou/gopsutil/{v3 => v4}/net/net_aix.go | 90 +- .../gopsutil/{v3 => v4}/net/net_aix_cgo.go | 6 +- .../gopsutil/{v3 => v4}/net/net_aix_nocgo.go | 12 +- .../gopsutil/{v3 => v4}/net/net_darwin.go | 68 +- .../shirou/gopsutil/v4/net/net_fallback.go | 71 + .../gopsutil/{v3 => v4}/net/net_freebsd.go | 44 +- .../gopsutil/{v3 => v4}/net/net_linux.go | 188 +- .../gopsutil/{v3 => v4}/net/net_openbsd.go | 138 +- .../gopsutil/{v3 => v4}/net/net_solaris.go | 73 +- .../gopsutil/{v3 => v4}/net/net_unix.go | 68 +- .../gopsutil/{v3 => v4}/net/net_windows.go | 124 +- .../gopsutil/{v3 => v4}/process/process.go | 77 +- .../shirou/gopsutil/v4/process/process_bsd.go | 76 + .../gopsutil/v4/process/process_darwin.go | 483 ++ .../process/process_darwin_amd64.go | 22 + .../process/process_darwin_arm64.go | 23 +- .../gopsutil/v4/process/process_fallback.go | 203 + .../{v3 => v4}/process/process_freebsd.go | 103 +- .../{v3 => v4}/process/process_freebsd_386.go | 26 + .../v4/process/process_freebsd_amd64.go | 224 + .../{v3 => v4}/process/process_freebsd_arm.go | 26 + .../process/process_freebsd_arm64.go | 58 +- .../{v3 => v4}/process/process_linux.go | 109 +- .../{v3 => v4}/process/process_openbsd.go | 88 +- .../{v3 => v4}/process/process_openbsd_386.go | 3 +- .../process/process_openbsd_amd64.go | 2 + .../{v3 => v4}/process/process_openbsd_arm.go | 3 +- .../process/process_openbsd_arm64.go | 3 +- .../v4/process/process_openbsd_riscv64.go | 205 + .../gopsutil/v4/process/process_plan9.go | 203 + .../{v3 => v4}/process/process_posix.go | 13 +- .../{v3 => v4}/process/process_solaris.go | 70 +- .../{v3 => v4}/process/process_windows.go | 176 +- .../process/process_windows_32bit.go | 76 +- 
.../process/process_windows_64bit.go | 38 +- .../shoenig/go-m1cpu/.golangci.yaml | 12 - vendor/github.com/shoenig/go-m1cpu/LICENSE | 363 -- vendor/github.com/shoenig/go-m1cpu/Makefile | 12 - vendor/github.com/shoenig/go-m1cpu/README.md | 66 - vendor/github.com/shoenig/go-m1cpu/cpu.go | 213 - .../shoenig/go-m1cpu/incompatible.go | 53 - .../testcontainers-go/.gitignore | 9 +- .../testcontainers-go/.golangci.yml | 109 +- .../testcontainers-go/CONTRIBUTING.md | 2 +- .../testcontainers/testcontainers-go/Makefile | 86 +- .../testcontainers/testcontainers-go/Pipfile | 4 +- .../testcontainers-go/Pipfile.lock | 498 +- .../testcontainers-go/RELEASING.md | 18 +- .../testcontainers-go/cleanup.go | 100 +- .../testcontainers-go/commons-test.mk | 9 +- .../testcontainers-go/container.go | 206 +- .../testcontainers-go/docker.go | 637 ++- .../testcontainers-go/docker_auth.go | 50 +- .../testcontainers-go/docker_client.go | 5 +- .../testcontainers-go/docker_mounts.go | 62 +- .../testcontainers-go/exec/processor.go | 47 +- .../testcontainers/testcontainers-go/file.go | 6 +- .../testcontainers-go/generic.go | 46 +- .../testcontainers/testcontainers-go/image.go | 11 +- .../internal/config/config.go | 31 +- .../internal/core/bootstrap.go | 11 +- .../internal/core/docker_host.go | 23 +- .../internal/core/docker_rootless.go | 12 +- .../testcontainers-go/internal/core/images.go | 2 +- .../testcontainers-go/internal/core/labels.go | 5 +- .../testcontainers-go/internal/version.go | 2 +- .../testcontainers-go/lifecycle.go | 294 +- .../testcontainers-go/log/logger.go | 73 + .../testcontainers-go/logger.go | 103 - .../testcontainers-go/logger_option.go | 45 + .../testcontainers-go/mkdocs.yml | 49 +- .../testcontainers-go/modules/mongodb/cli.go | 32 + .../modules/mongodb/mongodb.go | 158 +- .../modules/mongodb/mount/entrypoint-tc.sh | 32 + .../testcontainers-go/mounts.go | 51 +- .../testcontainers-go/network.go | 15 +- .../testcontainers-go/options.go | 232 +- .../testcontainers-go/parallel.go | 7 +- .../testcontainers-go/port_forwarding.go | 291 +- .../testcontainers-go/provider.go | 9 +- .../testcontainers-go/reaper.go | 47 +- .../testcontainers-go/requirements.txt | 4 +- .../testcontainers-go/runtime.txt | 2 +- .../sonar-project.properties | 21 - .../testcontainers-go/testcontainers.go | 2 +- .../testcontainers-go/testing.go | 69 +- .../testcontainers-go/validator.go | 7 + .../testcontainers-go/wait/all.go | 12 +- .../testcontainers-go/wait/exec.go | 2 +- .../testcontainers-go/wait/exit.go | 3 +- .../testcontainers-go/wait/file.go | 2 +- .../testcontainers-go/wait/host_port.go | 103 +- .../testcontainers-go/wait/http.go | 8 +- .../testcontainers-go/wait/log.go | 111 +- .../testcontainers-go/wait/nop.go | 8 +- .../testcontainers-go/wait/sql.go | 24 +- .../testcontainers-go/wait/tls.go | 167 + .../testcontainers-go/wait/wait.go | 8 +- .../testcontainers-go/wait/walk.go | 81 + vendor/github.com/yusufpapurcu/wmi/wmi.go | 12 + vendor/modules.txt | 43 +- 274 files changed, 17228 insertions(+), 6703 deletions(-) create mode 100644 vendor/dario.cat/mergo/FUNDING.json delete mode 100644 vendor/github.com/docker/docker/errdefs/defs.go delete mode 100644 vendor/github.com/docker/docker/errdefs/doc.go delete mode 100644 vendor/github.com/docker/docker/errdefs/helpers.go delete mode 100644 vendor/github.com/docker/docker/errdefs/http_helpers.go delete mode 100644 vendor/github.com/docker/docker/errdefs/is.go delete mode 100644 vendor/github.com/docker/docker/pkg/archive/archive_deprecated.go delete mode 100644 
vendor/github.com/docker/docker/pkg/archive/changes_deprecated.go delete mode 100644 vendor/github.com/docker/docker/pkg/archive/copy_deprecated.go delete mode 100644 vendor/github.com/docker/docker/pkg/archive/diff_deprecated.go delete mode 100644 vendor/github.com/docker/docker/pkg/archive/path_deprecated.go delete mode 100644 vendor/github.com/docker/docker/pkg/archive/utils.go delete mode 100644 vendor/github.com/docker/docker/pkg/archive/whiteouts_deprecated.go delete mode 100644 vendor/github.com/docker/docker/pkg/archive/wrap_deprecated.go delete mode 100644 vendor/github.com/docker/docker/pkg/idtools/idtools.go delete mode 100644 vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go create mode 100644 vendor/github.com/ebitengine/purego/.gitignore create mode 100644 vendor/github.com/ebitengine/purego/LICENSE create mode 100644 vendor/github.com/ebitengine/purego/README.md create mode 100644 vendor/github.com/ebitengine/purego/abi_amd64.h create mode 100644 vendor/github.com/ebitengine/purego/abi_arm64.h create mode 100644 vendor/github.com/ebitengine/purego/cgo.go create mode 100644 vendor/github.com/ebitengine/purego/dlerror.go create mode 100644 vendor/github.com/ebitengine/purego/dlfcn.go create mode 100644 vendor/github.com/ebitengine/purego/dlfcn_android.go create mode 100644 vendor/github.com/ebitengine/purego/dlfcn_darwin.go create mode 100644 vendor/github.com/ebitengine/purego/dlfcn_freebsd.go create mode 100644 vendor/github.com/ebitengine/purego/dlfcn_linux.go create mode 100644 vendor/github.com/ebitengine/purego/dlfcn_nocgo_freebsd.go create mode 100644 vendor/github.com/ebitengine/purego/dlfcn_nocgo_linux.go create mode 100644 vendor/github.com/ebitengine/purego/dlfcn_playground.go create mode 100644 vendor/github.com/ebitengine/purego/dlfcn_stubs.s create mode 100644 vendor/github.com/ebitengine/purego/func.go create mode 100644 vendor/github.com/ebitengine/purego/go_runtime.go create mode 100644 vendor/github.com/ebitengine/purego/internal/cgo/dlfcn_cgo_unix.go create mode 100644 vendor/github.com/ebitengine/purego/internal/cgo/empty.go create mode 100644 vendor/github.com/ebitengine/purego/internal/cgo/syscall_cgo_unix.go create mode 100644 vendor/github.com/ebitengine/purego/internal/fakecgo/abi_amd64.h create mode 100644 vendor/github.com/ebitengine/purego/internal/fakecgo/abi_arm64.h create mode 100644 vendor/github.com/ebitengine/purego/internal/fakecgo/asm_amd64.s create mode 100644 vendor/github.com/ebitengine/purego/internal/fakecgo/asm_arm64.s create mode 100644 vendor/github.com/ebitengine/purego/internal/fakecgo/callbacks.go create mode 100644 vendor/github.com/ebitengine/purego/internal/fakecgo/doc.go create mode 100644 vendor/github.com/ebitengine/purego/internal/fakecgo/freebsd.go create mode 100644 vendor/github.com/ebitengine/purego/internal/fakecgo/go_darwin_amd64.go create mode 100644 vendor/github.com/ebitengine/purego/internal/fakecgo/go_darwin_arm64.go create mode 100644 vendor/github.com/ebitengine/purego/internal/fakecgo/go_freebsd_amd64.go create mode 100644 vendor/github.com/ebitengine/purego/internal/fakecgo/go_freebsd_arm64.go create mode 100644 vendor/github.com/ebitengine/purego/internal/fakecgo/go_libinit.go create mode 100644 vendor/github.com/ebitengine/purego/internal/fakecgo/go_linux_amd64.go create mode 100644 vendor/github.com/ebitengine/purego/internal/fakecgo/go_linux_arm64.go create mode 100644 vendor/github.com/ebitengine/purego/internal/fakecgo/go_setenv.go create mode 100644 
vendor/github.com/ebitengine/purego/internal/fakecgo/go_util.go create mode 100644 vendor/github.com/ebitengine/purego/internal/fakecgo/iscgo.go create mode 100644 vendor/github.com/ebitengine/purego/internal/fakecgo/libcgo.go create mode 100644 vendor/github.com/ebitengine/purego/internal/fakecgo/libcgo_darwin.go create mode 100644 vendor/github.com/ebitengine/purego/internal/fakecgo/libcgo_freebsd.go create mode 100644 vendor/github.com/ebitengine/purego/internal/fakecgo/libcgo_linux.go create mode 100644 vendor/github.com/ebitengine/purego/internal/fakecgo/setenv.go create mode 100644 vendor/github.com/ebitengine/purego/internal/fakecgo/symbols.go create mode 100644 vendor/github.com/ebitengine/purego/internal/fakecgo/symbols_darwin.go create mode 100644 vendor/github.com/ebitengine/purego/internal/fakecgo/symbols_freebsd.go create mode 100644 vendor/github.com/ebitengine/purego/internal/fakecgo/symbols_linux.go create mode 100644 vendor/github.com/ebitengine/purego/internal/fakecgo/trampolines_amd64.s create mode 100644 vendor/github.com/ebitengine/purego/internal/fakecgo/trampolines_arm64.s create mode 100644 vendor/github.com/ebitengine/purego/internal/fakecgo/trampolines_stubs.s create mode 100644 vendor/github.com/ebitengine/purego/internal/strings/strings.go create mode 100644 vendor/github.com/ebitengine/purego/is_ios.go create mode 100644 vendor/github.com/ebitengine/purego/nocgo.go create mode 100644 vendor/github.com/ebitengine/purego/struct_amd64.go create mode 100644 vendor/github.com/ebitengine/purego/struct_arm64.go create mode 100644 vendor/github.com/ebitengine/purego/struct_other.go create mode 100644 vendor/github.com/ebitengine/purego/sys_amd64.s create mode 100644 vendor/github.com/ebitengine/purego/sys_arm64.s create mode 100644 vendor/github.com/ebitengine/purego/sys_unix_arm64.s create mode 100644 vendor/github.com/ebitengine/purego/syscall.go create mode 100644 vendor/github.com/ebitengine/purego/syscall_cgo_linux.go create mode 100644 vendor/github.com/ebitengine/purego/syscall_sysv.go create mode 100644 vendor/github.com/ebitengine/purego/syscall_windows.go create mode 100644 vendor/github.com/ebitengine/purego/zcallback_amd64.s create mode 100644 vendor/github.com/ebitengine/purego/zcallback_arm64.s delete mode 100644 vendor/github.com/shirou/gopsutil/v3/cpu/cpu_aix_nocgo.go delete mode 100644 vendor/github.com/shirou/gopsutil/v3/cpu/cpu_darwin.go delete mode 100644 vendor/github.com/shirou/gopsutil/v3/cpu/cpu_darwin_cgo.go delete mode 100644 vendor/github.com/shirou/gopsutil/v3/cpu/cpu_darwin_nocgo.go delete mode 100644 vendor/github.com/shirou/gopsutil/v3/internal/common/common_darwin.go delete mode 100644 vendor/github.com/shirou/gopsutil/v3/mem/mem_darwin.go delete mode 100644 vendor/github.com/shirou/gopsutil/v3/mem/mem_darwin_cgo.go delete mode 100644 vendor/github.com/shirou/gopsutil/v3/mem/mem_darwin_nocgo.go delete mode 100644 vendor/github.com/shirou/gopsutil/v3/net/net_fallback.go delete mode 100644 vendor/github.com/shirou/gopsutil/v3/net/net_linux_111.go delete mode 100644 vendor/github.com/shirou/gopsutil/v3/net/net_linux_116.go delete mode 100644 vendor/github.com/shirou/gopsutil/v3/process/process_bsd.go delete mode 100644 vendor/github.com/shirou/gopsutil/v3/process/process_darwin.go delete mode 100644 vendor/github.com/shirou/gopsutil/v3/process/process_darwin_cgo.go delete mode 100644 vendor/github.com/shirou/gopsutil/v3/process/process_darwin_nocgo.go delete mode 100644 vendor/github.com/shirou/gopsutil/v3/process/process_fallback.go delete 
mode 100644 vendor/github.com/shirou/gopsutil/v3/process/process_freebsd_amd64.go delete mode 100644 vendor/github.com/shirou/gopsutil/v3/process/process_plan9.go rename vendor/github.com/shirou/gopsutil/{v3 => v4}/LICENSE (100%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/common/env.go (51%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/cpu/cpu.go (96%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/cpu/cpu_aix.go (85%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/cpu/cpu_aix_cgo.go (96%) create mode 100644 vendor/github.com/shirou/gopsutil/v4/cpu/cpu_aix_nocgo.go create mode 100644 vendor/github.com/shirou/gopsutil/v4/cpu/cpu_darwin.go create mode 100644 vendor/github.com/shirou/gopsutil/v4/cpu/cpu_darwin_arm64.go create mode 100644 vendor/github.com/shirou/gopsutil/v4/cpu/cpu_darwin_fallback.go rename vendor/github.com/shirou/gopsutil/{v3 => v4}/cpu/cpu_dragonfly.go (91%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/cpu/cpu_dragonfly_amd64.go (71%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/cpu/cpu_fallback.go (83%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/cpu/cpu_freebsd.go (91%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/cpu/cpu_freebsd_386.go (71%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/cpu/cpu_freebsd_amd64.go (71%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/cpu/cpu_freebsd_arm.go (71%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/cpu/cpu_freebsd_arm64.go (71%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/cpu/cpu_linux.go (97%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/cpu/cpu_netbsd.go (87%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/cpu/cpu_netbsd_amd64.go (71%) create mode 100644 vendor/github.com/shirou/gopsutil/v4/cpu/cpu_netbsd_arm.go rename vendor/github.com/shirou/gopsutil/{v3 => v4}/cpu/cpu_netbsd_arm64.go (71%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/cpu/cpu_openbsd.go (88%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/cpu/cpu_openbsd_386.go (73%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/cpu/cpu_openbsd_amd64.go (73%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/cpu/cpu_openbsd_arm.go (73%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/cpu/cpu_openbsd_arm64.go (73%) create mode 100644 vendor/github.com/shirou/gopsutil/v4/cpu/cpu_openbsd_riscv64.go rename vendor/github.com/shirou/gopsutil/{v3 => v4}/cpu/cpu_plan9.go (73%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/cpu/cpu_solaris.go (84%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/cpu/cpu_windows.go (63%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/internal/common/binary.go (98%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/internal/common/common.go (90%) create mode 100644 vendor/github.com/shirou/gopsutil/v4/internal/common/common_darwin.go rename vendor/github.com/shirou/gopsutil/{v3 => v4}/internal/common/common_freebsd.go (97%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/internal/common/common_linux.go (83%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/internal/common/common_netbsd.go (96%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/internal/common/common_openbsd.go (96%) create mode 100644 vendor/github.com/shirou/gopsutil/v4/internal/common/common_testing.go rename vendor/github.com/shirou/gopsutil/{v3 => v4}/internal/common/common_unix.go (61%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/internal/common/common_windows.go (93%) rename vendor/github.com/shirou/gopsutil/{v3 => 
v4}/internal/common/endian.go (88%) create mode 100644 vendor/github.com/shirou/gopsutil/v4/internal/common/readlink_linux.go rename vendor/github.com/shirou/gopsutil/{v3 => v4}/internal/common/sleep.go (77%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/internal/common/warnings.go (92%) create mode 100644 vendor/github.com/shirou/gopsutil/v4/mem/ex_linux.go create mode 100644 vendor/github.com/shirou/gopsutil/v4/mem/ex_windows.go rename vendor/github.com/shirou/gopsutil/{v3 => v4}/mem/mem.go (97%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/mem/mem_aix.go (58%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/mem/mem_aix_cgo.go (97%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/mem/mem_aix_nocgo.go (83%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/mem/mem_bsd.go (98%) create mode 100644 vendor/github.com/shirou/gopsutil/v4/mem/mem_darwin.go rename vendor/github.com/shirou/gopsutil/{v3 => v4}/mem/mem_fallback.go (62%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/mem/mem_freebsd.go (91%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/mem/mem_linux.go (93%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/mem/mem_netbsd.go (90%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/mem/mem_openbsd.go (90%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/mem/mem_openbsd_386.go (93%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/mem/mem_openbsd_amd64.go (92%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/mem/mem_openbsd_arm.go (93%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/mem/mem_openbsd_arm64.go (93%) create mode 100644 vendor/github.com/shirou/gopsutil/v4/mem/mem_openbsd_riscv64.go rename vendor/github.com/shirou/gopsutil/{v3 => v4}/mem/mem_plan9.go (91%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/mem/mem_solaris.go (90%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/mem/mem_windows.go (67%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/net/net.go (67%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/net/net_aix.go (67%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/net/net_aix_cgo.go (88%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/net/net_aix_nocgo.go (89%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/net/net_darwin.go (77%) create mode 100644 vendor/github.com/shirou/gopsutil/v4/net/net_fallback.go rename vendor/github.com/shirou/gopsutil/{v3 => v4}/net/net_freebsd.go (57%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/net/net_linux.go (74%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/net/net_openbsd.go (65%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/net/net_solaris.go (58%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/net/net_unix.go (61%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/net/net_windows.go (80%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/process/process.go (87%) create mode 100644 vendor/github.com/shirou/gopsutil/v4/process/process_bsd.go create mode 100644 vendor/github.com/shirou/gopsutil/v4/process/process_darwin.go rename vendor/github.com/shirou/gopsutil/{v3 => v4}/process/process_darwin_amd64.go (87%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/process/process_darwin_arm64.go (85%) create mode 100644 vendor/github.com/shirou/gopsutil/v4/process/process_fallback.go rename vendor/github.com/shirou/gopsutil/{v3 => v4}/process/process_freebsd.go (67%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/process/process_freebsd_386.go (83%) create mode 100644 
vendor/github.com/shirou/gopsutil/v4/process/process_freebsd_amd64.go rename vendor/github.com/shirou/gopsutil/{v3 => v4}/process/process_freebsd_arm.go (83%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/process/process_freebsd_arm64.go (76%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/process/process_linux.go (93%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/process/process_openbsd.go (73%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/process/process_openbsd_386.go (98%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/process/process_openbsd_amd64.go (98%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/process/process_openbsd_arm.go (98%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/process/process_openbsd_arm64.go (98%) create mode 100644 vendor/github.com/shirou/gopsutil/v4/process/process_openbsd_riscv64.go create mode 100644 vendor/github.com/shirou/gopsutil/v4/process/process_plan9.go rename vendor/github.com/shirou/gopsutil/{v3 => v4}/process/process_posix.go (92%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/process/process_solaris.go (68%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/process/process_windows.go (84%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/process/process_windows_32bit.go (52%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/process/process_windows_64bit.go (68%) delete mode 100644 vendor/github.com/shoenig/go-m1cpu/.golangci.yaml delete mode 100644 vendor/github.com/shoenig/go-m1cpu/LICENSE delete mode 100644 vendor/github.com/shoenig/go-m1cpu/Makefile delete mode 100644 vendor/github.com/shoenig/go-m1cpu/README.md delete mode 100644 vendor/github.com/shoenig/go-m1cpu/cpu.go delete mode 100644 vendor/github.com/shoenig/go-m1cpu/incompatible.go create mode 100644 vendor/github.com/testcontainers/testcontainers-go/log/logger.go delete mode 100644 vendor/github.com/testcontainers/testcontainers-go/logger.go create mode 100644 vendor/github.com/testcontainers/testcontainers-go/logger_option.go create mode 100644 vendor/github.com/testcontainers/testcontainers-go/modules/mongodb/cli.go create mode 100644 vendor/github.com/testcontainers/testcontainers-go/modules/mongodb/mount/entrypoint-tc.sh delete mode 100644 vendor/github.com/testcontainers/testcontainers-go/sonar-project.properties create mode 100644 vendor/github.com/testcontainers/testcontainers-go/validator.go create mode 100644 vendor/github.com/testcontainers/testcontainers-go/wait/tls.go create mode 100644 vendor/github.com/testcontainers/testcontainers-go/wait/walk.go diff --git a/go.mod b/go.mod index 1ce6e94b5..9e75bdffd 100644 --- a/go.mod +++ b/go.mod @@ -30,9 +30,9 @@ require ( github.com/pkg/errors v0.9.1 github.com/spf13/cobra v1.10.1 github.com/spf13/viper v1.21.0 - github.com/testcontainers/testcontainers-go v0.34.0 + github.com/testcontainers/testcontainers-go v0.39.0 github.com/testcontainers/testcontainers-go/modules/minio v0.34.0 - github.com/testcontainers/testcontainers-go/modules/mongodb v0.34.0 + github.com/testcontainers/testcontainers-go/modules/mongodb v0.39.0 go.mongodb.org/mongo-driver v1.17.4 golang.org/x/mod v0.28.0 golang.org/x/sync v0.17.0 @@ -48,7 +48,7 @@ require ( cloud.google.com/go/compute/metadata v0.8.0 // indirect cloud.google.com/go/iam v1.5.2 // indirect cloud.google.com/go/monitoring v1.24.2 // indirect - dario.cat/mergo v1.0.0 // indirect + dario.cat/mergo v1.0.2 // indirect github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.12.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2 
// indirect github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect @@ -81,6 +81,7 @@ require ( github.com/distribution/reference v0.6.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/dustin/go-humanize v1.0.1 // indirect + github.com/ebitengine/purego v0.8.4 // indirect github.com/envoyproxy/go-control-plane/envoy v1.32.4 // indirect github.com/envoyproxy/protoc-gen-validate v1.2.1 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect @@ -103,7 +104,6 @@ require ( github.com/moby/docker-image-spec v1.3.1 // indirect github.com/moby/go-archive v0.1.0 // indirect github.com/moby/patternmatcher v0.6.0 // indirect - github.com/moby/sys/atomicwriter v0.1.0 // indirect github.com/moby/sys/sequential v0.6.0 // indirect github.com/moby/sys/user v0.4.0 // indirect github.com/moby/sys/userns v0.1.0 // indirect @@ -119,8 +119,7 @@ require ( github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect github.com/rs/xid v1.6.0 // indirect github.com/sagikazarmark/locafero v0.11.0 // indirect - github.com/shirou/gopsutil/v3 v3.23.12 // indirect - github.com/shoenig/go-m1cpu v0.1.6 // indirect + github.com/shirou/gopsutil/v4 v4.25.6 // indirect github.com/sirupsen/logrus v1.9.3 // indirect github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 // indirect github.com/spf13/afero v1.15.0 // indirect @@ -136,7 +135,7 @@ require ( github.com/xdg-go/scram v1.1.2 // indirect github.com/xdg-go/stringprep v1.0.4 // indirect github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78 // indirect - github.com/yusufpapurcu/wmi v1.2.3 // indirect + github.com/yusufpapurcu/wmi v1.2.4 // indirect go.opentelemetry.io/auto/sdk v1.2.1 // indirect go.opentelemetry.io/contrib/detectors/gcp v1.38.0 // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.63.0 // indirect diff --git a/go.sum b/go.sum index b57700f5b..4254e990c 100644 --- a/go.sum +++ b/go.sum @@ -20,8 +20,8 @@ cloud.google.com/go/storage v1.56.2 h1:DzxQ4ppJe4OSTtZLtCqscC3knyW919eNl0zLLpojn cloud.google.com/go/storage v1.56.2/go.mod h1:C9xuCZgFl3buo2HZU/1FncgvvOgTAs/rnh4gF4lMg0s= cloud.google.com/go/trace v1.11.6 h1:2O2zjPzqPYAHrn3OKl029qlqG6W8ZdYaOWRyr8NgMT4= cloud.google.com/go/trace v1.11.6/go.mod h1:GA855OeDEBiBMzcckLPE2kDunIpC72N+Pq8WFieFjnI= -dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= -dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= +dario.cat/mergo v1.0.2 h1:85+piFYR1tMbRrLcDwR18y4UKJ3aH1Tbzi24VRW1TK8= +dario.cat/mergo v1.0.2/go.mod h1:E/hbnu0NxMFBjpMIE34DRGLWqDy0g5FuKDhCb31ngxA= github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6 h1:He8afgbRMd7mFxO99hRNu+6tazq8nFF9lIwo9JFroBk= github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.19.1 h1:5YTBM8QDVIBN3sxBil89WfdAAqDZbyJTgh688DSxX5w= @@ -127,6 +127,8 @@ github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4 github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= +github.com/ebitengine/purego v0.8.4 h1:CF7LEKg5FFOsASUj0+QwaXf8Ht6TlFxg09+S9wz0omw= +github.com/ebitengine/purego v0.8.4/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ= 
github.com/envoyproxy/go-control-plane v0.13.4 h1:zEqyPVyku6IvWCFwux4x9RxkLOMUL+1vC9xUFv5l2/M= github.com/envoyproxy/go-control-plane v0.13.4/go.mod h1:kDfuBlDVsSj2MjrLEtRWtHlsWIFcGyB2RMO44Dc5GZA= github.com/envoyproxy/go-control-plane/envoy v1.32.4 h1:jb83lalDRZSpPWW2Z7Mck/8kXZ5CQAFYVjQcdVIr83A= @@ -163,8 +165,6 @@ github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6 github.com/golang/snappy v1.0.0 h1:Oy607GVXHs7RtbggtPBnr2RmDArIsAefDwvrdWvRhGs= github.com/golang/snappy v1.0.0/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -267,12 +267,8 @@ github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/sagikazarmark/locafero v0.11.0 h1:1iurJgmM9G3PA/I+wWYIOw/5SyBtxapeHDcg+AAIFXc= github.com/sagikazarmark/locafero v0.11.0/go.mod h1:nVIGvgyzw595SUSUE6tvCp3YYTeHs15MvlmU87WwIik= -github.com/shirou/gopsutil/v3 v3.23.12 h1:z90NtUkp3bMtmICZKpC4+WaknU1eXtp5vtbQ11DgpE4= -github.com/shirou/gopsutil/v3 v3.23.12/go.mod h1:1FrWgea594Jp7qmjHUUPlJDTPgcsb9mGnXDxavtikzM= -github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM= -github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ= -github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU= -github.com/shoenig/test v0.6.4/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k= +github.com/shirou/gopsutil/v4 v4.25.6 h1:kLysI2JsKorfaFPcYmcJqbzROzsBWEOAtw6A7dIfqXs= +github.com/shirou/gopsutil/v4 v4.25.6/go.mod h1:PfybzyydfZcN+JMMjkF6Zb8Mq1A/VcogFFg7hj50W9c= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/smarty/assertions v1.15.0 h1:cR//PqUBUiQRakZWqBiFFQ9wb8emQGDb0HeGdqGByCY= @@ -295,26 +291,21 @@ github.com/spf13/viper v1.21.0/go.mod h1:P0lhsswPGWD/1lZJ9ny3fYnVqxiegrlNrEmgLjb github.com/spiffe/go-spiffe/v2 v2.6.0 h1:l+DolpxNWYgruGQVV0xsfeya3CsC7m8iBzDnMpsbLuo= github.com/spiffe/go-spiffe/v2 v2.6.0/go.mod h1:gm2SeUoMZEtpnzPNs2Csc0D/gX33k1xIx7lEzqblHEs= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.0/go.mod 
h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= -github.com/testcontainers/testcontainers-go v0.34.0 h1:5fbgF0vIN5u+nD3IWabQwRybuB4GY8G2HHgCkbMzMHo= -github.com/testcontainers/testcontainers-go v0.34.0/go.mod h1:6P/kMkQe8yqPHfPWNulFGdFHTD8HB2vLq/231xY2iPQ= +github.com/testcontainers/testcontainers-go v0.39.0 h1:uCUJ5tA+fcxbFAB0uP3pIK3EJ2IjjDUHFSZ1H1UxAts= +github.com/testcontainers/testcontainers-go v0.39.0/go.mod h1:qmHpkG7H5uPf/EvOORKvS6EuDkBUPE3zpVGaH9NL7f8= github.com/testcontainers/testcontainers-go/modules/minio v0.34.0 h1:OpUqT7VV/d+wriDMHcCZCUfOoFE6wiHnGVzJOXqq8lU= github.com/testcontainers/testcontainers-go/modules/minio v0.34.0/go.mod h1:0iaOtVNCzu04KcXHgmdNE7aelKaMUwC9x1M0oe6h1sw= -github.com/testcontainers/testcontainers-go/modules/mongodb v0.34.0 h1:o3bgcECyBFfMwqexCH/6vIJ8XzbCffCP/Euesu33rgY= -github.com/testcontainers/testcontainers-go/modules/mongodb v0.34.0/go.mod h1:ljLR42dN7k40CX0dp30R8BRIB3OOdvr7rBANEpfmMs4= +github.com/testcontainers/testcontainers-go/modules/mongodb v0.39.0 h1:DFCNstqIngh9+OdBRU/EVe+c9h+qlUdY+vzSc0lTFmw= +github.com/testcontainers/testcontainers-go/modules/mongodb v0.39.0/go.mod h1:XpEcg+jhF8ICVVH+R1pxXv39TFKuchTZ7zAhzbx1nLU= github.com/tinylib/msgp v1.4.0 h1:SYOeDRiydzOw9kSiwdYp9UcBgPFtLU2WDHaJXyHruf8= github.com/tinylib/msgp v1.4.0/go.mod h1:cvjFkb4RiC8qSBOPMGPSzSAx47nAsfhLVTCZZNuHv5o= github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU= @@ -330,10 +321,12 @@ github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gi github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78 h1:ilQV1hzziu+LLM3zUTJ0trRztfwgjqKnBWNtSRkbmwM= github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78/go.mod h1:aL8wCCfTfSfmXjznFBSZNN13rSJjlIOI1fUNAtF7rmI= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -github.com/yusufpapurcu/wmi v1.2.3 h1:E1ctvB7uKFMOJw3fdOW32DwGE9I7t++CRUEMKvFoFiw= -github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= +github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0= +github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= go.mongodb.org/mongo-driver v1.17.4 h1:jUorfmVzljjr0FLzYQsGP8cgN/qzzxlY9Vh0C9KFXVw= go.mongodb.org/mongo-driver v1.17.4/go.mod h1:Hy04i7O2kC4RS06ZrhPRqj/u4DTYkFDAAccj+rVKqgQ= +go.mongodb.org/mongo-driver/v2 v2.3.0 h1:sh55yOXA2vUjW1QYw/2tRlHSQViwDyPnW61AwpZ4rtU= +go.mongodb.org/mongo-driver/v2 v2.3.0/go.mod h1:jHeEDJHJq7tm6ZF45Issun9dbogjfnPySb1vXA7EeAI= go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= go.opentelemetry.io/contrib/detectors/gcp v1.38.0 h1:ZoYbqX7OaA/TAikspPl3ozPI6iY6LiIY9I8cUfm+pJs= @@ -400,7 +393,6 @@ golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys 
v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k= golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= diff --git a/vendor/dario.cat/mergo/.gitignore b/vendor/dario.cat/mergo/.gitignore index 529c3412b..45ad0f1ae 100644 --- a/vendor/dario.cat/mergo/.gitignore +++ b/vendor/dario.cat/mergo/.gitignore @@ -13,6 +13,9 @@ # Output of the go coverage tool, specifically when used with LiteIDE *.out +# Golang/Intellij +.idea + # Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736 .glide/ diff --git a/vendor/dario.cat/mergo/FUNDING.json b/vendor/dario.cat/mergo/FUNDING.json new file mode 100644 index 000000000..0585e1fe1 --- /dev/null +++ b/vendor/dario.cat/mergo/FUNDING.json @@ -0,0 +1,7 @@ +{ + "drips": { + "ethereum": { + "ownedBy": "0x6160020e7102237aC41bdb156e94401692D76930" + } + } +} diff --git a/vendor/dario.cat/mergo/README.md b/vendor/dario.cat/mergo/README.md index 7d0cf9f32..0e4a59afd 100644 --- a/vendor/dario.cat/mergo/README.md +++ b/vendor/dario.cat/mergo/README.md @@ -44,13 +44,21 @@ Also a lovely [comune](http://en.wikipedia.org/wiki/Mergo) (municipality) in the ## Status -It is ready for production use. [It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, Microsoft, etc](https://github.com/imdario/mergo#mergo-in-the-wild). +Mergo is stable and frozen, ready for production. Check a short list of the projects using at large scale it [here](https://github.com/imdario/mergo#mergo-in-the-wild). + +No new features are accepted. They will be considered for a future v2 that improves the implementation and fixes bugs for corner cases. ### Important notes #### 1.0.0 -In [1.0.0](//github.com/imdario/mergo/releases/tag/1.0.0) Mergo moves to a vanity URL `dario.cat/mergo`. +In [1.0.0](//github.com/imdario/mergo/releases/tag/1.0.0) Mergo moves to a vanity URL `dario.cat/mergo`. No more v1 versions will be released. + +If the vanity URL is causing issues in your project due to a dependency pulling Mergo - it isn't a direct dependency in your project - it is recommended to use [replace](https://github.com/golang/go/wiki/Modules#when-should-i-use-the-replace-directive) to pin the version to the last one with the old import URL: + +``` +replace github.com/imdario/mergo => github.com/imdario/mergo v0.3.16 +``` #### 0.3.9 @@ -64,55 +72,23 @@ If you were using Mergo before April 6th, 2015, please check your project works If Mergo is useful to you, consider buying me a coffee, a beer, or making a monthly donation to allow me to keep building great free software. 
:heart_eyes: -Buy Me a Coffee at ko-fi.com Donate using Liberapay Become my sponsor ### Mergo in the wild -- [moby/moby](https://github.com/moby/moby) -- [kubernetes/kubernetes](https://github.com/kubernetes/kubernetes) -- [vmware/dispatch](https://github.com/vmware/dispatch) -- [Shopify/themekit](https://github.com/Shopify/themekit) -- [imdario/zas](https://github.com/imdario/zas) -- [matcornic/hermes](https://github.com/matcornic/hermes) -- [OpenBazaar/openbazaar-go](https://github.com/OpenBazaar/openbazaar-go) -- [kataras/iris](https://github.com/kataras/iris) -- [michaelsauter/crane](https://github.com/michaelsauter/crane) -- [go-task/task](https://github.com/go-task/task) -- [sensu/uchiwa](https://github.com/sensu/uchiwa) -- [ory/hydra](https://github.com/ory/hydra) -- [sisatech/vcli](https://github.com/sisatech/vcli) -- [dairycart/dairycart](https://github.com/dairycart/dairycart) -- [projectcalico/felix](https://github.com/projectcalico/felix) -- [resin-os/balena](https://github.com/resin-os/balena) -- [go-kivik/kivik](https://github.com/go-kivik/kivik) -- [Telefonica/govice](https://github.com/Telefonica/govice) -- [supergiant/supergiant](supergiant/supergiant) -- [SergeyTsalkov/brooce](https://github.com/SergeyTsalkov/brooce) -- [soniah/dnsmadeeasy](https://github.com/soniah/dnsmadeeasy) -- [ohsu-comp-bio/funnel](https://github.com/ohsu-comp-bio/funnel) -- [EagerIO/Stout](https://github.com/EagerIO/Stout) -- [lynndylanhurley/defsynth-api](https://github.com/lynndylanhurley/defsynth-api) -- [russross/canvasassignments](https://github.com/russross/canvasassignments) -- [rdegges/cryptly-api](https://github.com/rdegges/cryptly-api) -- [casualjim/exeggutor](https://github.com/casualjim/exeggutor) -- [divshot/gitling](https://github.com/divshot/gitling) -- [RWJMurphy/gorl](https://github.com/RWJMurphy/gorl) -- [andrerocker/deploy42](https://github.com/andrerocker/deploy42) -- [elwinar/rambler](https://github.com/elwinar/rambler) -- [tmaiaroto/gopartman](https://github.com/tmaiaroto/gopartman) -- [jfbus/impressionist](https://github.com/jfbus/impressionist) -- [Jmeyering/zealot](https://github.com/Jmeyering/zealot) -- [godep-migrator/rigger-host](https://github.com/godep-migrator/rigger-host) -- [Dronevery/MultiwaySwitch-Go](https://github.com/Dronevery/MultiwaySwitch-Go) -- [thoas/picfit](https://github.com/thoas/picfit) -- [mantasmatelis/whooplist-server](https://github.com/mantasmatelis/whooplist-server) -- [jnuthong/item_search](https://github.com/jnuthong/item_search) -- [bukalapak/snowboard](https://github.com/bukalapak/snowboard) -- [containerssh/containerssh](https://github.com/containerssh/containerssh) -- [goreleaser/goreleaser](https://github.com/goreleaser/goreleaser) -- [tjpnz/structbot](https://github.com/tjpnz/structbot) +Mergo is used by [thousands](https://deps.dev/go/dario.cat%2Fmergo/v1.0.0/dependents) [of](https://deps.dev/go/github.com%2Fimdario%2Fmergo/v0.3.16/dependents) [projects](https://deps.dev/go/github.com%2Fimdario%2Fmergo/v0.3.12), including: + +* [containerd/containerd](https://github.com/containerd/containerd) +* [datadog/datadog-agent](https://github.com/datadog/datadog-agent) +* [docker/cli/](https://github.com/docker/cli/) +* [goreleaser/goreleaser](https://github.com/goreleaser/goreleaser) +* [go-micro/go-micro](https://github.com/go-micro/go-micro) +* [grafana/loki](https://github.com/grafana/loki) +* [masterminds/sprig](github.com/Masterminds/sprig) +* [moby/moby](https://github.com/moby/moby) +* [slackhq/nebula](https://github.com/slackhq/nebula) 
+* [volcano-sh/volcano](https://github.com/volcano-sh/volcano) ## Install @@ -141,6 +117,39 @@ if err := mergo.Merge(&dst, src, mergo.WithOverride); err != nil { } ``` +If you need to override pointers, so the source pointer's value is assigned to the destination's pointer, you must use `WithoutDereference`: + +```go +package main + +import ( + "fmt" + + "dario.cat/mergo" +) + +type Foo struct { + A *string + B int64 +} + +func main() { + first := "first" + second := "second" + src := Foo{ + A: &first, + B: 2, + } + + dest := Foo{ + A: &second, + B: 1, + } + + mergo.Merge(&dest, src, mergo.WithOverride, mergo.WithoutDereference) +} +``` + Additionally, you can map a `map[string]interface{}` to a struct (and otherwise, from struct to map), following the same restrictions as in `Merge()`. Keys are capitalized to find each corresponding exported field. ```go @@ -181,10 +190,6 @@ func main() { } ``` -Note: if test are failing due missing package, please execute: - - go get gopkg.in/yaml.v3 - ### Transformers Transformers allow to merge specific types differently than in the default behavior. In other words, now you can customize how some types are merged. For example, `time.Time` is a struct; it doesn't have zero value but IsZero can return true because it has fields with zero value. How can we merge a non-zero `time.Time`? diff --git a/vendor/dario.cat/mergo/SECURITY.md b/vendor/dario.cat/mergo/SECURITY.md index a5de61f77..3788fcc1c 100644 --- a/vendor/dario.cat/mergo/SECURITY.md +++ b/vendor/dario.cat/mergo/SECURITY.md @@ -4,8 +4,8 @@ | Version | Supported | | ------- | ------------------ | -| 0.3.x | :white_check_mark: | -| < 0.3 | :x: | +| 1.x.x | :white_check_mark: | +| < 1.0 | :x: | ## Security contact information diff --git a/vendor/dario.cat/mergo/map.go b/vendor/dario.cat/mergo/map.go index b50d5c2a4..759b4f74f 100644 --- a/vendor/dario.cat/mergo/map.go +++ b/vendor/dario.cat/mergo/map.go @@ -58,7 +58,7 @@ func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, conf } fieldName := field.Name fieldName = changeInitialCase(fieldName, unicode.ToLower) - if v, ok := dstMap[fieldName]; !ok || (isEmptyValue(reflect.ValueOf(v), !config.ShouldNotDereference) || overwrite) { + if _, ok := dstMap[fieldName]; !ok || (!isEmptyValue(reflect.ValueOf(src.Field(i).Interface()), !config.ShouldNotDereference) && overwrite) || config.overwriteWithEmptyValue { dstMap[fieldName] = src.Field(i).Interface() } } diff --git a/vendor/dario.cat/mergo/merge.go b/vendor/dario.cat/mergo/merge.go index 0ef9b2138..fd47c95b2 100644 --- a/vendor/dario.cat/mergo/merge.go +++ b/vendor/dario.cat/mergo/merge.go @@ -269,7 +269,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil { return } - } else { + } else if src.Elem().Kind() != reflect.Struct { if overwriteWithEmptySrc || (overwrite && !src.IsNil()) || dst.IsNil() { dst.Set(src) } diff --git a/vendor/github.com/docker/docker/errdefs/defs.go b/vendor/github.com/docker/docker/errdefs/defs.go deleted file mode 100644 index a5523c3e9..000000000 --- a/vendor/github.com/docker/docker/errdefs/defs.go +++ /dev/null @@ -1,69 +0,0 @@ -package errdefs - -// ErrNotFound signals that the requested object doesn't exist -type ErrNotFound interface { - NotFound() -} - -// ErrInvalidParameter signals that the user input is invalid -type ErrInvalidParameter interface { - InvalidParameter() -} - -// ErrConflict signals that some internal state conflicts 
with the requested action and can't be performed. -// A change in state should be able to clear this error. -type ErrConflict interface { - Conflict() -} - -// ErrUnauthorized is used to signify that the user is not authorized to perform a specific action -type ErrUnauthorized interface { - Unauthorized() -} - -// ErrUnavailable signals that the requested action/subsystem is not available. -type ErrUnavailable interface { - Unavailable() -} - -// ErrForbidden signals that the requested action cannot be performed under any circumstances. -// When a ErrForbidden is returned, the caller should never retry the action. -type ErrForbidden interface { - Forbidden() -} - -// ErrSystem signals that some internal error occurred. -// An example of this would be a failed mount request. -type ErrSystem interface { - System() -} - -// ErrNotModified signals that an action can't be performed because it's already in the desired state -type ErrNotModified interface { - NotModified() -} - -// ErrNotImplemented signals that the requested action/feature is not implemented on the system as configured. -type ErrNotImplemented interface { - NotImplemented() -} - -// ErrUnknown signals that the kind of error that occurred is not known. -type ErrUnknown interface { - Unknown() -} - -// ErrCancelled signals that the action was cancelled. -type ErrCancelled interface { - Cancelled() -} - -// ErrDeadline signals that the deadline was reached before the action completed. -type ErrDeadline interface { - DeadlineExceeded() -} - -// ErrDataLoss indicates that data was lost or there is data corruption. -type ErrDataLoss interface { - DataLoss() -} diff --git a/vendor/github.com/docker/docker/errdefs/doc.go b/vendor/github.com/docker/docker/errdefs/doc.go deleted file mode 100644 index efbe8ba92..000000000 --- a/vendor/github.com/docker/docker/errdefs/doc.go +++ /dev/null @@ -1,8 +0,0 @@ -// Package errdefs defines a set of error interfaces that packages should use for communicating classes of errors. -// Errors that cross the package boundary should implement one (and only one) of these interfaces. -// -// Packages should not reference these interfaces directly, only implement them. -// To check if a particular error implements one of these interfaces, there are helper -// functions provided (e.g. `Is`) which can be used rather than asserting the interfaces directly. -// If you must assert on these interfaces, be sure to check the causal chain (`err.Unwrap()`). -package errdefs diff --git a/vendor/github.com/docker/docker/errdefs/helpers.go b/vendor/github.com/docker/docker/errdefs/helpers.go deleted file mode 100644 index 2a9f7ffd8..000000000 --- a/vendor/github.com/docker/docker/errdefs/helpers.go +++ /dev/null @@ -1,309 +0,0 @@ -package errdefs - -import ( - "context" - - cerrdefs "github.com/containerd/errdefs" -) - -type errNotFound struct{ error } - -func (errNotFound) NotFound() {} - -func (e errNotFound) Cause() error { - return e.error -} - -func (e errNotFound) Unwrap() error { - return e.error -} - -// NotFound creates an [ErrNotFound] error from the given error. 
-// It returns the error as-is if it is either nil (no error) or already implements -// [ErrNotFound], -func NotFound(err error) error { - if err == nil || cerrdefs.IsNotFound(err) { - return err - } - return errNotFound{err} -} - -type errInvalidParameter struct{ error } - -func (errInvalidParameter) InvalidParameter() {} - -func (e errInvalidParameter) Cause() error { - return e.error -} - -func (e errInvalidParameter) Unwrap() error { - return e.error -} - -// InvalidParameter creates an [ErrInvalidParameter] error from the given error. -// It returns the error as-is if it is either nil (no error) or already implements -// [ErrInvalidParameter], -func InvalidParameter(err error) error { - if err == nil || cerrdefs.IsInvalidArgument(err) { - return err - } - return errInvalidParameter{err} -} - -type errConflict struct{ error } - -func (errConflict) Conflict() {} - -func (e errConflict) Cause() error { - return e.error -} - -func (e errConflict) Unwrap() error { - return e.error -} - -// Conflict creates an [ErrConflict] error from the given error. -// It returns the error as-is if it is either nil (no error) or already implements -// [ErrConflict], -func Conflict(err error) error { - if err == nil || cerrdefs.IsConflict(err) { - return err - } - return errConflict{err} -} - -type errUnauthorized struct{ error } - -func (errUnauthorized) Unauthorized() {} - -func (e errUnauthorized) Cause() error { - return e.error -} - -func (e errUnauthorized) Unwrap() error { - return e.error -} - -// Unauthorized creates an [ErrUnauthorized] error from the given error. -// It returns the error as-is if it is either nil (no error) or already implements -// [ErrUnauthorized], -func Unauthorized(err error) error { - if err == nil || cerrdefs.IsUnauthorized(err) { - return err - } - return errUnauthorized{err} -} - -type errUnavailable struct{ error } - -func (errUnavailable) Unavailable() {} - -func (e errUnavailable) Cause() error { - return e.error -} - -func (e errUnavailable) Unwrap() error { - return e.error -} - -// Unavailable creates an [ErrUnavailable] error from the given error. -// It returns the error as-is if it is either nil (no error) or already implements -// [ErrUnavailable], -func Unavailable(err error) error { - if err == nil || cerrdefs.IsUnavailable(err) { - return err - } - return errUnavailable{err} -} - -type errForbidden struct{ error } - -func (errForbidden) Forbidden() {} - -func (e errForbidden) Cause() error { - return e.error -} - -func (e errForbidden) Unwrap() error { - return e.error -} - -// Forbidden creates an [ErrForbidden] error from the given error. -// It returns the error as-is if it is either nil (no error) or already implements -// [ErrForbidden], -func Forbidden(err error) error { - if err == nil || cerrdefs.IsPermissionDenied(err) { - return err - } - return errForbidden{err} -} - -type errSystem struct{ error } - -func (errSystem) System() {} - -func (e errSystem) Cause() error { - return e.error -} - -func (e errSystem) Unwrap() error { - return e.error -} - -// System creates an [ErrSystem] error from the given error. 
-// It returns the error as-is if it is either nil (no error) or already implements -// [ErrSystem], -func System(err error) error { - if err == nil || cerrdefs.IsInternal(err) { - return err - } - return errSystem{err} -} - -type errNotModified struct{ error } - -func (errNotModified) NotModified() {} - -func (e errNotModified) Cause() error { - return e.error -} - -func (e errNotModified) Unwrap() error { - return e.error -} - -// NotModified creates an [ErrNotModified] error from the given error. -// It returns the error as-is if it is either nil (no error) or already implements -// [NotModified], -func NotModified(err error) error { - if err == nil || cerrdefs.IsNotModified(err) { - return err - } - return errNotModified{err} -} - -type errNotImplemented struct{ error } - -func (errNotImplemented) NotImplemented() {} - -func (e errNotImplemented) Cause() error { - return e.error -} - -func (e errNotImplemented) Unwrap() error { - return e.error -} - -// NotImplemented creates an [ErrNotImplemented] error from the given error. -// It returns the error as-is if it is either nil (no error) or already implements -// [ErrNotImplemented], -func NotImplemented(err error) error { - if err == nil || cerrdefs.IsNotImplemented(err) { - return err - } - return errNotImplemented{err} -} - -type errUnknown struct{ error } - -func (errUnknown) Unknown() {} - -func (e errUnknown) Cause() error { - return e.error -} - -func (e errUnknown) Unwrap() error { - return e.error -} - -// Unknown creates an [ErrUnknown] error from the given error. -// It returns the error as-is if it is either nil (no error) or already implements -// [ErrUnknown], -func Unknown(err error) error { - if err == nil || cerrdefs.IsUnknown(err) { - return err - } - return errUnknown{err} -} - -type errCancelled struct{ error } - -func (errCancelled) Cancelled() {} - -func (e errCancelled) Cause() error { - return e.error -} - -func (e errCancelled) Unwrap() error { - return e.error -} - -// Cancelled creates an [ErrCancelled] error from the given error. -// It returns the error as-is if it is either nil (no error) or already implements -// [ErrCancelled], -func Cancelled(err error) error { - if err == nil || cerrdefs.IsCanceled(err) { - return err - } - return errCancelled{err} -} - -type errDeadline struct{ error } - -func (errDeadline) DeadlineExceeded() {} - -func (e errDeadline) Cause() error { - return e.error -} - -func (e errDeadline) Unwrap() error { - return e.error -} - -// Deadline creates an [ErrDeadline] error from the given error. -// It returns the error as-is if it is either nil (no error) or already implements -// [ErrDeadline], -func Deadline(err error) error { - if err == nil || cerrdefs.IsDeadlineExceeded(err) { - return err - } - return errDeadline{err} -} - -type errDataLoss struct{ error } - -func (errDataLoss) DataLoss() {} - -func (e errDataLoss) Cause() error { - return e.error -} - -func (e errDataLoss) Unwrap() error { - return e.error -} - -// DataLoss creates an [ErrDataLoss] error from the given error. 
-// It returns the error as-is if it is either nil (no error) or already implements -// [ErrDataLoss], -func DataLoss(err error) error { - if err == nil || cerrdefs.IsDataLoss(err) { - return err - } - return errDataLoss{err} -} - -// FromContext returns the error class from the passed in context -func FromContext(ctx context.Context) error { - e := ctx.Err() - if e == nil { - return nil - } - - if e == context.Canceled { - return Cancelled(e) - } - if e == context.DeadlineExceeded { - return Deadline(e) - } - return Unknown(e) -} diff --git a/vendor/github.com/docker/docker/errdefs/http_helpers.go b/vendor/github.com/docker/docker/errdefs/http_helpers.go deleted file mode 100644 index 823ff2d9f..000000000 --- a/vendor/github.com/docker/docker/errdefs/http_helpers.go +++ /dev/null @@ -1,49 +0,0 @@ -package errdefs - -import ( - "net/http" -) - -// FromStatusCode creates an errdef error, based on the provided HTTP status-code -// -// Deprecated: Use [cerrdefs.ToNative] instead -func FromStatusCode(err error, statusCode int) error { - if err == nil { - return nil - } - switch statusCode { - case http.StatusNotFound: - return NotFound(err) - case http.StatusBadRequest: - return InvalidParameter(err) - case http.StatusConflict: - return Conflict(err) - case http.StatusUnauthorized: - return Unauthorized(err) - case http.StatusServiceUnavailable: - return Unavailable(err) - case http.StatusForbidden: - return Forbidden(err) - case http.StatusNotModified: - return NotModified(err) - case http.StatusNotImplemented: - return NotImplemented(err) - case http.StatusInternalServerError: - if IsCancelled(err) || IsSystem(err) || IsUnknown(err) || IsDataLoss(err) || IsDeadline(err) { - return err - } - return System(err) - default: - switch { - case statusCode >= http.StatusOK && statusCode < http.StatusBadRequest: - // it's a client error - return err - case statusCode >= http.StatusBadRequest && statusCode < http.StatusInternalServerError: - return InvalidParameter(err) - case statusCode >= http.StatusInternalServerError && statusCode < 600: - return System(err) - default: - return Unknown(err) - } - } -} diff --git a/vendor/github.com/docker/docker/errdefs/is.go b/vendor/github.com/docker/docker/errdefs/is.go deleted file mode 100644 index ceb754a95..000000000 --- a/vendor/github.com/docker/docker/errdefs/is.go +++ /dev/null @@ -1,78 +0,0 @@ -package errdefs - -import ( - "context" - "errors" - - cerrdefs "github.com/containerd/errdefs" -) - -// IsNotFound returns if the passed in error is an [ErrNotFound], -// -// Deprecated: use containerd [cerrdefs.IsNotFound] -var IsNotFound = cerrdefs.IsNotFound - -// IsInvalidParameter returns if the passed in error is an [ErrInvalidParameter]. -// -// Deprecated: use containerd [cerrdefs.IsInvalidArgument] -var IsInvalidParameter = cerrdefs.IsInvalidArgument - -// IsConflict returns if the passed in error is an [ErrConflict]. -// -// Deprecated: use containerd [cerrdefs.IsConflict] -var IsConflict = cerrdefs.IsConflict - -// IsUnauthorized returns if the passed in error is an [ErrUnauthorized]. -// -// Deprecated: use containerd [cerrdefs.IsUnauthorized] -var IsUnauthorized = cerrdefs.IsUnauthorized - -// IsUnavailable returns if the passed in error is an [ErrUnavailable]. -// -// Deprecated: use containerd [cerrdefs.IsUnavailable] -var IsUnavailable = cerrdefs.IsUnavailable - -// IsForbidden returns if the passed in error is an [ErrForbidden]. 
-// -// Deprecated: use containerd [cerrdefs.IsPermissionDenied] -var IsForbidden = cerrdefs.IsPermissionDenied - -// IsSystem returns if the passed in error is an [ErrSystem]. -// -// Deprecated: use containerd [cerrdefs.IsInternal] -var IsSystem = cerrdefs.IsInternal - -// IsNotModified returns if the passed in error is an [ErrNotModified]. -// -// Deprecated: use containerd [cerrdefs.IsNotModified] -var IsNotModified = cerrdefs.IsNotModified - -// IsNotImplemented returns if the passed in error is an [ErrNotImplemented]. -// -// Deprecated: use containerd [cerrdefs.IsNotImplemented] -var IsNotImplemented = cerrdefs.IsNotImplemented - -// IsUnknown returns if the passed in error is an [ErrUnknown]. -// -// Deprecated: use containerd [cerrdefs.IsUnknown] -var IsUnknown = cerrdefs.IsUnknown - -// IsCancelled returns if the passed in error is an [ErrCancelled]. -// -// Deprecated: use containerd [cerrdefs.IsCanceled] -var IsCancelled = cerrdefs.IsCanceled - -// IsDeadline returns if the passed in error is an [ErrDeadline]. -// -// Deprecated: use containerd [cerrdefs.IsDeadlineExceeded] -var IsDeadline = cerrdefs.IsDeadlineExceeded - -// IsDataLoss returns if the passed in error is an [ErrDataLoss]. -// -// Deprecated: use containerd [cerrdefs.IsDataLoss] -var IsDataLoss = cerrdefs.IsDataLoss - -// IsContext returns if the passed in error is due to context cancellation or deadline exceeded. -func IsContext(err error) bool { - return errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) -} diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_deprecated.go b/vendor/github.com/docker/docker/pkg/archive/archive_deprecated.go deleted file mode 100644 index 5bdbdef20..000000000 --- a/vendor/github.com/docker/docker/pkg/archive/archive_deprecated.go +++ /dev/null @@ -1,259 +0,0 @@ -// Package archive provides helper functions for dealing with archive files. -package archive - -import ( - "archive/tar" - "io" - "os" - - "github.com/docker/docker/pkg/idtools" - "github.com/moby/go-archive" - "github.com/moby/go-archive/compression" - "github.com/moby/go-archive/tarheader" -) - -// ImpliedDirectoryMode represents the mode (Unix permissions) applied to directories that are implied by files in a -// tar, but that do not have their own header entry. -// -// Deprecated: use [archive.ImpliedDirectoryMode] instead. -const ImpliedDirectoryMode = archive.ImpliedDirectoryMode - -type ( - // Compression is the state represents if compressed or not. - // - // Deprecated: use [compression.Compression] instead. - Compression = compression.Compression - // WhiteoutFormat is the format of whiteouts unpacked - // - // Deprecated: use [archive.WhiteoutFormat] instead. - WhiteoutFormat = archive.WhiteoutFormat - - // TarOptions wraps the tar options. - // - // Deprecated: use [archive.TarOptions] instead. - TarOptions struct { - IncludeFiles []string - ExcludePatterns []string - Compression compression.Compression - NoLchown bool - IDMap idtools.IdentityMapping - ChownOpts *idtools.Identity - IncludeSourceDir bool - // WhiteoutFormat is the expected on disk format for whiteout files. - // This format will be converted to the standard format on pack - // and from the standard format on unpack. - WhiteoutFormat archive.WhiteoutFormat - // When unpacking, specifies whether overwriting a directory with a - // non-directory is allowed and vice versa. 
- NoOverwriteDirNonDir bool - // For each include when creating an archive, the included name will be - // replaced with the matching name from this map. - RebaseNames map[string]string - InUserNS bool - // Allow unpacking to succeed in spite of failures to set extended - // attributes on the unpacked files due to the destination filesystem - // not supporting them or a lack of permissions. Extended attributes - // were probably in the archive for a reason, so set this option at - // your own peril. - BestEffortXattrs bool - } -) - -// Archiver implements the Archiver interface and allows the reuse of most utility functions of -// this package with a pluggable Untar function. Also, to facilitate the passing of specific id -// mappings for untar, an Archiver can be created with maps which will then be passed to Untar operations. -// -// Deprecated: use [archive.Archiver] instead. -type Archiver struct { - Untar func(io.Reader, string, *TarOptions) error - IDMapping idtools.IdentityMapping -} - -// NewDefaultArchiver returns a new Archiver without any IdentityMapping -// -// Deprecated: use [archive.NewDefaultArchiver] instead. -func NewDefaultArchiver() *Archiver { - return &Archiver{Untar: Untar} -} - -const ( - Uncompressed = compression.None // Deprecated: use [compression.None] instead. - Bzip2 = compression.Bzip2 // Deprecated: use [compression.Bzip2] instead. - Gzip = compression.Gzip // Deprecated: use [compression.Gzip] instead. - Xz = compression.Xz // Deprecated: use [compression.Xz] instead. - Zstd = compression.Zstd // Deprecated: use [compression.Zstd] instead. -) - -const ( - AUFSWhiteoutFormat = archive.AUFSWhiteoutFormat // Deprecated: use [archive.AUFSWhiteoutFormat] instead. - OverlayWhiteoutFormat = archive.OverlayWhiteoutFormat // Deprecated: use [archive.OverlayWhiteoutFormat] instead. -) - -// IsArchivePath checks if the (possibly compressed) file at the given path -// starts with a tar file header. -// -// Deprecated: use [archive.IsArchivePath] instead. -func IsArchivePath(path string) bool { - return archive.IsArchivePath(path) -} - -// DetectCompression detects the compression algorithm of the source. -// -// Deprecated: use [compression.Detect] instead. -func DetectCompression(source []byte) archive.Compression { - return compression.Detect(source) -} - -// DecompressStream decompresses the archive and returns a ReaderCloser with the decompressed archive. -// -// Deprecated: use [compression.DecompressStream] instead. -func DecompressStream(arch io.Reader) (io.ReadCloser, error) { - return compression.DecompressStream(arch) -} - -// CompressStream compresses the dest with specified compression algorithm. -// -// Deprecated: use [compression.CompressStream] instead. -func CompressStream(dest io.Writer, comp compression.Compression) (io.WriteCloser, error) { - return compression.CompressStream(dest, comp) -} - -// TarModifierFunc is a function that can be passed to ReplaceFileTarWrapper. -// -// Deprecated: use [archive.TarModifierFunc] instead. -type TarModifierFunc = archive.TarModifierFunc - -// ReplaceFileTarWrapper converts inputTarStream to a new tar stream. -// -// Deprecated: use [archive.ReplaceFileTarWrapper] instead. -func ReplaceFileTarWrapper(inputTarStream io.ReadCloser, mods map[string]archive.TarModifierFunc) io.ReadCloser { - return archive.ReplaceFileTarWrapper(inputTarStream, mods) -} - -// FileInfoHeaderNoLookups creates a partially-populated tar.Header from fi. -// -// Deprecated: use [tarheader.FileInfoHeaderNoLookups] instead. 
-func FileInfoHeaderNoLookups(fi os.FileInfo, link string) (*tar.Header, error) { - return tarheader.FileInfoHeaderNoLookups(fi, link) -} - -// FileInfoHeader creates a populated Header from fi. -// -// Deprecated: use [archive.FileInfoHeader] instead. -func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, error) { - return archive.FileInfoHeader(name, fi, link) -} - -// ReadSecurityXattrToTarHeader reads security.capability xattr from filesystem -// to a tar header -// -// Deprecated: use [archive.ReadSecurityXattrToTarHeader] instead. -func ReadSecurityXattrToTarHeader(path string, hdr *tar.Header) error { - return archive.ReadSecurityXattrToTarHeader(path, hdr) -} - -// Tar creates an archive from the directory at `path`, and returns it as a -// stream of bytes. -// -// Deprecated: use [archive.Tar] instead. -func Tar(path string, compression archive.Compression) (io.ReadCloser, error) { - return archive.TarWithOptions(path, &archive.TarOptions{Compression: compression}) -} - -// TarWithOptions creates an archive with the given options. -// -// Deprecated: use [archive.TarWithOptions] instead. -func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) { - return archive.TarWithOptions(srcPath, toArchiveOpt(options)) -} - -// Tarballer is a lower-level interface to TarWithOptions. -// -// Deprecated: use [archive.Tarballer] instead. -type Tarballer = archive.Tarballer - -// NewTarballer constructs a new tarballer using TarWithOptions. -// -// Deprecated: use [archive.Tarballer] instead. -func NewTarballer(srcPath string, options *TarOptions) (*archive.Tarballer, error) { - return archive.NewTarballer(srcPath, toArchiveOpt(options)) -} - -// Unpack unpacks the decompressedArchive to dest with options. -// -// Deprecated: use [archive.Unpack] instead. -func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error { - return archive.Unpack(decompressedArchive, dest, toArchiveOpt(options)) -} - -// Untar reads a stream of bytes from `archive`, parses it as a tar archive, -// and unpacks it into the directory at `dest`. -// -// Deprecated: use [archive.Untar] instead. -func Untar(tarArchive io.Reader, dest string, options *TarOptions) error { - return archive.Untar(tarArchive, dest, toArchiveOpt(options)) -} - -// UntarUncompressed reads a stream of bytes from `tarArchive`, parses it as a tar archive, -// and unpacks it into the directory at `dest`. -// The archive must be an uncompressed stream. -// -// Deprecated: use [archive.UntarUncompressed] instead. -func UntarUncompressed(tarArchive io.Reader, dest string, options *TarOptions) error { - return archive.UntarUncompressed(tarArchive, dest, toArchiveOpt(options)) -} - -// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other. -// If either Tar or Untar fails, TarUntar aborts and returns the error. -func (archiver *Archiver) TarUntar(src, dst string) error { - return (&archive.Archiver{ - Untar: func(reader io.Reader, s string, options *archive.TarOptions) error { - return archiver.Untar(reader, s, &TarOptions{ - IDMap: archiver.IDMapping, - }) - }, - IDMapping: idtools.ToUserIdentityMapping(archiver.IDMapping), - }).TarUntar(src, dst) -} - -// UntarPath untar a file from path to a destination, src is the source tar file path. 
-func (archiver *Archiver) UntarPath(src, dst string) error { - return (&archive.Archiver{ - Untar: func(reader io.Reader, s string, options *archive.TarOptions) error { - return archiver.Untar(reader, s, &TarOptions{ - IDMap: archiver.IDMapping, - }) - }, - IDMapping: idtools.ToUserIdentityMapping(archiver.IDMapping), - }).UntarPath(src, dst) -} - -// CopyWithTar creates a tar archive of filesystem path `src`, and -// unpacks it at filesystem path `dst`. -// The archive is streamed directly with fixed buffering and no -// intermediary disk IO. -func (archiver *Archiver) CopyWithTar(src, dst string) error { - return (&archive.Archiver{ - Untar: func(reader io.Reader, s string, options *archive.TarOptions) error { - return archiver.Untar(reader, s, nil) - }, - IDMapping: idtools.ToUserIdentityMapping(archiver.IDMapping), - }).CopyWithTar(src, dst) -} - -// CopyFileWithTar emulates the behavior of the 'cp' command-line -// for a single file. It copies a regular file from path `src` to -// path `dst`, and preserves all its metadata. -func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { - return (&archive.Archiver{ - Untar: func(reader io.Reader, s string, options *archive.TarOptions) error { - return archiver.Untar(reader, s, nil) - }, - IDMapping: idtools.ToUserIdentityMapping(archiver.IDMapping), - }).CopyFileWithTar(src, dst) -} - -// IdentityMapping returns the IdentityMapping of the archiver. -func (archiver *Archiver) IdentityMapping() idtools.IdentityMapping { - return archiver.IDMapping -} diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_deprecated.go b/vendor/github.com/docker/docker/pkg/archive/changes_deprecated.go deleted file mode 100644 index 48c75235c..000000000 --- a/vendor/github.com/docker/docker/pkg/archive/changes_deprecated.go +++ /dev/null @@ -1,56 +0,0 @@ -package archive - -import ( - "io" - - "github.com/docker/docker/pkg/idtools" - "github.com/moby/go-archive" -) - -// ChangeType represents the change -// -// Deprecated: use [archive.ChangeType] instead. -type ChangeType = archive.ChangeType - -const ( - ChangeModify = archive.ChangeModify // Deprecated: use [archive.ChangeModify] instead. - ChangeAdd = archive.ChangeAdd // Deprecated: use [archive.ChangeAdd] instead. - ChangeDelete = archive.ChangeDelete // Deprecated: use [archive.ChangeDelete] instead. -) - -// Change represents a change. -// -// Deprecated: use [archive.Change] instead. -type Change = archive.Change - -// Changes walks the path rw and determines changes for the files in the path, -// with respect to the parent layers -// -// Deprecated: use [archive.Changes] instead. -func Changes(layers []string, rw string) ([]archive.Change, error) { - return archive.Changes(layers, rw) -} - -// FileInfo describes the information of a file. -// -// Deprecated: use [archive.FileInfo] instead. -type FileInfo = archive.FileInfo - -// ChangesDirs compares two directories and generates an array of Change objects describing the changes. -// -// Deprecated: use [archive.ChangesDirs] instead. -func ChangesDirs(newDir, oldDir string) ([]archive.Change, error) { - return archive.ChangesDirs(newDir, oldDir) -} - -// ChangesSize calculates the size in bytes of the provided changes, based on newDir. -// -// Deprecated: use [archive.ChangesSize] instead. -func ChangesSize(newDir string, changes []archive.Change) int64 { - return archive.ChangesSize(newDir, changes) -} - -// ExportChanges produces an Archive from the provided changes, relative to dir. 
-func ExportChanges(dir string, changes []archive.Change, idMap idtools.IdentityMapping) (io.ReadCloser, error) { - return archive.ExportChanges(dir, changes, idtools.ToUserIdentityMapping(idMap)) -} diff --git a/vendor/github.com/docker/docker/pkg/archive/copy_deprecated.go b/vendor/github.com/docker/docker/pkg/archive/copy_deprecated.go deleted file mode 100644 index 1901e55c5..000000000 --- a/vendor/github.com/docker/docker/pkg/archive/copy_deprecated.go +++ /dev/null @@ -1,130 +0,0 @@ -package archive - -import ( - "io" - - "github.com/moby/go-archive" - "github.com/moby/go-archive/compression" -) - -var ( - ErrNotDirectory = archive.ErrNotDirectory // Deprecated: use [archive.ErrNotDirectory] instead. - ErrDirNotExists = archive.ErrDirNotExists // Deprecated: use [archive.ErrDirNotExists] instead. - ErrCannotCopyDir = archive.ErrCannotCopyDir // Deprecated: use [archive.ErrCannotCopyDir] instead. - ErrInvalidCopySource = archive.ErrInvalidCopySource // Deprecated: use [archive.ErrInvalidCopySource] instead. -) - -// PreserveTrailingDotOrSeparator returns the given cleaned path. -// -// Deprecated: use [archive.PreserveTrailingDotOrSeparator] instead. -func PreserveTrailingDotOrSeparator(cleanedPath string, originalPath string) string { - return archive.PreserveTrailingDotOrSeparator(cleanedPath, originalPath) -} - -// SplitPathDirEntry splits the given path between its directory name and its -// basename. -// -// Deprecated: use [archive.SplitPathDirEntry] instead. -func SplitPathDirEntry(path string) (dir, base string) { - return archive.SplitPathDirEntry(path) -} - -// TarResource archives the resource described by the given CopyInfo to a Tar -// archive. -// -// Deprecated: use [archive.TarResource] instead. -func TarResource(sourceInfo archive.CopyInfo) (content io.ReadCloser, err error) { - return archive.TarResource(sourceInfo) -} - -// TarResourceRebase is like TarResource but renames the first path element of -// items in the resulting tar archive to match the given rebaseName if not "". -// -// Deprecated: use [archive.TarResourceRebase] instead. -func TarResourceRebase(sourcePath, rebaseName string) (content io.ReadCloser, _ error) { - return archive.TarResourceRebase(sourcePath, rebaseName) -} - -// TarResourceRebaseOpts does not preform the Tar, but instead just creates the rebase -// parameters to be sent to TarWithOptions. -// -// Deprecated: use [archive.TarResourceRebaseOpts] instead. -func TarResourceRebaseOpts(sourceBase string, rebaseName string) *TarOptions { - filter := []string{sourceBase} - return &TarOptions{ - Compression: compression.None, - IncludeFiles: filter, - IncludeSourceDir: true, - RebaseNames: map[string]string{ - sourceBase: rebaseName, - }, - } -} - -// CopyInfo holds basic info about the source or destination path of a copy operation. -// -// Deprecated: use [archive.CopyInfo] instead. -type CopyInfo = archive.CopyInfo - -// CopyInfoSourcePath stats the given path to create a CopyInfo struct. -// struct representing that resource for the source of an archive copy -// operation. -// -// Deprecated: use [archive.CopyInfoSourcePath] instead. -func CopyInfoSourcePath(path string, followLink bool) (archive.CopyInfo, error) { - return archive.CopyInfoSourcePath(path, followLink) -} - -// CopyInfoDestinationPath stats the given path to create a CopyInfo -// struct representing that resource for the destination of an archive copy -// operation. -// -// Deprecated: use [archive.CopyInfoDestinationPath] instead. 
-func CopyInfoDestinationPath(path string) (info archive.CopyInfo, err error) { - return archive.CopyInfoDestinationPath(path) -} - -// PrepareArchiveCopy prepares the given srcContent archive. -// -// Deprecated: use [archive.PrepareArchiveCopy] instead. -func PrepareArchiveCopy(srcContent io.Reader, srcInfo, dstInfo archive.CopyInfo) (dstDir string, content io.ReadCloser, err error) { - return archive.PrepareArchiveCopy(srcContent, srcInfo, dstInfo) -} - -// RebaseArchiveEntries rewrites the given srcContent archive replacing -// an occurrence of oldBase with newBase at the beginning of entry names. -// -// Deprecated: use [archive.RebaseArchiveEntries] instead. -func RebaseArchiveEntries(srcContent io.Reader, oldBase, newBase string) io.ReadCloser { - return archive.RebaseArchiveEntries(srcContent, oldBase, newBase) -} - -// CopyResource performs an archive copy from the given source path to the -// given destination path. -// -// Deprecated: use [archive.CopyResource] instead. -func CopyResource(srcPath, dstPath string, followLink bool) error { - return archive.CopyResource(srcPath, dstPath, followLink) -} - -// CopyTo handles extracting the given content whose -// entries should be sourced from srcInfo to dstPath. -// -// Deprecated: use [archive.CopyTo] instead. -func CopyTo(content io.Reader, srcInfo archive.CopyInfo, dstPath string) error { - return archive.CopyTo(content, srcInfo, dstPath) -} - -// ResolveHostSourcePath decides real path need to be copied. -// -// Deprecated: use [archive.ResolveHostSourcePath] instead. -func ResolveHostSourcePath(path string, followLink bool) (resolvedPath, rebaseName string, _ error) { - return archive.ResolveHostSourcePath(path, followLink) -} - -// GetRebaseName normalizes and compares path and resolvedPath. -// -// Deprecated: use [archive.GetRebaseName] instead. -func GetRebaseName(path, resolvedPath string) (string, string) { - return archive.GetRebaseName(path, resolvedPath) -} diff --git a/vendor/github.com/docker/docker/pkg/archive/diff_deprecated.go b/vendor/github.com/docker/docker/pkg/archive/diff_deprecated.go deleted file mode 100644 index dd5e0d5ef..000000000 --- a/vendor/github.com/docker/docker/pkg/archive/diff_deprecated.go +++ /dev/null @@ -1,37 +0,0 @@ -package archive - -import ( - "io" - - "github.com/moby/go-archive" -) - -// UnpackLayer unpack `layer` to a `dest`. -// -// Deprecated: use [archive.UnpackLayer] instead. -func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, err error) { - return archive.UnpackLayer(dest, layer, toArchiveOpt(options)) -} - -// ApplyLayer parses a diff in the standard layer format from `layer`, -// and applies it to the directory `dest`. -// -// Deprecated: use [archive.ApplyLayer] instead. -func ApplyLayer(dest string, layer io.Reader) (int64, error) { - return archive.ApplyLayer(dest, layer) -} - -// ApplyUncompressedLayer parses a diff in the standard layer format from -// `layer`, and applies it to the directory `dest`. -// -// Deprecated: use [archive.ApplyUncompressedLayer] instead. -func ApplyUncompressedLayer(dest string, layer io.Reader, options *TarOptions) (int64, error) { - return archive.ApplyUncompressedLayer(dest, layer, toArchiveOpt(options)) -} - -// IsEmpty checks if the tar archive is empty (doesn't contain any entries). -// -// Deprecated: use [archive.IsEmpty] instead. 
-func IsEmpty(rd io.Reader) (bool, error) { - return archive.IsEmpty(rd) -} diff --git a/vendor/github.com/docker/docker/pkg/archive/path_deprecated.go b/vendor/github.com/docker/docker/pkg/archive/path_deprecated.go deleted file mode 100644 index 0fa74de68..000000000 --- a/vendor/github.com/docker/docker/pkg/archive/path_deprecated.go +++ /dev/null @@ -1,10 +0,0 @@ -package archive - -import "github.com/moby/go-archive" - -// CheckSystemDriveAndRemoveDriveLetter verifies that a path is the system drive. -// -// Deprecated: use [archive.CheckSystemDriveAndRemoveDriveLetter] instead. -func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) { - return archive.CheckSystemDriveAndRemoveDriveLetter(path) -} diff --git a/vendor/github.com/docker/docker/pkg/archive/utils.go b/vendor/github.com/docker/docker/pkg/archive/utils.go deleted file mode 100644 index 692cf1602..000000000 --- a/vendor/github.com/docker/docker/pkg/archive/utils.go +++ /dev/null @@ -1,42 +0,0 @@ -package archive - -import ( - "github.com/docker/docker/pkg/idtools" - "github.com/moby/go-archive" -) - -// ToArchiveOpt converts an [TarOptions] to a [archive.TarOptions]. -// -// Deprecated: use [archive.TarOptions] instead, this utility is for internal use to transition to the [github.com/moby/go-archive] module. -func ToArchiveOpt(options *TarOptions) *archive.TarOptions { - return toArchiveOpt(options) -} - -func toArchiveOpt(options *TarOptions) *archive.TarOptions { - if options == nil { - return nil - } - - var chownOpts *archive.ChownOpts - if options.ChownOpts != nil { - chownOpts = &archive.ChownOpts{ - UID: options.ChownOpts.UID, - GID: options.ChownOpts.GID, - } - } - - return &archive.TarOptions{ - IncludeFiles: options.IncludeFiles, - ExcludePatterns: options.ExcludePatterns, - Compression: options.Compression, - NoLchown: options.NoLchown, - IDMap: idtools.ToUserIdentityMapping(options.IDMap), - ChownOpts: chownOpts, - IncludeSourceDir: options.IncludeSourceDir, - WhiteoutFormat: options.WhiteoutFormat, - NoOverwriteDirNonDir: options.NoOverwriteDirNonDir, - RebaseNames: options.RebaseNames, - InUserNS: options.InUserNS, - BestEffortXattrs: options.BestEffortXattrs, - } -} diff --git a/vendor/github.com/docker/docker/pkg/archive/whiteouts_deprecated.go b/vendor/github.com/docker/docker/pkg/archive/whiteouts_deprecated.go deleted file mode 100644 index 0ab8590b1..000000000 --- a/vendor/github.com/docker/docker/pkg/archive/whiteouts_deprecated.go +++ /dev/null @@ -1,10 +0,0 @@ -package archive - -import "github.com/moby/go-archive" - -const ( - WhiteoutPrefix = archive.WhiteoutPrefix // Deprecated: use [archive.WhiteoutPrefix] instead. - WhiteoutMetaPrefix = archive.WhiteoutMetaPrefix // Deprecated: use [archive.WhiteoutMetaPrefix] instead. - WhiteoutLinkDir = archive.WhiteoutLinkDir // Deprecated: use [archive.WhiteoutLinkDir] instead. - WhiteoutOpaqueDir = archive.WhiteoutOpaqueDir // Deprecated: use [archive.WhiteoutOpaqueDir] instead. -) diff --git a/vendor/github.com/docker/docker/pkg/archive/wrap_deprecated.go b/vendor/github.com/docker/docker/pkg/archive/wrap_deprecated.go deleted file mode 100644 index e5d3fa9a9..000000000 --- a/vendor/github.com/docker/docker/pkg/archive/wrap_deprecated.go +++ /dev/null @@ -1,14 +0,0 @@ -package archive - -import ( - "io" - - "github.com/moby/go-archive" -) - -// Generate generates a new archive from the content provided as input. -// -// Deprecated: use [archive.Generate] instead. 
-func Generate(input ...string) (io.Reader, error) { - return archive.Generate(input...) -} diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools.go b/vendor/github.com/docker/docker/pkg/idtools/idtools.go deleted file mode 100644 index 982f81d4f..000000000 --- a/vendor/github.com/docker/docker/pkg/idtools/idtools.go +++ /dev/null @@ -1,223 +0,0 @@ -package idtools - -import ( - "fmt" - "os" - - "github.com/moby/sys/user" -) - -// IDMap contains a single entry for user namespace range remapping. An array -// of IDMap entries represents the structure that will be provided to the Linux -// kernel for creating a user namespace. -// -// Deprecated: use [user.IDMap] instead. -type IDMap struct { - ContainerID int `json:"container_id"` - HostID int `json:"host_id"` - Size int `json:"size"` -} - -// MkdirAllAndChown creates a directory (include any along the path) and then modifies -// ownership to the requested uid/gid. If the directory already exists, this -// function will still change ownership and permissions. -// -// Deprecated: use [user.MkdirAllAndChown] instead. -func MkdirAllAndChown(path string, mode os.FileMode, owner Identity) error { - return user.MkdirAllAndChown(path, mode, owner.UID, owner.GID) -} - -// MkdirAndChown creates a directory and then modifies ownership to the requested uid/gid. -// If the directory already exists, this function still changes ownership and permissions. -// Note that unlike os.Mkdir(), this function does not return IsExist error -// in case path already exists. -// -// Deprecated: use [user.MkdirAndChown] instead. -func MkdirAndChown(path string, mode os.FileMode, owner Identity) error { - return user.MkdirAndChown(path, mode, owner.UID, owner.GID) -} - -// MkdirAllAndChownNew creates a directory (include any along the path) and then modifies -// ownership ONLY of newly created directories to the requested uid/gid. If the -// directories along the path exist, no change of ownership or permissions will be performed -// -// Deprecated: use [user.MkdirAllAndChown] with the [user.WithOnlyNew] option instead. -func MkdirAllAndChownNew(path string, mode os.FileMode, owner Identity) error { - return user.MkdirAllAndChown(path, mode, owner.UID, owner.GID, user.WithOnlyNew) -} - -// GetRootUIDGID retrieves the remapped root uid/gid pair from the set of maps. -// If the maps are empty, then the root uid/gid will default to "real" 0/0 -// -// Deprecated: use [(user.IdentityMapping).RootPair] instead. -func GetRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) { - return getRootUIDGID(uidMap, gidMap) -} - -// getRootUIDGID retrieves the remapped root uid/gid pair from the set of maps. -// If the maps are empty, then the root uid/gid will default to "real" 0/0 -func getRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) { - uid, err := toHost(0, uidMap) - if err != nil { - return -1, -1, err - } - gid, err := toHost(0, gidMap) - if err != nil { - return -1, -1, err - } - return uid, gid, nil -} - -// toContainer takes an id mapping, and uses it to translate a -// host ID to the remapped ID. 
If no map is provided, then the translation -// assumes a 1-to-1 mapping and returns the passed in id -func toContainer(hostID int, idMap []IDMap) (int, error) { - if idMap == nil { - return hostID, nil - } - for _, m := range idMap { - if (hostID >= m.HostID) && (hostID <= (m.HostID + m.Size - 1)) { - contID := m.ContainerID + (hostID - m.HostID) - return contID, nil - } - } - return -1, fmt.Errorf("Host ID %d cannot be mapped to a container ID", hostID) -} - -// toHost takes an id mapping and a remapped ID, and translates the -// ID to the mapped host ID. If no map is provided, then the translation -// assumes a 1-to-1 mapping and returns the passed in id # -func toHost(contID int, idMap []IDMap) (int, error) { - if idMap == nil { - return contID, nil - } - for _, m := range idMap { - if (contID >= m.ContainerID) && (contID <= (m.ContainerID + m.Size - 1)) { - hostID := m.HostID + (contID - m.ContainerID) - return hostID, nil - } - } - return -1, fmt.Errorf("Container ID %d cannot be mapped to a host ID", contID) -} - -// Identity is either a UID and GID pair or a SID (but not both) -type Identity struct { - UID int - GID int - SID string -} - -// Chown changes the numeric uid and gid of the named file to id.UID and id.GID. -// -// Deprecated: this method is deprecated and will be removed in the next release. -func (id Identity) Chown(name string) error { - return os.Chown(name, id.UID, id.GID) -} - -// IdentityMapping contains a mappings of UIDs and GIDs. -// The zero value represents an empty mapping. -// -// Deprecated: this type is deprecated and will be removed in the next release. -type IdentityMapping struct { - UIDMaps []IDMap `json:"UIDMaps"` - GIDMaps []IDMap `json:"GIDMaps"` -} - -// FromUserIdentityMapping converts a [user.IdentityMapping] to an [idtools.IdentityMapping]. -// -// Deprecated: use [user.IdentityMapping] directly, this is transitioning to user package. -func FromUserIdentityMapping(u user.IdentityMapping) IdentityMapping { - return IdentityMapping{ - UIDMaps: fromUserIDMap(u.UIDMaps), - GIDMaps: fromUserIDMap(u.GIDMaps), - } -} - -func fromUserIDMap(u []user.IDMap) []IDMap { - if u == nil { - return nil - } - m := make([]IDMap, len(u)) - for i := range u { - m[i] = IDMap{ - ContainerID: int(u[i].ID), - HostID: int(u[i].ParentID), - Size: int(u[i].Count), - } - } - return m -} - -// ToUserIdentityMapping converts an [idtools.IdentityMapping] to a [user.IdentityMapping]. -// -// Deprecated: use [user.IdentityMapping] directly, this is transitioning to user package. -func ToUserIdentityMapping(u IdentityMapping) user.IdentityMapping { - return user.IdentityMapping{ - UIDMaps: toUserIDMap(u.UIDMaps), - GIDMaps: toUserIDMap(u.GIDMaps), - } -} - -func toUserIDMap(u []IDMap) []user.IDMap { - if u == nil { - return nil - } - m := make([]user.IDMap, len(u)) - for i := range u { - m[i] = user.IDMap{ - ID: int64(u[i].ContainerID), - ParentID: int64(u[i].HostID), - Count: int64(u[i].Size), - } - } - return m -} - -// RootPair returns a uid and gid pair for the root user. The error is ignored -// because a root user always exists, and the defaults are correct when the uid -// and gid maps are empty. -func (i IdentityMapping) RootPair() Identity { - uid, gid, _ := getRootUIDGID(i.UIDMaps, i.GIDMaps) - return Identity{UID: uid, GID: gid} -} - -// ToHost returns the host UID and GID for the container uid, gid. 
-// Remapping is only performed if the ids aren't already the remapped root ids -func (i IdentityMapping) ToHost(pair Identity) (Identity, error) { - var err error - target := i.RootPair() - - if pair.UID != target.UID { - target.UID, err = toHost(pair.UID, i.UIDMaps) - if err != nil { - return target, err - } - } - - if pair.GID != target.GID { - target.GID, err = toHost(pair.GID, i.GIDMaps) - } - return target, err -} - -// ToContainer returns the container UID and GID for the host uid and gid -func (i IdentityMapping) ToContainer(pair Identity) (int, int, error) { - uid, err := toContainer(pair.UID, i.UIDMaps) - if err != nil { - return -1, -1, err - } - gid, err := toContainer(pair.GID, i.GIDMaps) - return uid, gid, err -} - -// Empty returns true if there are no id mappings -func (i IdentityMapping) Empty() bool { - return len(i.UIDMaps) == 0 && len(i.GIDMaps) == 0 -} - -// CurrentIdentity returns the identity of the current process -// -// Deprecated: use [os.Getuid] and [os.Getegid] instead. -func CurrentIdentity() Identity { - return Identity{UID: os.Getuid(), GID: os.Getegid()} -} diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go b/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go deleted file mode 100644 index f83f59f30..000000000 --- a/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go +++ /dev/null @@ -1,12 +0,0 @@ -package idtools - -const ( - SeTakeOwnershipPrivilege = "SeTakeOwnershipPrivilege" -) - -// TODO(thaJeztah): these magic consts need a source of reference, and should be defined in a canonical location -const ( - ContainerAdministratorSidString = "S-1-5-93-2-1" - - ContainerUserSidString = "S-1-5-93-2-2" -) diff --git a/vendor/github.com/ebitengine/purego/.gitignore b/vendor/github.com/ebitengine/purego/.gitignore new file mode 100644 index 000000000..b25c15b81 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/.gitignore @@ -0,0 +1 @@ +*~ diff --git a/vendor/github.com/ebitengine/purego/LICENSE b/vendor/github.com/ebitengine/purego/LICENSE new file mode 100644 index 000000000..8dada3eda --- /dev/null +++ b/vendor/github.com/ebitengine/purego/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/ebitengine/purego/README.md b/vendor/github.com/ebitengine/purego/README.md new file mode 100644 index 000000000..f1ff9053a --- /dev/null +++ b/vendor/github.com/ebitengine/purego/README.md @@ -0,0 +1,97 @@ +# purego +[![Go Reference](https://pkg.go.dev/badge/github.com/ebitengine/purego?GOOS=darwin.svg)](https://pkg.go.dev/github.com/ebitengine/purego?GOOS=darwin) + +A library for calling C functions from Go without Cgo. + +> This is beta software so expect bugs and potentially API breaking changes +> but each release will be tagged to avoid breaking people's code. +> Bug reports are encouraged. + +## Motivation + +The [Ebitengine](https://github.com/hajimehoshi/ebiten) game engine was ported to use only Go on Windows. This enabled +cross-compiling to Windows from any other operating system simply by setting `GOOS=windows`. The purego project was +born to bring that same vision to the other platforms supported by Ebitengine. 
+ +## Benefits + +- **Simple Cross-Compilation**: No C means you can build for other platforms easily without a C compiler. +- **Faster Compilation**: Efficiently cache your entirely Go builds. +- **Smaller Binaries**: Using Cgo generates a C wrapper function for each C function called. Purego doesn't! +- **Dynamic Linking**: Load symbols at runtime and use it as a plugin system. +- **Foreign Function Interface**: Call into other languages that are compiled into shared objects. +- **Cgo Fallback**: Works even with CGO_ENABLED=1 so incremental porting is possible. +This also means unsupported GOARCHs (freebsd/riscv64, linux/mips, etc.) will still work +except for float arguments and return values. + +## Supported Platforms + +- **FreeBSD**: amd64, arm64 +- **Linux**: amd64, arm64 +- **macOS / iOS**: amd64, arm64 +- **Windows**: 386*, amd64, arm*, arm64 + +`*` These architectures only support SyscallN and NewCallback + +## Example + +The example below only showcases purego use for macOS and Linux. The other platforms require special handling which can +be seen in the complete example at [examples/libc](https://github.com/ebitengine/purego/tree/main/examples/libc) which supports Windows and FreeBSD. + +```go +package main + +import ( + "fmt" + "runtime" + + "github.com/ebitengine/purego" +) + +func getSystemLibrary() string { + switch runtime.GOOS { + case "darwin": + return "/usr/lib/libSystem.B.dylib" + case "linux": + return "libc.so.6" + default: + panic(fmt.Errorf("GOOS=%s is not supported", runtime.GOOS)) + } +} + +func main() { + libc, err := purego.Dlopen(getSystemLibrary(), purego.RTLD_NOW|purego.RTLD_GLOBAL) + if err != nil { + panic(err) + } + var puts func(string) + purego.RegisterLibFunc(&puts, libc, "puts") + puts("Calling C from Go without Cgo!") +} +``` + +Then to run: `CGO_ENABLED=0 go run main.go` + +## Questions + +If you have questions about how to incorporate purego in your project or want to discuss +how it works join the [Discord](https://discord.gg/HzGZVD6BkY)! + +### External Code + +Purego uses code that originates from the Go runtime. These files are under the BSD-3 +License that can be found [in the Go Source](https://github.com/golang/go/blob/master/LICENSE). +This is a list of the copied files: + +* `abi_*.h` from package `runtime/cgo` +* `zcallback_darwin_*.s` from package `runtime` +* `internal/fakecgo/abi_*.h` from package `runtime/cgo` +* `internal/fakecgo/asm_GOARCH.s` from package `runtime/cgo` +* `internal/fakecgo/callbacks.go` from package `runtime/cgo` +* `internal/fakecgo/go_GOOS_GOARCH.go` from package `runtime/cgo` +* `internal/fakecgo/iscgo.go` from package `runtime/cgo` +* `internal/fakecgo/setenv.go` from package `runtime/cgo` +* `internal/fakecgo/freebsd.go` from package `runtime/cgo` + +The files `abi_*.h` and `internal/fakecgo/abi_*.h` are the same because Bazel does not support cross-package use of +`#include` so we need each one once per package. (cf. [issue](https://github.com/bazelbuild/rules_go/issues/3636)) diff --git a/vendor/github.com/ebitengine/purego/abi_amd64.h b/vendor/github.com/ebitengine/purego/abi_amd64.h new file mode 100644 index 000000000..9949435fe --- /dev/null +++ b/vendor/github.com/ebitengine/purego/abi_amd64.h @@ -0,0 +1,99 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Macros for transitioning from the host ABI to Go ABI0. 
+// +// These save the frame pointer, so in general, functions that use +// these should have zero frame size to suppress the automatic frame +// pointer, though it's harmless to not do this. + +#ifdef GOOS_windows + +// REGS_HOST_TO_ABI0_STACK is the stack bytes used by +// PUSH_REGS_HOST_TO_ABI0. +#define REGS_HOST_TO_ABI0_STACK (28*8 + 8) + +// PUSH_REGS_HOST_TO_ABI0 prepares for transitioning from +// the host ABI to Go ABI0 code. It saves all registers that are +// callee-save in the host ABI and caller-save in Go ABI0 and prepares +// for entry to Go. +// +// Save DI SI BP BX R12 R13 R14 R15 X6-X15 registers and the DF flag. +// Clear the DF flag for the Go ABI. +// MXCSR matches the Go ABI, so we don't have to set that, +// and Go doesn't modify it, so we don't have to save it. +#define PUSH_REGS_HOST_TO_ABI0() \ + PUSHFQ \ + CLD \ + ADJSP $(REGS_HOST_TO_ABI0_STACK - 8) \ + MOVQ DI, (0*0)(SP) \ + MOVQ SI, (1*8)(SP) \ + MOVQ BP, (2*8)(SP) \ + MOVQ BX, (3*8)(SP) \ + MOVQ R12, (4*8)(SP) \ + MOVQ R13, (5*8)(SP) \ + MOVQ R14, (6*8)(SP) \ + MOVQ R15, (7*8)(SP) \ + MOVUPS X6, (8*8)(SP) \ + MOVUPS X7, (10*8)(SP) \ + MOVUPS X8, (12*8)(SP) \ + MOVUPS X9, (14*8)(SP) \ + MOVUPS X10, (16*8)(SP) \ + MOVUPS X11, (18*8)(SP) \ + MOVUPS X12, (20*8)(SP) \ + MOVUPS X13, (22*8)(SP) \ + MOVUPS X14, (24*8)(SP) \ + MOVUPS X15, (26*8)(SP) + +#define POP_REGS_HOST_TO_ABI0() \ + MOVQ (0*0)(SP), DI \ + MOVQ (1*8)(SP), SI \ + MOVQ (2*8)(SP), BP \ + MOVQ (3*8)(SP), BX \ + MOVQ (4*8)(SP), R12 \ + MOVQ (5*8)(SP), R13 \ + MOVQ (6*8)(SP), R14 \ + MOVQ (7*8)(SP), R15 \ + MOVUPS (8*8)(SP), X6 \ + MOVUPS (10*8)(SP), X7 \ + MOVUPS (12*8)(SP), X8 \ + MOVUPS (14*8)(SP), X9 \ + MOVUPS (16*8)(SP), X10 \ + MOVUPS (18*8)(SP), X11 \ + MOVUPS (20*8)(SP), X12 \ + MOVUPS (22*8)(SP), X13 \ + MOVUPS (24*8)(SP), X14 \ + MOVUPS (26*8)(SP), X15 \ + ADJSP $-(REGS_HOST_TO_ABI0_STACK - 8) \ + POPFQ + +#else +// SysV ABI + +#define REGS_HOST_TO_ABI0_STACK (6*8) + +// SysV MXCSR matches the Go ABI, so we don't have to set that, +// and Go doesn't modify it, so we don't have to save it. +// Both SysV and Go require DF to be cleared, so that's already clear. +// The SysV and Go frame pointer conventions are compatible. +#define PUSH_REGS_HOST_TO_ABI0() \ + ADJSP $(REGS_HOST_TO_ABI0_STACK) \ + MOVQ BP, (5*8)(SP) \ + LEAQ (5*8)(SP), BP \ + MOVQ BX, (0*8)(SP) \ + MOVQ R12, (1*8)(SP) \ + MOVQ R13, (2*8)(SP) \ + MOVQ R14, (3*8)(SP) \ + MOVQ R15, (4*8)(SP) + +#define POP_REGS_HOST_TO_ABI0() \ + MOVQ (0*8)(SP), BX \ + MOVQ (1*8)(SP), R12 \ + MOVQ (2*8)(SP), R13 \ + MOVQ (3*8)(SP), R14 \ + MOVQ (4*8)(SP), R15 \ + MOVQ (5*8)(SP), BP \ + ADJSP $-(REGS_HOST_TO_ABI0_STACK) + +#endif diff --git a/vendor/github.com/ebitengine/purego/abi_arm64.h b/vendor/github.com/ebitengine/purego/abi_arm64.h new file mode 100644 index 000000000..5d5061ec1 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/abi_arm64.h @@ -0,0 +1,39 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Macros for transitioning from the host ABI to Go ABI0. +// +// These macros save and restore the callee-saved registers +// from the stack, but they don't adjust stack pointer, so +// the user should prepare stack space in advance. +// SAVE_R19_TO_R28(offset) saves R19 ~ R28 to the stack space +// of ((offset)+0*8)(RSP) ~ ((offset)+9*8)(RSP). +// +// SAVE_F8_TO_F15(offset) saves F8 ~ F15 to the stack space +// of ((offset)+0*8)(RSP) ~ ((offset)+7*8)(RSP). 
+// +// R29 is not saved because Go will save and restore it. + +#define SAVE_R19_TO_R28(offset) \ + STP (R19, R20), ((offset)+0*8)(RSP) \ + STP (R21, R22), ((offset)+2*8)(RSP) \ + STP (R23, R24), ((offset)+4*8)(RSP) \ + STP (R25, R26), ((offset)+6*8)(RSP) \ + STP (R27, g), ((offset)+8*8)(RSP) +#define RESTORE_R19_TO_R28(offset) \ + LDP ((offset)+0*8)(RSP), (R19, R20) \ + LDP ((offset)+2*8)(RSP), (R21, R22) \ + LDP ((offset)+4*8)(RSP), (R23, R24) \ + LDP ((offset)+6*8)(RSP), (R25, R26) \ + LDP ((offset)+8*8)(RSP), (R27, g) /* R28 */ +#define SAVE_F8_TO_F15(offset) \ + FSTPD (F8, F9), ((offset)+0*8)(RSP) \ + FSTPD (F10, F11), ((offset)+2*8)(RSP) \ + FSTPD (F12, F13), ((offset)+4*8)(RSP) \ + FSTPD (F14, F15), ((offset)+6*8)(RSP) +#define RESTORE_F8_TO_F15(offset) \ + FLDPD ((offset)+0*8)(RSP), (F8, F9) \ + FLDPD ((offset)+2*8)(RSP), (F10, F11) \ + FLDPD ((offset)+4*8)(RSP), (F12, F13) \ + FLDPD ((offset)+6*8)(RSP), (F14, F15) diff --git a/vendor/github.com/ebitengine/purego/cgo.go b/vendor/github.com/ebitengine/purego/cgo.go new file mode 100644 index 000000000..7d5abef34 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/cgo.go @@ -0,0 +1,19 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2022 The Ebitengine Authors + +//go:build cgo && (darwin || freebsd || linux) + +package purego + +// if CGO_ENABLED=1 import the Cgo runtime to ensure that it is set up properly. +// This is required since some frameworks need TLS setup the C way which Go doesn't do. +// We currently don't support ios in fakecgo mode so force Cgo or fail +// Even if CGO_ENABLED=1 the Cgo runtime is not imported unless `import "C"` is used. +// which will import this package automatically. Normally this isn't an issue since it +// usually isn't possible to call into C without using that import. However, with purego +// it is since we don't use `import "C"`! +import ( + _ "runtime/cgo" + + _ "github.com/ebitengine/purego/internal/cgo" +) diff --git a/vendor/github.com/ebitengine/purego/dlerror.go b/vendor/github.com/ebitengine/purego/dlerror.go new file mode 100644 index 000000000..95cdfe16f --- /dev/null +++ b/vendor/github.com/ebitengine/purego/dlerror.go @@ -0,0 +1,17 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2023 The Ebitengine Authors + +//go:build darwin || freebsd || linux + +package purego + +// Dlerror represents an error value returned from Dlopen, Dlsym, or Dlclose. +// +// This type is not available on Windows as there is no counterpart to it on Windows. 
+type Dlerror struct { + s string +} + +func (e Dlerror) Error() string { + return e.s +} diff --git a/vendor/github.com/ebitengine/purego/dlfcn.go b/vendor/github.com/ebitengine/purego/dlfcn.go new file mode 100644 index 000000000..cd1bf293c --- /dev/null +++ b/vendor/github.com/ebitengine/purego/dlfcn.go @@ -0,0 +1,99 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2022 The Ebitengine Authors + +//go:build (darwin || freebsd || linux) && !android && !faketime + +package purego + +import ( + "unsafe" +) + +// Unix Specification for dlfcn.h: https://pubs.opengroup.org/onlinepubs/7908799/xsh/dlfcn.h.html + +var ( + fnDlopen func(path string, mode int) uintptr + fnDlsym func(handle uintptr, name string) uintptr + fnDlerror func() string + fnDlclose func(handle uintptr) bool +) + +func init() { + RegisterFunc(&fnDlopen, dlopenABI0) + RegisterFunc(&fnDlsym, dlsymABI0) + RegisterFunc(&fnDlerror, dlerrorABI0) + RegisterFunc(&fnDlclose, dlcloseABI0) +} + +// Dlopen examines the dynamic library or bundle file specified by path. If the file is compatible +// with the current process and has not already been loaded into the +// current process, it is loaded and linked. After being linked, if it contains +// any initializer functions, they are called, before Dlopen +// returns. It returns a handle that can be used with Dlsym and Dlclose. +// A second call to Dlopen with the same path will return the same handle, but the internal +// reference count for the handle will be incremented. Therefore, all +// Dlopen calls should be balanced with a Dlclose call. +// +// This function is not available on Windows. +// Use [golang.org/x/sys/windows.LoadLibrary], [golang.org/x/sys/windows.LoadLibraryEx], +// [golang.org/x/sys/windows.NewLazyDLL], or [golang.org/x/sys/windows.NewLazySystemDLL] for Windows instead. +func Dlopen(path string, mode int) (uintptr, error) { + u := fnDlopen(path, mode) + if u == 0 { + return 0, Dlerror{fnDlerror()} + } + return u, nil +} + +// Dlsym takes a "handle" of a dynamic library returned by Dlopen and the symbol name. +// It returns the address where that symbol is loaded into memory. If the symbol is not found, +// in the specified library or any of the libraries that were automatically loaded by Dlopen +// when that library was loaded, Dlsym returns zero. +// +// This function is not available on Windows. +// Use [golang.org/x/sys/windows.GetProcAddress] for Windows instead. +func Dlsym(handle uintptr, name string) (uintptr, error) { + u := fnDlsym(handle, name) + if u == 0 { + return 0, Dlerror{fnDlerror()} + } + return u, nil +} + +// Dlclose decrements the reference count on the dynamic library handle. +// If the reference count drops to zero and no other loaded libraries +// use symbols in it, then the dynamic library is unloaded. +// +// This function is not available on Windows. +// Use [golang.org/x/sys/windows.FreeLibrary] for Windows instead. +func Dlclose(handle uintptr) error { + if fnDlclose(handle) { + return Dlerror{fnDlerror()} + } + return nil +} + +func loadSymbol(handle uintptr, name string) (uintptr, error) { + return Dlsym(handle, name) +} + +// these functions exist in dlfcn_stubs.s and are calling C functions linked to in dlfcn_GOOS.go +// the indirection is necessary because a function is actually a pointer to the pointer to the code. +// sadly, I do not know of anyway to remove the assembly stubs entirely because //go:linkname doesn't +// appear to work if you link directly to the C function on darwin arm64. 
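Taken together, the Dlopen, Dlsym, and Dlclose wrappers above mirror the classic libc dlfcn flow: open a library, resolve a symbol, bind it to a typed Go function, and balance the open with a close. A minimal sketch under assumed conditions (linux/amd64 or linux/arm64; `libm.so.6` and its `cos` symbol are chosen purely for illustration):

```go
package main

import (
	"fmt"

	"github.com/ebitengine/purego"
)

func main() {
	// The library name is an assumption for this sketch; adjust per platform.
	libm, err := purego.Dlopen("libm.so.6", purego.RTLD_NOW|purego.RTLD_GLOBAL)
	if err != nil {
		panic(err)
	}
	defer purego.Dlclose(libm) // balance the Dlopen reference count

	addr, err := purego.Dlsym(libm, "cos")
	if err != nil {
		panic(err)
	}

	// Bind the raw symbol address to a typed Go function.
	var cos func(float64) float64
	purego.RegisterFunc(&cos, addr)
	fmt.Println(cos(0)) // prints 1
}
```

On failure, both Dlopen and Dlsym surface whatever dlerror() reported, wrapped in the Dlerror type defined above.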
+ +//go:linkname dlopen dlopen +var dlopen uint8 +var dlopenABI0 = uintptr(unsafe.Pointer(&dlopen)) + +//go:linkname dlsym dlsym +var dlsym uint8 +var dlsymABI0 = uintptr(unsafe.Pointer(&dlsym)) + +//go:linkname dlclose dlclose +var dlclose uint8 +var dlcloseABI0 = uintptr(unsafe.Pointer(&dlclose)) + +//go:linkname dlerror dlerror +var dlerror uint8 +var dlerrorABI0 = uintptr(unsafe.Pointer(&dlerror)) diff --git a/vendor/github.com/ebitengine/purego/dlfcn_android.go b/vendor/github.com/ebitengine/purego/dlfcn_android.go new file mode 100644 index 000000000..0d5341764 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/dlfcn_android.go @@ -0,0 +1,34 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2024 The Ebitengine Authors + +package purego + +import "github.com/ebitengine/purego/internal/cgo" + +// Source for constants: https://android.googlesource.com/platform/bionic/+/refs/heads/main/libc/include/dlfcn.h + +const ( + is64bit = 1 << (^uintptr(0) >> 63) / 2 + is32bit = 1 - is64bit + RTLD_DEFAULT = is32bit * 0xffffffff + RTLD_LAZY = 0x00000001 + RTLD_NOW = is64bit * 0x00000002 + RTLD_LOCAL = 0x00000000 + RTLD_GLOBAL = is64bit*0x00100 | is32bit*0x00000002 +) + +func Dlopen(path string, mode int) (uintptr, error) { + return cgo.Dlopen(path, mode) +} + +func Dlsym(handle uintptr, name string) (uintptr, error) { + return cgo.Dlsym(handle, name) +} + +func Dlclose(handle uintptr) error { + return cgo.Dlclose(handle) +} + +func loadSymbol(handle uintptr, name string) (uintptr, error) { + return Dlsym(handle, name) +} diff --git a/vendor/github.com/ebitengine/purego/dlfcn_darwin.go b/vendor/github.com/ebitengine/purego/dlfcn_darwin.go new file mode 100644 index 000000000..27f560715 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/dlfcn_darwin.go @@ -0,0 +1,19 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2022 The Ebitengine Authors + +package purego + +// Source for constants: https://opensource.apple.com/source/dyld/dyld-360.14/include/dlfcn.h.auto.html + +const ( + RTLD_DEFAULT = 1<<64 - 2 // Pseudo-handle for dlsym so search for any loaded symbol + RTLD_LAZY = 0x1 // Relocations are performed at an implementation-dependent time. + RTLD_NOW = 0x2 // Relocations are performed when the object is loaded. + RTLD_LOCAL = 0x4 // All symbols are not made available for relocation processing by other modules. + RTLD_GLOBAL = 0x8 // All symbols are available for relocation processing of other modules. 
+) + +//go:cgo_import_dynamic purego_dlopen dlopen "/usr/lib/libSystem.B.dylib" +//go:cgo_import_dynamic purego_dlsym dlsym "/usr/lib/libSystem.B.dylib" +//go:cgo_import_dynamic purego_dlerror dlerror "/usr/lib/libSystem.B.dylib" +//go:cgo_import_dynamic purego_dlclose dlclose "/usr/lib/libSystem.B.dylib" diff --git a/vendor/github.com/ebitengine/purego/dlfcn_freebsd.go b/vendor/github.com/ebitengine/purego/dlfcn_freebsd.go new file mode 100644 index 000000000..6b371620d --- /dev/null +++ b/vendor/github.com/ebitengine/purego/dlfcn_freebsd.go @@ -0,0 +1,14 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2022 The Ebitengine Authors + +package purego + +// Constants as defined in https://github.com/freebsd/freebsd-src/blob/main/include/dlfcn.h +const ( + intSize = 32 << (^uint(0) >> 63) // 32 or 64 + RTLD_DEFAULT = 1< C) +// +// string <=> char* +// bool <=> _Bool +// uintptr <=> uintptr_t +// uint <=> uint32_t or uint64_t +// uint8 <=> uint8_t +// uint16 <=> uint16_t +// uint32 <=> uint32_t +// uint64 <=> uint64_t +// int <=> int32_t or int64_t +// int8 <=> int8_t +// int16 <=> int16_t +// int32 <=> int32_t +// int64 <=> int64_t +// float32 <=> float +// float64 <=> double +// struct <=> struct (WIP - darwin only) +// func <=> C function +// unsafe.Pointer, *T <=> void* +// []T => void* +// +// There is a special case when the last argument of fptr is a variadic interface (or []interface} +// it will be expanded into a call to the C function as if it had the arguments in that slice. +// This means that using arg ...interface{} is like a cast to the function with the arguments inside arg. +// This is not the same as C variadic. +// +// # Memory +// +// In general it is not possible for purego to guarantee the lifetimes of objects returned or received from +// calling functions using RegisterFunc. For arguments to a C function it is important that the C function doesn't +// hold onto a reference to Go memory. This is the same as the [Cgo rules]. +// +// However, there are some special cases. When passing a string as an argument if the string does not end in a null +// terminated byte (\x00) then the string will be copied into memory maintained by purego. The memory is only valid for +// that specific call. Therefore, if the C code keeps a reference to that string it may become invalid at some +// undefined time. However, if the string does already contain a null-terminated byte then no copy is done. +// It is then the responsibility of the caller to ensure the string stays alive as long as it's needed in C memory. +// This can be done using runtime.KeepAlive or allocating the string in C memory using malloc. When a C function +// returns a null-terminated pointer to char a Go string can be used. Purego will allocate a new string in Go memory +// and copy the data over. This string will be garbage collected whenever Go decides it's no longer referenced. +// This C created string will not be freed by purego. If the pointer to char is not null-terminated or must continue +// to point to C memory (because it's a buffer for example) then use a pointer to byte and then convert that to a slice +// using unsafe.Slice. Doing this means that it becomes the responsibility of the caller to care about the lifetime +// of the pointer +// +// # Structs +// +// Purego can handle the most common structs that have fields of builtin types like int8, uint16, float32, etc. However, +// it does not support aligning fields properly. 
It is therefore the responsibility of the caller to ensure +// that all padding is added to the Go struct to match the C one. See `BoolStructFn` in struct_test.go for an example. +// +// # Example +// +// All functions below call this C function: +// +// char *foo(char *str); +// +// // Let purego convert types +// var foo func(s string) string +// goString := foo("copied") +// // Go will garbage collect this string +// +// // Manually, handle allocations +// var foo2 func(b string) *byte +// mustFree := foo2("not copied\x00") +// defer free(mustFree) +// +// [Cgo rules]: https://pkg.go.dev/cmd/cgo#hdr-Go_references_to_C +func RegisterFunc(fptr interface{}, cfn uintptr) { + fn := reflect.ValueOf(fptr).Elem() + ty := fn.Type() + if ty.Kind() != reflect.Func { + panic("purego: fptr must be a function pointer") + } + if ty.NumOut() > 1 { + panic("purego: function can only return zero or one values") + } + if cfn == 0 { + panic("purego: cfn is nil") + } + if ty.NumOut() == 1 && (ty.Out(0).Kind() == reflect.Float32 || ty.Out(0).Kind() == reflect.Float64) && + runtime.GOARCH != "arm64" && runtime.GOARCH != "amd64" { + panic("purego: float returns are not supported") + } + { + // this code checks how many registers and stack this function will use + // to avoid crashing with too many arguments + var ints int + var floats int + var stack int + for i := 0; i < ty.NumIn(); i++ { + arg := ty.In(i) + switch arg.Kind() { + case reflect.Func: + // This only does preliminary testing to ensure the CDecl argument + // is the first argument. Full testing is done when the callback is actually + // created in NewCallback. + for j := 0; j < arg.NumIn(); j++ { + in := arg.In(j) + if !in.AssignableTo(reflect.TypeOf(CDecl{})) { + continue + } + if j != 0 { + panic("purego: CDecl must be the first argument") + } + } + case reflect.String, reflect.Uintptr, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, + reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Ptr, reflect.UnsafePointer, + reflect.Slice, reflect.Bool: + if ints < numOfIntegerRegisters() { + ints++ + } else { + stack++ + } + case reflect.Float32, reflect.Float64: + const is32bit = unsafe.Sizeof(uintptr(0)) == 4 + if is32bit { + panic("purego: floats only supported on 64bit platforms") + } + if floats < numOfFloats { + floats++ + } else { + stack++ + } + case reflect.Struct: + if runtime.GOOS != "darwin" || (runtime.GOARCH != "amd64" && runtime.GOARCH != "arm64") { + panic("purego: struct arguments are only supported on darwin amd64 & arm64") + } + if arg.Size() == 0 { + continue + } + addInt := func(u uintptr) { + ints++ + } + addFloat := func(u uintptr) { + floats++ + } + addStack := func(u uintptr) { + stack++ + } + _ = addStruct(reflect.New(arg).Elem(), &ints, &floats, &stack, addInt, addFloat, addStack, nil) + default: + panic("purego: unsupported kind " + arg.Kind().String()) + } + } + if ty.NumOut() == 1 && ty.Out(0).Kind() == reflect.Struct { + if runtime.GOOS != "darwin" { + panic("purego: struct return values only supported on darwin arm64 & amd64") + } + outType := ty.Out(0) + checkStructFieldsSupported(outType) + if runtime.GOARCH == "amd64" && outType.Size() > maxRegAllocStructSize { + // on amd64 if struct is bigger than 16 bytes allocate the return struct + // and pass it in as a hidden first argument. 
+ ints++ + } + } + sizeOfStack := maxArgs - numOfIntegerRegisters() + if stack > sizeOfStack { + panic("purego: too many arguments") + } + } + v := reflect.MakeFunc(ty, func(args []reflect.Value) (results []reflect.Value) { + if len(args) > 0 { + if variadic, ok := args[len(args)-1].Interface().([]interface{}); ok { + // subtract one from args bc the last argument in args is []interface{} + // which we are currently expanding + tmp := make([]reflect.Value, len(args)-1+len(variadic)) + n := copy(tmp, args[:len(args)-1]) + for i, v := range variadic { + tmp[n+i] = reflect.ValueOf(v) + } + args = tmp + } + } + var sysargs [maxArgs]uintptr + stack := sysargs[numOfIntegerRegisters():] + var floats [numOfFloats]uintptr + var numInts int + var numFloats int + var numStack int + var addStack, addInt, addFloat func(x uintptr) + if runtime.GOARCH == "arm64" || runtime.GOOS != "windows" { + // Windows arm64 uses the same calling convention as macOS and Linux + addStack = func(x uintptr) { + stack[numStack] = x + numStack++ + } + addInt = func(x uintptr) { + if numInts >= numOfIntegerRegisters() { + addStack(x) + } else { + sysargs[numInts] = x + numInts++ + } + } + addFloat = func(x uintptr) { + if numFloats < len(floats) { + floats[numFloats] = x + numFloats++ + } else { + addStack(x) + } + } + } else { + // On Windows amd64 the arguments are passed in the numbered registered. + // So the first int is in the first integer register and the first float + // is in the second floating register if there is already a first int. + // This is in contrast to how macOS and Linux pass arguments which + // tries to use as many registers as possible in the calling convention. + addStack = func(x uintptr) { + sysargs[numStack] = x + numStack++ + } + addInt = addStack + addFloat = addStack + } + + var keepAlive []interface{} + defer func() { + runtime.KeepAlive(keepAlive) + runtime.KeepAlive(args) + }() + var syscall syscall15Args + if ty.NumOut() == 1 && ty.Out(0).Kind() == reflect.Struct { + outType := ty.Out(0) + if runtime.GOARCH == "amd64" && outType.Size() > maxRegAllocStructSize { + val := reflect.New(outType) + keepAlive = append(keepAlive, val) + addInt(val.Pointer()) + } else if runtime.GOARCH == "arm64" && outType.Size() > maxRegAllocStructSize { + isAllFloats, numFields := isAllSameFloat(outType) + if !isAllFloats || numFields > 4 { + val := reflect.New(outType) + keepAlive = append(keepAlive, val) + syscall.arm64_r8 = val.Pointer() + } + } + } + for _, v := range args { + switch v.Kind() { + case reflect.String: + ptr := strings.CString(v.String()) + keepAlive = append(keepAlive, ptr) + addInt(uintptr(unsafe.Pointer(ptr))) + case reflect.Uintptr, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + addInt(uintptr(v.Uint())) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + addInt(uintptr(v.Int())) + case reflect.Ptr, reflect.UnsafePointer, reflect.Slice: + // There is no need to keepAlive this pointer separately because it is kept alive in the args variable + addInt(v.Pointer()) + case reflect.Func: + addInt(NewCallback(v.Interface())) + case reflect.Bool: + if v.Bool() { + addInt(1) + } else { + addInt(0) + } + case reflect.Float32: + addFloat(uintptr(math.Float32bits(float32(v.Float())))) + case reflect.Float64: + addFloat(uintptr(math.Float64bits(v.Float()))) + case reflect.Struct: + keepAlive = addStruct(v, &numInts, &numFloats, &numStack, addInt, addFloat, addStack, keepAlive) + default: + panic("purego: unsupported kind: " + 
v.Kind().String()) + } + } + if runtime.GOARCH == "arm64" || runtime.GOOS != "windows" { + // Use the normal arm64 calling convention even on Windows + syscall = syscall15Args{ + cfn, + sysargs[0], sysargs[1], sysargs[2], sysargs[3], sysargs[4], sysargs[5], + sysargs[6], sysargs[7], sysargs[8], sysargs[9], sysargs[10], sysargs[11], + sysargs[12], sysargs[13], sysargs[14], + floats[0], floats[1], floats[2], floats[3], floats[4], floats[5], floats[6], floats[7], + syscall.arm64_r8, + } + runtime_cgocall(syscall15XABI0, unsafe.Pointer(&syscall)) + } else { + // This is a fallback for Windows amd64, 386, and arm. Note this may not support floats + syscall.a1, syscall.a2, _ = syscall_syscall15X(cfn, sysargs[0], sysargs[1], sysargs[2], sysargs[3], sysargs[4], + sysargs[5], sysargs[6], sysargs[7], sysargs[8], sysargs[9], sysargs[10], sysargs[11], + sysargs[12], sysargs[13], sysargs[14]) + syscall.f1 = syscall.a2 // on amd64 a2 stores the float return. On 32bit platforms floats aren't support + } + if ty.NumOut() == 0 { + return nil + } + outType := ty.Out(0) + v := reflect.New(outType).Elem() + switch outType.Kind() { + case reflect.Uintptr, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + v.SetUint(uint64(syscall.a1)) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + v.SetInt(int64(syscall.a1)) + case reflect.Bool: + v.SetBool(byte(syscall.a1) != 0) + case reflect.UnsafePointer: + // We take the address and then dereference it to trick go vet from creating a possible miss-use of unsafe.Pointer + v.SetPointer(*(*unsafe.Pointer)(unsafe.Pointer(&syscall.a1))) + case reflect.Ptr: + v = reflect.NewAt(outType, unsafe.Pointer(&syscall.a1)).Elem() + case reflect.Func: + // wrap this C function in a nicely typed Go function + v = reflect.New(outType) + RegisterFunc(v.Interface(), syscall.a1) + case reflect.String: + v.SetString(strings.GoString(syscall.a1)) + case reflect.Float32: + // NOTE: syscall.r2 is only the floating return value on 64bit platforms. + // On 32bit platforms syscall.r2 is the upper part of a 64bit return. + v.SetFloat(float64(math.Float32frombits(uint32(syscall.f1)))) + case reflect.Float64: + // NOTE: syscall.r2 is only the floating return value on 64bit platforms. + // On 32bit platforms syscall.r2 is the upper part of a 64bit return. + v.SetFloat(math.Float64frombits(uint64(syscall.f1))) + case reflect.Struct: + v = getStruct(outType, syscall) + default: + panic("purego: unsupported return kind: " + outType.Kind().String()) + } + return []reflect.Value{v} + }) + fn.Set(v) +} + +// maxRegAllocStructSize is the biggest a struct can be while still fitting in registers. +// if it is bigger than this than enough space must be allocated on the heap and then passed into +// the function as the first parameter on amd64 or in R8 on arm64. 
+// +// If you change this make sure to update it in objc_runtime_darwin.go +const maxRegAllocStructSize = 16 + +func isAllSameFloat(ty reflect.Type) (allFloats bool, numFields int) { + allFloats = true + root := ty.Field(0).Type + for root.Kind() == reflect.Struct { + root = root.Field(0).Type + } + first := root.Kind() + if first != reflect.Float32 && first != reflect.Float64 { + allFloats = false + } + for i := 0; i < ty.NumField(); i++ { + f := ty.Field(i).Type + if f.Kind() == reflect.Struct { + var structNumFields int + allFloats, structNumFields = isAllSameFloat(f) + numFields += structNumFields + continue + } + numFields++ + if f.Kind() != first { + allFloats = false + } + } + return allFloats, numFields +} + +func checkStructFieldsSupported(ty reflect.Type) { + for i := 0; i < ty.NumField(); i++ { + f := ty.Field(i).Type + if f.Kind() == reflect.Array { + f = f.Elem() + } else if f.Kind() == reflect.Struct { + checkStructFieldsSupported(f) + continue + } + switch f.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, + reflect.Uintptr, reflect.Ptr, reflect.UnsafePointer, reflect.Float64, reflect.Float32: + default: + panic(fmt.Sprintf("purego: struct field type %s is not supported", f)) + } + } +} + +func roundUpTo8(val uintptr) uintptr { + return (val + 7) &^ 7 +} + +func numOfIntegerRegisters() int { + switch runtime.GOARCH { + case "arm64": + return 8 + case "amd64": + return 6 + default: + // since this platform isn't supported and can therefore only access + // integer registers it is fine to return the maxArgs + return maxArgs + } +} diff --git a/vendor/github.com/ebitengine/purego/go_runtime.go b/vendor/github.com/ebitengine/purego/go_runtime.go new file mode 100644 index 000000000..13671ff23 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/go_runtime.go @@ -0,0 +1,13 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2022 The Ebitengine Authors + +//go:build darwin || freebsd || linux || windows + +package purego + +import ( + "unsafe" +) + +//go:linkname runtime_cgocall runtime.cgocall +func runtime_cgocall(fn uintptr, arg unsafe.Pointer) int32 // from runtime/sys_libc.go diff --git a/vendor/github.com/ebitengine/purego/internal/cgo/dlfcn_cgo_unix.go b/vendor/github.com/ebitengine/purego/internal/cgo/dlfcn_cgo_unix.go new file mode 100644 index 000000000..b09ecac1c --- /dev/null +++ b/vendor/github.com/ebitengine/purego/internal/cgo/dlfcn_cgo_unix.go @@ -0,0 +1,56 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2024 The Ebitengine Authors + +//go:build freebsd || linux + +package cgo + +/* + #cgo LDFLAGS: -ldl + +#include +#include +*/ +import "C" + +import ( + "errors" + "unsafe" +) + +func Dlopen(filename string, flag int) (uintptr, error) { + cfilename := C.CString(filename) + defer C.free(unsafe.Pointer(cfilename)) + handle := C.dlopen(cfilename, C.int(flag)) + if handle == nil { + return 0, errors.New(C.GoString(C.dlerror())) + } + return uintptr(handle), nil +} + +func Dlsym(handle uintptr, symbol string) (uintptr, error) { + csymbol := C.CString(symbol) + defer C.free(unsafe.Pointer(csymbol)) + symbolAddr := C.dlsym(*(*unsafe.Pointer)(unsafe.Pointer(&handle)), csymbol) + if symbolAddr == nil { + return 0, errors.New(C.GoString(C.dlerror())) + } + return uintptr(symbolAddr), nil +} + +func Dlclose(handle uintptr) error { + result := C.dlclose(*(*unsafe.Pointer)(unsafe.Pointer(&handle))) + if result != 0 { 
+ return errors.New(C.GoString(C.dlerror())) + } + return nil +} + +// all that is needed is to assign each dl function because then its +// symbol will then be made available to the linker and linked to inside dlfcn.go +var ( + _ = C.dlopen + _ = C.dlsym + _ = C.dlerror + _ = C.dlclose +) diff --git a/vendor/github.com/ebitengine/purego/internal/cgo/empty.go b/vendor/github.com/ebitengine/purego/internal/cgo/empty.go new file mode 100644 index 000000000..1d7cffe2a --- /dev/null +++ b/vendor/github.com/ebitengine/purego/internal/cgo/empty.go @@ -0,0 +1,6 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2024 The Ebitengine Authors + +package cgo + +// Empty so that importing this package doesn't cause issue for certain platforms. diff --git a/vendor/github.com/ebitengine/purego/internal/cgo/syscall_cgo_unix.go b/vendor/github.com/ebitengine/purego/internal/cgo/syscall_cgo_unix.go new file mode 100644 index 000000000..37ff24d5c --- /dev/null +++ b/vendor/github.com/ebitengine/purego/internal/cgo/syscall_cgo_unix.go @@ -0,0 +1,55 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2022 The Ebitengine Authors + +//go:build freebsd || (linux && !(arm64 || amd64)) + +package cgo + +// this file is placed inside internal/cgo and not package purego +// because Cgo and assembly files can't be in the same package. + +/* + #cgo LDFLAGS: -ldl + +#include +#include +#include +#include + +typedef struct syscall15Args { + uintptr_t fn; + uintptr_t a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15; + uintptr_t f1, f2, f3, f4, f5, f6, f7, f8; + uintptr_t err; +} syscall15Args; + +void syscall15(struct syscall15Args *args) { + assert((args->f1|args->f2|args->f3|args->f4|args->f5|args->f6|args->f7|args->f8) == 0); + uintptr_t (*func_name)(uintptr_t a1, uintptr_t a2, uintptr_t a3, uintptr_t a4, uintptr_t a5, uintptr_t a6, + uintptr_t a7, uintptr_t a8, uintptr_t a9, uintptr_t a10, uintptr_t a11, uintptr_t a12, + uintptr_t a13, uintptr_t a14, uintptr_t a15); + *(void**)(&func_name) = (void*)(args->fn); + uintptr_t r1 = func_name(args->a1,args->a2,args->a3,args->a4,args->a5,args->a6,args->a7,args->a8,args->a9, + args->a10,args->a11,args->a12,args->a13,args->a14,args->a15); + args->a1 = r1; + args->err = errno; +} + +*/ +import "C" +import "unsafe" + +// assign purego.syscall15XABI0 to the C version of this function. +var Syscall15XABI0 = unsafe.Pointer(C.syscall15) + +//go:nosplit +func Syscall15X(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15 uintptr) (r1, r2, err uintptr) { + args := C.syscall15Args{ + C.uintptr_t(fn), C.uintptr_t(a1), C.uintptr_t(a2), C.uintptr_t(a3), + C.uintptr_t(a4), C.uintptr_t(a5), C.uintptr_t(a6), + C.uintptr_t(a7), C.uintptr_t(a8), C.uintptr_t(a9), C.uintptr_t(a10), C.uintptr_t(a11), C.uintptr_t(a12), + C.uintptr_t(a13), C.uintptr_t(a14), C.uintptr_t(a15), 0, 0, 0, 0, 0, 0, 0, 0, 0, + } + C.syscall15(&args) + return uintptr(args.a1), 0, uintptr(args.err) +} diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/abi_amd64.h b/vendor/github.com/ebitengine/purego/internal/fakecgo/abi_amd64.h new file mode 100644 index 000000000..9949435fe --- /dev/null +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/abi_amd64.h @@ -0,0 +1,99 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Macros for transitioning from the host ABI to Go ABI0. 
+// +// These save the frame pointer, so in general, functions that use +// these should have zero frame size to suppress the automatic frame +// pointer, though it's harmless to not do this. + +#ifdef GOOS_windows + +// REGS_HOST_TO_ABI0_STACK is the stack bytes used by +// PUSH_REGS_HOST_TO_ABI0. +#define REGS_HOST_TO_ABI0_STACK (28*8 + 8) + +// PUSH_REGS_HOST_TO_ABI0 prepares for transitioning from +// the host ABI to Go ABI0 code. It saves all registers that are +// callee-save in the host ABI and caller-save in Go ABI0 and prepares +// for entry to Go. +// +// Save DI SI BP BX R12 R13 R14 R15 X6-X15 registers and the DF flag. +// Clear the DF flag for the Go ABI. +// MXCSR matches the Go ABI, so we don't have to set that, +// and Go doesn't modify it, so we don't have to save it. +#define PUSH_REGS_HOST_TO_ABI0() \ + PUSHFQ \ + CLD \ + ADJSP $(REGS_HOST_TO_ABI0_STACK - 8) \ + MOVQ DI, (0*0)(SP) \ + MOVQ SI, (1*8)(SP) \ + MOVQ BP, (2*8)(SP) \ + MOVQ BX, (3*8)(SP) \ + MOVQ R12, (4*8)(SP) \ + MOVQ R13, (5*8)(SP) \ + MOVQ R14, (6*8)(SP) \ + MOVQ R15, (7*8)(SP) \ + MOVUPS X6, (8*8)(SP) \ + MOVUPS X7, (10*8)(SP) \ + MOVUPS X8, (12*8)(SP) \ + MOVUPS X9, (14*8)(SP) \ + MOVUPS X10, (16*8)(SP) \ + MOVUPS X11, (18*8)(SP) \ + MOVUPS X12, (20*8)(SP) \ + MOVUPS X13, (22*8)(SP) \ + MOVUPS X14, (24*8)(SP) \ + MOVUPS X15, (26*8)(SP) + +#define POP_REGS_HOST_TO_ABI0() \ + MOVQ (0*0)(SP), DI \ + MOVQ (1*8)(SP), SI \ + MOVQ (2*8)(SP), BP \ + MOVQ (3*8)(SP), BX \ + MOVQ (4*8)(SP), R12 \ + MOVQ (5*8)(SP), R13 \ + MOVQ (6*8)(SP), R14 \ + MOVQ (7*8)(SP), R15 \ + MOVUPS (8*8)(SP), X6 \ + MOVUPS (10*8)(SP), X7 \ + MOVUPS (12*8)(SP), X8 \ + MOVUPS (14*8)(SP), X9 \ + MOVUPS (16*8)(SP), X10 \ + MOVUPS (18*8)(SP), X11 \ + MOVUPS (20*8)(SP), X12 \ + MOVUPS (22*8)(SP), X13 \ + MOVUPS (24*8)(SP), X14 \ + MOVUPS (26*8)(SP), X15 \ + ADJSP $-(REGS_HOST_TO_ABI0_STACK - 8) \ + POPFQ + +#else +// SysV ABI + +#define REGS_HOST_TO_ABI0_STACK (6*8) + +// SysV MXCSR matches the Go ABI, so we don't have to set that, +// and Go doesn't modify it, so we don't have to save it. +// Both SysV and Go require DF to be cleared, so that's already clear. +// The SysV and Go frame pointer conventions are compatible. +#define PUSH_REGS_HOST_TO_ABI0() \ + ADJSP $(REGS_HOST_TO_ABI0_STACK) \ + MOVQ BP, (5*8)(SP) \ + LEAQ (5*8)(SP), BP \ + MOVQ BX, (0*8)(SP) \ + MOVQ R12, (1*8)(SP) \ + MOVQ R13, (2*8)(SP) \ + MOVQ R14, (3*8)(SP) \ + MOVQ R15, (4*8)(SP) + +#define POP_REGS_HOST_TO_ABI0() \ + MOVQ (0*8)(SP), BX \ + MOVQ (1*8)(SP), R12 \ + MOVQ (2*8)(SP), R13 \ + MOVQ (3*8)(SP), R14 \ + MOVQ (4*8)(SP), R15 \ + MOVQ (5*8)(SP), BP \ + ADJSP $-(REGS_HOST_TO_ABI0_STACK) + +#endif diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/abi_arm64.h b/vendor/github.com/ebitengine/purego/internal/fakecgo/abi_arm64.h new file mode 100644 index 000000000..5d5061ec1 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/abi_arm64.h @@ -0,0 +1,39 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Macros for transitioning from the host ABI to Go ABI0. +// +// These macros save and restore the callee-saved registers +// from the stack, but they don't adjust stack pointer, so +// the user should prepare stack space in advance. +// SAVE_R19_TO_R28(offset) saves R19 ~ R28 to the stack space +// of ((offset)+0*8)(RSP) ~ ((offset)+9*8)(RSP). 
+// +// SAVE_F8_TO_F15(offset) saves F8 ~ F15 to the stack space +// of ((offset)+0*8)(RSP) ~ ((offset)+7*8)(RSP). +// +// R29 is not saved because Go will save and restore it. + +#define SAVE_R19_TO_R28(offset) \ + STP (R19, R20), ((offset)+0*8)(RSP) \ + STP (R21, R22), ((offset)+2*8)(RSP) \ + STP (R23, R24), ((offset)+4*8)(RSP) \ + STP (R25, R26), ((offset)+6*8)(RSP) \ + STP (R27, g), ((offset)+8*8)(RSP) +#define RESTORE_R19_TO_R28(offset) \ + LDP ((offset)+0*8)(RSP), (R19, R20) \ + LDP ((offset)+2*8)(RSP), (R21, R22) \ + LDP ((offset)+4*8)(RSP), (R23, R24) \ + LDP ((offset)+6*8)(RSP), (R25, R26) \ + LDP ((offset)+8*8)(RSP), (R27, g) /* R28 */ +#define SAVE_F8_TO_F15(offset) \ + FSTPD (F8, F9), ((offset)+0*8)(RSP) \ + FSTPD (F10, F11), ((offset)+2*8)(RSP) \ + FSTPD (F12, F13), ((offset)+4*8)(RSP) \ + FSTPD (F14, F15), ((offset)+6*8)(RSP) +#define RESTORE_F8_TO_F15(offset) \ + FLDPD ((offset)+0*8)(RSP), (F8, F9) \ + FLDPD ((offset)+2*8)(RSP), (F10, F11) \ + FLDPD ((offset)+4*8)(RSP), (F12, F13) \ + FLDPD ((offset)+6*8)(RSP), (F14, F15) diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/asm_amd64.s b/vendor/github.com/ebitengine/purego/internal/fakecgo/asm_amd64.s new file mode 100644 index 000000000..2b7eb57f8 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/asm_amd64.s @@ -0,0 +1,39 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "textflag.h" +#include "abi_amd64.h" + +// Called by C code generated by cmd/cgo. +// func crosscall2(fn, a unsafe.Pointer, n int32, ctxt uintptr) +// Saves C callee-saved registers and calls cgocallback with three arguments. +// fn is the PC of a func(a unsafe.Pointer) function. +// This signature is known to SWIG, so we can't change it. +TEXT crosscall2(SB), NOSPLIT, $0-0 + PUSH_REGS_HOST_TO_ABI0() + + // Make room for arguments to cgocallback. + ADJSP $0x18 + +#ifndef GOOS_windows + MOVQ DI, 0x0(SP) // fn + MOVQ SI, 0x8(SP) // arg + + // Skip n in DX. + MOVQ CX, 0x10(SP) // ctxt + +#else + MOVQ CX, 0x0(SP) // fn + MOVQ DX, 0x8(SP) // arg + + // Skip n in R8. + MOVQ R9, 0x10(SP) // ctxt + +#endif + + CALL runtime·cgocallback(SB) + + ADJSP $-0x18 + POP_REGS_HOST_TO_ABI0() + RET diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/asm_arm64.s b/vendor/github.com/ebitengine/purego/internal/fakecgo/asm_arm64.s new file mode 100644 index 000000000..50e5261d9 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/asm_arm64.s @@ -0,0 +1,36 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "textflag.h" +#include "abi_arm64.h" + +// Called by C code generated by cmd/cgo. +// func crosscall2(fn, a unsafe.Pointer, n int32, ctxt uintptr) +// Saves C callee-saved registers and calls cgocallback with three arguments. +// fn is the PC of a func(a unsafe.Pointer) function. +TEXT crosscall2(SB), NOSPLIT|NOFRAME, $0 +/* + * We still need to save all callee save register as before, and then + * push 3 args for fn (R0, R1, R3), skipping R2. + * Also note that at procedure entry in gc world, 8(RSP) will be the + * first arg. 
+ */ + SUB $(8*24), RSP + STP (R0, R1), (8*1)(RSP) + MOVD R3, (8*3)(RSP) + + SAVE_R19_TO_R28(8*4) + SAVE_F8_TO_F15(8*14) + STP (R29, R30), (8*22)(RSP) + + // Initialize Go ABI environment + BL runtime·load_g(SB) + BL runtime·cgocallback(SB) + + RESTORE_R19_TO_R28(8*4) + RESTORE_F8_TO_F15(8*14) + LDP (8*22)(RSP), (R29, R30) + + ADD $(8*24), RSP + RET diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/callbacks.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/callbacks.go new file mode 100644 index 000000000..f29e690cc --- /dev/null +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/callbacks.go @@ -0,0 +1,93 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !cgo && (darwin || freebsd || linux) + +package fakecgo + +import ( + _ "unsafe" +) + +// TODO: decide if we need _runtime_cgo_panic_internal + +//go:linkname x_cgo_init_trampoline x_cgo_init_trampoline +//go:linkname _cgo_init _cgo_init +var x_cgo_init_trampoline byte +var _cgo_init = &x_cgo_init_trampoline + +// Creates a new system thread without updating any Go state. +// +// This method is invoked during shared library loading to create a new OS +// thread to perform the runtime initialization. This method is similar to +// _cgo_sys_thread_start except that it doesn't update any Go state. + +//go:linkname x_cgo_thread_start_trampoline x_cgo_thread_start_trampoline +//go:linkname _cgo_thread_start _cgo_thread_start +var x_cgo_thread_start_trampoline byte +var _cgo_thread_start = &x_cgo_thread_start_trampoline + +// Notifies that the runtime has been initialized. +// +// We currently block at every CGO entry point (via _cgo_wait_runtime_init_done) +// to ensure that the runtime has been initialized before the CGO call is +// executed. This is necessary for shared libraries where we kickoff runtime +// initialization in a separate thread and return without waiting for this +// thread to complete the init. + +//go:linkname x_cgo_notify_runtime_init_done_trampoline x_cgo_notify_runtime_init_done_trampoline +//go:linkname _cgo_notify_runtime_init_done _cgo_notify_runtime_init_done +var x_cgo_notify_runtime_init_done_trampoline byte +var _cgo_notify_runtime_init_done = &x_cgo_notify_runtime_init_done_trampoline + +// Indicates whether a dummy thread key has been created or not. +// +// When calling go exported function from C, we register a destructor +// callback, for a dummy thread key, by using pthread_key_create. + +//go:linkname _cgo_pthread_key_created _cgo_pthread_key_created +var x_cgo_pthread_key_created uintptr +var _cgo_pthread_key_created = &x_cgo_pthread_key_created + +// Set the x_crosscall2_ptr C function pointer variable point to crosscall2. +// It's for the runtime package to call at init time. +func set_crosscall2() { + // nothing needs to be done here for fakecgo + // because it's possible to just call cgocallback directly +} + +//go:linkname _set_crosscall2 runtime.set_crosscall2 +var _set_crosscall2 = set_crosscall2 + +// Store the g into the thread-specific value. +// So that pthread_key_destructor will dropm when the thread is exiting. 
+ +//go:linkname x_cgo_bindm_trampoline x_cgo_bindm_trampoline +//go:linkname _cgo_bindm _cgo_bindm +var x_cgo_bindm_trampoline byte +var _cgo_bindm = &x_cgo_bindm_trampoline + +// TODO: decide if we need x_cgo_set_context_function +// TODO: decide if we need _cgo_yield + +var ( + // In Go 1.20 the race detector was rewritten to pure Go + // on darwin. This means that when CGO_ENABLED=0 is set + // fakecgo is built with race detector code. This is not + // good since this code is pretending to be C. The go:norace + // pragma is not enough, since it only applies to the native + // ABIInternal function. The ABIO wrapper (which is necessary, + // since all references to text symbols from assembly will use it) + // does not inherit the go:norace pragma, so it will still be + // instrumented by the race detector. + // + // To circumvent this issue, using closure calls in the + // assembly, which forces the compiler to use the ABIInternal + // native implementation (which has go:norace) instead. + threadentry_call = threadentry + x_cgo_init_call = x_cgo_init + x_cgo_setenv_call = x_cgo_setenv + x_cgo_unsetenv_call = x_cgo_unsetenv + x_cgo_thread_start_call = x_cgo_thread_start +) diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/doc.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/doc.go new file mode 100644 index 000000000..be82f7dfc --- /dev/null +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/doc.go @@ -0,0 +1,32 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2022 The Ebitengine Authors + +//go:build !cgo && (darwin || freebsd || linux) + +// Package fakecgo implements the Cgo runtime (runtime/cgo) entirely in Go. +// This allows code that calls into C to function properly when CGO_ENABLED=0. +// +// # Goals +// +// fakecgo attempts to replicate the same naming structure as in the runtime. +// For example, functions that have the prefix "gcc_*" are named "go_*". +// This makes it easier to port other GOOSs and GOARCHs as well as to keep +// it in sync with runtime/cgo. +// +// # Support +// +// Currently, fakecgo only supports macOS on amd64 & arm64. It also cannot +// be used with -buildmode=c-archive because that requires special initialization +// that fakecgo does not implement at the moment. +// +// # Usage +// +// Using fakecgo is easy just import _ "github.com/ebitengine/purego" and then +// set the environment variable CGO_ENABLED=0. +// The recommended usage for fakecgo is to prefer using runtime/cgo if possible +// but if cross-compiling or fast build times are important fakecgo is available. +// Purego will pick which ever Cgo runtime is available and prefer the one that +// comes with Go (runtime/cgo). +package fakecgo + +//go:generate go run gen.go diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/freebsd.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/freebsd.go new file mode 100644 index 000000000..bb73a709e --- /dev/null +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/freebsd.go @@ -0,0 +1,27 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build freebsd && !cgo + +package fakecgo + +import _ "unsafe" // for go:linkname + +// Supply environ and __progname, because we don't +// link against the standard FreeBSD crt0.o and the +// libc dynamic library needs them. 
+ +// Note: when building with cross-compiling or CGO_ENABLED=0, add +// the following argument to `go` so that these symbols are defined by +// making fakecgo the Cgo. +// -gcflags="github.com/ebitengine/purego/internal/fakecgo=-std" + +//go:linkname _environ environ +//go:linkname _progname __progname + +//go:cgo_export_dynamic environ +//go:cgo_export_dynamic __progname + +var _environ uintptr +var _progname uintptr diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/go_darwin_amd64.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_darwin_amd64.go new file mode 100644 index 000000000..39f5ff1f0 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_darwin_amd64.go @@ -0,0 +1,73 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !cgo + +package fakecgo + +import "unsafe" + +//go:nosplit +//go:norace +func _cgo_sys_thread_start(ts *ThreadStart) { + var attr pthread_attr_t + var ign, oset sigset_t + var p pthread_t + var size size_t + var err int + + sigfillset(&ign) + pthread_sigmask(SIG_SETMASK, &ign, &oset) + + size = pthread_get_stacksize_np(pthread_self()) + pthread_attr_init(&attr) + pthread_attr_setstacksize(&attr, size) + // Leave stacklo=0 and set stackhi=size; mstart will do the rest. + ts.g.stackhi = uintptr(size) + + err = _cgo_try_pthread_create(&p, &attr, unsafe.Pointer(threadentry_trampolineABI0), ts) + + pthread_sigmask(SIG_SETMASK, &oset, nil) + + if err != 0 { + print("fakecgo: pthread_create failed: ") + println(err) + abort() + } +} + +// threadentry_trampolineABI0 maps the C ABI to Go ABI then calls the Go function +// +//go:linkname x_threadentry_trampoline threadentry_trampoline +var x_threadentry_trampoline byte +var threadentry_trampolineABI0 = &x_threadentry_trampoline + +//go:nosplit +//go:norace +func threadentry(v unsafe.Pointer) unsafe.Pointer { + ts := *(*ThreadStart)(v) + free(v) + + setg_trampoline(setg_func, uintptr(unsafe.Pointer(ts.g))) + + // faking funcs in go is a bit a... involved - but the following works :) + fn := uintptr(unsafe.Pointer(&ts.fn)) + (*(*func())(unsafe.Pointer(&fn)))() + + return nil +} + +// here we will store a pointer to the provided setg func +var setg_func uintptr + +//go:nosplit +//go:norace +func x_cgo_init(g *G, setg uintptr) { + var size size_t + + setg_func = setg + + size = pthread_get_stacksize_np(pthread_self()) + g.stacklo = uintptr(unsafe.Add(unsafe.Pointer(&size), -size+4096)) +} diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/go_darwin_arm64.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_darwin_arm64.go new file mode 100644 index 000000000..d0868f0f7 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_darwin_arm64.go @@ -0,0 +1,88 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !cgo + +package fakecgo + +import "unsafe" + +//go:nosplit +//go:norace +func _cgo_sys_thread_start(ts *ThreadStart) { + var attr pthread_attr_t + var ign, oset sigset_t + var p pthread_t + var size size_t + var err int + + sigfillset(&ign) + pthread_sigmask(SIG_SETMASK, &ign, &oset) + + size = pthread_get_stacksize_np(pthread_self()) + pthread_attr_init(&attr) + pthread_attr_setstacksize(&attr, size) + // Leave stacklo=0 and set stackhi=size; mstart will do the rest. 
+ ts.g.stackhi = uintptr(size) + + err = _cgo_try_pthread_create(&p, &attr, unsafe.Pointer(threadentry_trampolineABI0), ts) + + pthread_sigmask(SIG_SETMASK, &oset, nil) + + if err != 0 { + print("fakecgo: pthread_create failed: ") + println(err) + abort() + } +} + +// threadentry_trampolineABI0 maps the C ABI to Go ABI then calls the Go function +// +//go:linkname x_threadentry_trampoline threadentry_trampoline +var x_threadentry_trampoline byte +var threadentry_trampolineABI0 = &x_threadentry_trampoline + +//go:nosplit +//go:norace +func threadentry(v unsafe.Pointer) unsafe.Pointer { + ts := *(*ThreadStart)(v) + free(v) + + // TODO: support ios + //#if TARGET_OS_IPHONE + // darwin_arm_init_thread_exception_port(); + //#endif + setg_trampoline(setg_func, uintptr(unsafe.Pointer(ts.g))) + + // faking funcs in go is a bit a... involved - but the following works :) + fn := uintptr(unsafe.Pointer(&ts.fn)) + (*(*func())(unsafe.Pointer(&fn)))() + + return nil +} + +// here we will store a pointer to the provided setg func +var setg_func uintptr + +// x_cgo_init(G *g, void (*setg)(void*)) (runtime/cgo/gcc_linux_amd64.c) +// This get's called during startup, adjusts stacklo, and provides a pointer to setg_gcc for us +// Additionally, if we set _cgo_init to non-null, go won't do it's own TLS setup +// This function can't be go:systemstack since go is not in a state where the systemcheck would work. +// +//go:nosplit +//go:norace +func x_cgo_init(g *G, setg uintptr) { + var size size_t + + setg_func = setg + size = pthread_get_stacksize_np(pthread_self()) + g.stacklo = uintptr(unsafe.Add(unsafe.Pointer(&size), -size+4096)) + + //TODO: support ios + //#if TARGET_OS_IPHONE + // darwin_arm_init_mach_exception_handler(); + // darwin_arm_init_thread_exception_port(); + // init_working_dir(); + //#endif +} diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/go_freebsd_amd64.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_freebsd_amd64.go new file mode 100644 index 000000000..c9ff7156a --- /dev/null +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_freebsd_amd64.go @@ -0,0 +1,95 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !cgo + +package fakecgo + +import "unsafe" + +//go:nosplit +func _cgo_sys_thread_start(ts *ThreadStart) { + var attr pthread_attr_t + var ign, oset sigset_t + var p pthread_t + var size size_t + var err int + + //fprintf(stderr, "runtime/cgo: _cgo_sys_thread_start: fn=%p, g=%p\n", ts->fn, ts->g); // debug + sigfillset(&ign) + pthread_sigmask(SIG_SETMASK, &ign, &oset) + + pthread_attr_init(&attr) + pthread_attr_getstacksize(&attr, &size) + // Leave stacklo=0 and set stackhi=size; mstart will do the rest. + ts.g.stackhi = uintptr(size) + + err = _cgo_try_pthread_create(&p, &attr, unsafe.Pointer(threadentry_trampolineABI0), ts) + + pthread_sigmask(SIG_SETMASK, &oset, nil) + + if err != 0 { + print("fakecgo: pthread_create failed: ") + println(err) + abort() + } +} + +// threadentry_trampolineABI0 maps the C ABI to Go ABI then calls the Go function +// +//go:linkname x_threadentry_trampoline threadentry_trampoline +var x_threadentry_trampoline byte +var threadentry_trampolineABI0 = &x_threadentry_trampoline + +//go:nosplit +func threadentry(v unsafe.Pointer) unsafe.Pointer { + ts := *(*ThreadStart)(v) + free(v) + + setg_trampoline(setg_func, uintptr(unsafe.Pointer(ts.g))) + + // faking funcs in go is a bit a... 
involved - but the following works :) + fn := uintptr(unsafe.Pointer(&ts.fn)) + (*(*func())(unsafe.Pointer(&fn)))() + + return nil +} + +// here we will store a pointer to the provided setg func +var setg_func uintptr + +//go:nosplit +func x_cgo_init(g *G, setg uintptr) { + var size size_t + var attr *pthread_attr_t + + /* The memory sanitizer distributed with versions of clang + before 3.8 has a bug: if you call mmap before malloc, mmap + may return an address that is later overwritten by the msan + library. Avoid this problem by forcing a call to malloc + here, before we ever call malloc. + + This is only required for the memory sanitizer, so it's + unfortunate that we always run it. It should be possible + to remove this when we no longer care about versions of + clang before 3.8. The test for this is + misc/cgo/testsanitizers. + + GCC works hard to eliminate a seemingly unnecessary call to + malloc, so we actually use the memory we allocate. */ + + setg_func = setg + attr = (*pthread_attr_t)(malloc(unsafe.Sizeof(*attr))) + if attr == nil { + println("fakecgo: malloc failed") + abort() + } + pthread_attr_init(attr) + pthread_attr_getstacksize(attr, &size) + // runtime/cgo uses __builtin_frame_address(0) instead of `uintptr(unsafe.Pointer(&size))` + // but this should be OK since we are taking the address of the first variable in this function. + g.stacklo = uintptr(unsafe.Pointer(&size)) - uintptr(size) + 4096 + pthread_attr_destroy(attr) + free(unsafe.Pointer(attr)) +} diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/go_freebsd_arm64.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_freebsd_arm64.go new file mode 100644 index 000000000..e3a060b93 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_freebsd_arm64.go @@ -0,0 +1,98 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !cgo + +package fakecgo + +import "unsafe" + +//go:nosplit +func _cgo_sys_thread_start(ts *ThreadStart) { + var attr pthread_attr_t + var ign, oset sigset_t + var p pthread_t + var size size_t + var err int + + // fprintf(stderr, "runtime/cgo: _cgo_sys_thread_start: fn=%p, g=%p\n", ts->fn, ts->g); // debug + sigfillset(&ign) + pthread_sigmask(SIG_SETMASK, &ign, &oset) + + pthread_attr_init(&attr) + pthread_attr_getstacksize(&attr, &size) + // Leave stacklo=0 and set stackhi=size; mstart will do the rest. + ts.g.stackhi = uintptr(size) + + err = _cgo_try_pthread_create(&p, &attr, unsafe.Pointer(threadentry_trampolineABI0), ts) + + pthread_sigmask(SIG_SETMASK, &oset, nil) + + if err != 0 { + print("fakecgo: pthread_create failed: ") + println(err) + abort() + } +} + +// threadentry_trampolineABI0 maps the C ABI to Go ABI then calls the Go function +// +//go:linkname x_threadentry_trampoline threadentry_trampoline +var x_threadentry_trampoline byte +var threadentry_trampolineABI0 = &x_threadentry_trampoline + +//go:nosplit +func threadentry(v unsafe.Pointer) unsafe.Pointer { + ts := *(*ThreadStart)(v) + free(v) + + setg_trampoline(setg_func, uintptr(unsafe.Pointer(ts.g))) + + // faking funcs in go is a bit a... 
involved - but the following works :) + fn := uintptr(unsafe.Pointer(&ts.fn)) + (*(*func())(unsafe.Pointer(&fn)))() + + return nil +} + +// here we will store a pointer to the provided setg func +var setg_func uintptr + +// x_cgo_init(G *g, void (*setg)(void*)) (runtime/cgo/gcc_linux_amd64.c) +// This get's called during startup, adjusts stacklo, and provides a pointer to setg_gcc for us +// Additionally, if we set _cgo_init to non-null, go won't do it's own TLS setup +// This function can't be go:systemstack since go is not in a state where the systemcheck would work. +// +//go:nosplit +func x_cgo_init(g *G, setg uintptr) { + var size size_t + var attr *pthread_attr_t + + /* The memory sanitizer distributed with versions of clang + before 3.8 has a bug: if you call mmap before malloc, mmap + may return an address that is later overwritten by the msan + library. Avoid this problem by forcing a call to malloc + here, before we ever call malloc. + + This is only required for the memory sanitizer, so it's + unfortunate that we always run it. It should be possible + to remove this when we no longer care about versions of + clang before 3.8. The test for this is + misc/cgo/testsanitizers. + + GCC works hard to eliminate a seemingly unnecessary call to + malloc, so we actually use the memory we allocate. */ + + setg_func = setg + attr = (*pthread_attr_t)(malloc(unsafe.Sizeof(*attr))) + if attr == nil { + println("fakecgo: malloc failed") + abort() + } + pthread_attr_init(attr) + pthread_attr_getstacksize(attr, &size) + g.stacklo = uintptr(unsafe.Pointer(&size)) - uintptr(size) + 4096 + pthread_attr_destroy(attr) + free(unsafe.Pointer(attr)) +} diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/go_libinit.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_libinit.go new file mode 100644 index 000000000..e5a66f39d --- /dev/null +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_libinit.go @@ -0,0 +1,72 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2022 The Ebitengine Authors + +//go:build !cgo && (darwin || freebsd || linux) + +package fakecgo + +import ( + "syscall" + "unsafe" +) + +var ( + pthread_g pthread_key_t + + runtime_init_cond = PTHREAD_COND_INITIALIZER + runtime_init_mu = PTHREAD_MUTEX_INITIALIZER + runtime_init_done int +) + +//go:nosplit +//go:norace +func x_cgo_notify_runtime_init_done() { + pthread_mutex_lock(&runtime_init_mu) + runtime_init_done = 1 + pthread_cond_broadcast(&runtime_init_cond) + pthread_mutex_unlock(&runtime_init_mu) +} + +// Store the g into a thread-specific value associated with the pthread key pthread_g. +// And pthread_key_destructor will dropm when the thread is exiting. +// +//go:norace +func x_cgo_bindm(g unsafe.Pointer) { + // We assume this will always succeed, otherwise, there might be extra M leaking, + // when a C thread exits after a cgo call. + // We only invoke this function once per thread in runtime.needAndBindM, + // and the next calls just reuse the bound m. + pthread_setspecific(pthread_g, g) +} + +// _cgo_try_pthread_create retries pthread_create if it fails with +// EAGAIN. +// +//go:nosplit +//go:norace +func _cgo_try_pthread_create(thread *pthread_t, attr *pthread_attr_t, pfn unsafe.Pointer, arg *ThreadStart) int { + var ts syscall.Timespec + // tries needs to be the same type as syscall.Timespec.Nsec + // but the fields are int32 on 32bit and int64 on 64bit. + // tries is assigned to syscall.Timespec.Nsec in order to match its type. 
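+	// Each failed attempt below sleeps (tries+1) milliseconds, so a
+	// pthread_create that keeps returning EAGAIN is abandoned after roughly
+	// 1+2+...+20 = 210ms of accumulated backoff.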
+ tries := ts.Nsec + var err int + + for tries = 0; tries < 20; tries++ { + // inlined this call because it ran out of stack when inlining was disabled + err = int(call5(pthread_createABI0, uintptr(unsafe.Pointer(thread)), uintptr(unsafe.Pointer(attr)), uintptr(pfn), uintptr(unsafe.Pointer(arg)), 0)) + if err == 0 { + // inlined this call because it ran out of stack when inlining was disabled + call5(pthread_detachABI0, uintptr(*thread), 0, 0, 0, 0) + return 0 + } + if err != int(syscall.EAGAIN) { + return err + } + ts.Sec = 0 + ts.Nsec = (tries + 1) * 1000 * 1000 // Milliseconds. + // inlined this call because it ran out of stack when inlining was disabled + call5(nanosleepABI0, uintptr(unsafe.Pointer(&ts)), 0, 0, 0, 0) + } + return int(syscall.EAGAIN) +} diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/go_linux_amd64.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_linux_amd64.go new file mode 100644 index 000000000..c9ff7156a --- /dev/null +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_linux_amd64.go @@ -0,0 +1,95 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !cgo + +package fakecgo + +import "unsafe" + +//go:nosplit +func _cgo_sys_thread_start(ts *ThreadStart) { + var attr pthread_attr_t + var ign, oset sigset_t + var p pthread_t + var size size_t + var err int + + //fprintf(stderr, "runtime/cgo: _cgo_sys_thread_start: fn=%p, g=%p\n", ts->fn, ts->g); // debug + sigfillset(&ign) + pthread_sigmask(SIG_SETMASK, &ign, &oset) + + pthread_attr_init(&attr) + pthread_attr_getstacksize(&attr, &size) + // Leave stacklo=0 and set stackhi=size; mstart will do the rest. + ts.g.stackhi = uintptr(size) + + err = _cgo_try_pthread_create(&p, &attr, unsafe.Pointer(threadentry_trampolineABI0), ts) + + pthread_sigmask(SIG_SETMASK, &oset, nil) + + if err != 0 { + print("fakecgo: pthread_create failed: ") + println(err) + abort() + } +} + +// threadentry_trampolineABI0 maps the C ABI to Go ABI then calls the Go function +// +//go:linkname x_threadentry_trampoline threadentry_trampoline +var x_threadentry_trampoline byte +var threadentry_trampolineABI0 = &x_threadentry_trampoline + +//go:nosplit +func threadentry(v unsafe.Pointer) unsafe.Pointer { + ts := *(*ThreadStart)(v) + free(v) + + setg_trampoline(setg_func, uintptr(unsafe.Pointer(ts.g))) + + // faking funcs in go is a bit a... involved - but the following works :) + fn := uintptr(unsafe.Pointer(&ts.fn)) + (*(*func())(unsafe.Pointer(&fn)))() + + return nil +} + +// here we will store a pointer to the provided setg func +var setg_func uintptr + +//go:nosplit +func x_cgo_init(g *G, setg uintptr) { + var size size_t + var attr *pthread_attr_t + + /* The memory sanitizer distributed with versions of clang + before 3.8 has a bug: if you call mmap before malloc, mmap + may return an address that is later overwritten by the msan + library. Avoid this problem by forcing a call to malloc + here, before we ever call malloc. + + This is only required for the memory sanitizer, so it's + unfortunate that we always run it. It should be possible + to remove this when we no longer care about versions of + clang before 3.8. The test for this is + misc/cgo/testsanitizers. + + GCC works hard to eliminate a seemingly unnecessary call to + malloc, so we actually use the memory we allocate. 
*/ + + setg_func = setg + attr = (*pthread_attr_t)(malloc(unsafe.Sizeof(*attr))) + if attr == nil { + println("fakecgo: malloc failed") + abort() + } + pthread_attr_init(attr) + pthread_attr_getstacksize(attr, &size) + // runtime/cgo uses __builtin_frame_address(0) instead of `uintptr(unsafe.Pointer(&size))` + // but this should be OK since we are taking the address of the first variable in this function. + g.stacklo = uintptr(unsafe.Pointer(&size)) - uintptr(size) + 4096 + pthread_attr_destroy(attr) + free(unsafe.Pointer(attr)) +} diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/go_linux_arm64.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_linux_arm64.go new file mode 100644 index 000000000..a3b1cca59 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_linux_arm64.go @@ -0,0 +1,98 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !cgo + +package fakecgo + +import "unsafe" + +//go:nosplit +func _cgo_sys_thread_start(ts *ThreadStart) { + var attr pthread_attr_t + var ign, oset sigset_t + var p pthread_t + var size size_t + var err int + + //fprintf(stderr, "runtime/cgo: _cgo_sys_thread_start: fn=%p, g=%p\n", ts->fn, ts->g); // debug + sigfillset(&ign) + pthread_sigmask(SIG_SETMASK, &ign, &oset) + + pthread_attr_init(&attr) + pthread_attr_getstacksize(&attr, &size) + // Leave stacklo=0 and set stackhi=size; mstart will do the rest. + ts.g.stackhi = uintptr(size) + + err = _cgo_try_pthread_create(&p, &attr, unsafe.Pointer(threadentry_trampolineABI0), ts) + + pthread_sigmask(SIG_SETMASK, &oset, nil) + + if err != 0 { + print("fakecgo: pthread_create failed: ") + println(err) + abort() + } +} + +// threadentry_trampolineABI0 maps the C ABI to Go ABI then calls the Go function +// +//go:linkname x_threadentry_trampoline threadentry_trampoline +var x_threadentry_trampoline byte +var threadentry_trampolineABI0 = &x_threadentry_trampoline + +//go:nosplit +func threadentry(v unsafe.Pointer) unsafe.Pointer { + ts := *(*ThreadStart)(v) + free(v) + + setg_trampoline(setg_func, uintptr(unsafe.Pointer(ts.g))) + + // faking funcs in go is a bit a... involved - but the following works :) + fn := uintptr(unsafe.Pointer(&ts.fn)) + (*(*func())(unsafe.Pointer(&fn)))() + + return nil +} + +// here we will store a pointer to the provided setg func +var setg_func uintptr + +// x_cgo_init(G *g, void (*setg)(void*)) (runtime/cgo/gcc_linux_amd64.c) +// This get's called during startup, adjusts stacklo, and provides a pointer to setg_gcc for us +// Additionally, if we set _cgo_init to non-null, go won't do it's own TLS setup +// This function can't be go:systemstack since go is not in a state where the systemcheck would work. +// +//go:nosplit +func x_cgo_init(g *G, setg uintptr) { + var size size_t + var attr *pthread_attr_t + + /* The memory sanitizer distributed with versions of clang + before 3.8 has a bug: if you call mmap before malloc, mmap + may return an address that is later overwritten by the msan + library. Avoid this problem by forcing a call to malloc + here, before we ever call malloc. + + This is only required for the memory sanitizer, so it's + unfortunate that we always run it. It should be possible + to remove this when we no longer care about versions of + clang before 3.8. The test for this is + misc/cgo/testsanitizers. 
+ + GCC works hard to eliminate a seemingly unnecessary call to + malloc, so we actually use the memory we allocate. */ + + setg_func = setg + attr = (*pthread_attr_t)(malloc(unsafe.Sizeof(*attr))) + if attr == nil { + println("fakecgo: malloc failed") + abort() + } + pthread_attr_init(attr) + pthread_attr_getstacksize(attr, &size) + g.stacklo = uintptr(unsafe.Pointer(&size)) - uintptr(size) + 4096 + pthread_attr_destroy(attr) + free(unsafe.Pointer(attr)) +} diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/go_setenv.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_setenv.go new file mode 100644 index 000000000..e42d84f0b --- /dev/null +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_setenv.go @@ -0,0 +1,18 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2022 The Ebitengine Authors + +//go:build !cgo && (darwin || freebsd || linux) + +package fakecgo + +//go:nosplit +//go:norace +func x_cgo_setenv(arg *[2]*byte) { + setenv(arg[0], arg[1], 1) +} + +//go:nosplit +//go:norace +func x_cgo_unsetenv(arg *[1]*byte) { + unsetenv(arg[0]) +} diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/go_util.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_util.go new file mode 100644 index 000000000..0ac10d1f1 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_util.go @@ -0,0 +1,37 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2022 The Ebitengine Authors + +//go:build !cgo && (darwin || freebsd || linux) + +package fakecgo + +import "unsafe" + +// _cgo_thread_start is split into three parts in cgo since only one part is system dependent (keep it here for easier handling) + +// _cgo_thread_start(ThreadStart *arg) (runtime/cgo/gcc_util.c) +// This get's called instead of the go code for creating new threads +// -> pthread_* stuff is used, so threads are setup correctly for C +// If this is missing, TLS is only setup correctly on thread 1! +// This function should be go:systemstack instead of go:nosplit (but that requires runtime) +// +//go:nosplit +//go:norace +func x_cgo_thread_start(arg *ThreadStart) { + var ts *ThreadStart + // Make our own copy that can persist after we return. + // _cgo_tsan_acquire(); + ts = (*ThreadStart)(malloc(unsafe.Sizeof(*ts))) + // _cgo_tsan_release(); + if ts == nil { + println("fakecgo: out of memory in thread_start") + abort() + } + // *ts = *arg would cause a writebarrier so copy using slices + s1 := unsafe.Slice((*uintptr)(unsafe.Pointer(ts)), unsafe.Sizeof(*ts)/8) + s2 := unsafe.Slice((*uintptr)(unsafe.Pointer(arg)), unsafe.Sizeof(*arg)/8) + for i := range s2 { + s1[i] = s2[i] + } + _cgo_sys_thread_start(ts) // OS-dependent half +} diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/iscgo.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/iscgo.go new file mode 100644 index 000000000..28af41cc6 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/iscgo.go @@ -0,0 +1,19 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !cgo && (darwin || freebsd || linux) + +// The runtime package contains an uninitialized definition +// for runtime·iscgo. Override it to tell the runtime we're here. +// There are various function pointers that should be set too, +// but those depend on dynamic linker magic to get initialized +// correctly, and sometimes they break. 
This variable is a +// backup: it depends only on old C style static linking rules. + +package fakecgo + +import _ "unsafe" // for go:linkname + +//go:linkname _iscgo runtime.iscgo +var _iscgo bool = true diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/libcgo.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/libcgo.go new file mode 100644 index 000000000..38f944193 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/libcgo.go @@ -0,0 +1,39 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2022 The Ebitengine Authors + +//go:build !cgo && (darwin || freebsd || linux) + +package fakecgo + +type ( + size_t uintptr + // Sources: + // Darwin (32 bytes) - https://github.com/apple/darwin-xnu/blob/2ff845c2e033bd0ff64b5b6aa6063a1f8f65aa32/bsd/sys/_types.h#L74 + // FreeBSD (32 bytes) - https://github.com/DoctorWkt/xv6-freebsd/blob/d2a294c2a984baed27676068b15ed9a29b06ab6f/include/signal.h#L98C9-L98C21 + // Linux (128 bytes) - https://github.com/torvalds/linux/blob/ab75170520d4964f3acf8bb1f91d34cbc650688e/arch/x86/include/asm/signal.h#L25 + sigset_t [128]byte + pthread_attr_t [64]byte + pthread_t int + pthread_key_t uint64 +) + +// for pthread_sigmask: + +type sighow int32 + +const ( + SIG_BLOCK sighow = 0 + SIG_UNBLOCK sighow = 1 + SIG_SETMASK sighow = 2 +) + +type G struct { + stacklo uintptr + stackhi uintptr +} + +type ThreadStart struct { + g *G + tls *uintptr + fn uintptr +} diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/libcgo_darwin.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/libcgo_darwin.go new file mode 100644 index 000000000..af148333f --- /dev/null +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/libcgo_darwin.go @@ -0,0 +1,22 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2022 The Ebitengine Authors + +//go:build !cgo + +package fakecgo + +type ( + pthread_mutex_t struct { + sig int64 + opaque [56]byte + } + pthread_cond_t struct { + sig int64 + opaque [40]byte + } +) + +var ( + PTHREAD_COND_INITIALIZER = pthread_cond_t{sig: 0x3CB0B1BB} + PTHREAD_MUTEX_INITIALIZER = pthread_mutex_t{sig: 0x32AAABA7} +) diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/libcgo_freebsd.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/libcgo_freebsd.go new file mode 100644 index 000000000..ca1f722c9 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/libcgo_freebsd.go @@ -0,0 +1,16 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2022 The Ebitengine Authors + +//go:build !cgo + +package fakecgo + +type ( + pthread_cond_t uintptr + pthread_mutex_t uintptr +) + +var ( + PTHREAD_COND_INITIALIZER = pthread_cond_t(0) + PTHREAD_MUTEX_INITIALIZER = pthread_mutex_t(0) +) diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/libcgo_linux.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/libcgo_linux.go new file mode 100644 index 000000000..c4b6e9ea5 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/libcgo_linux.go @@ -0,0 +1,16 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2022 The Ebitengine Authors + +//go:build !cgo + +package fakecgo + +type ( + pthread_cond_t [48]byte + pthread_mutex_t [48]byte +) + +var ( + PTHREAD_COND_INITIALIZER = pthread_cond_t{} + PTHREAD_MUTEX_INITIALIZER = pthread_mutex_t{} +) diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/setenv.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/setenv.go new 
file mode 100644 index 000000000..f30af0e15 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/setenv.go @@ -0,0 +1,19 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !cgo && (darwin || freebsd || linux) + +package fakecgo + +import _ "unsafe" // for go:linkname + +//go:linkname x_cgo_setenv_trampoline x_cgo_setenv_trampoline +//go:linkname _cgo_setenv runtime._cgo_setenv +var x_cgo_setenv_trampoline byte +var _cgo_setenv = &x_cgo_setenv_trampoline + +//go:linkname x_cgo_unsetenv_trampoline x_cgo_unsetenv_trampoline +//go:linkname _cgo_unsetenv runtime._cgo_unsetenv +var x_cgo_unsetenv_trampoline byte +var _cgo_unsetenv = &x_cgo_unsetenv_trampoline diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/symbols.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/symbols.go new file mode 100644 index 000000000..d51702400 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/symbols.go @@ -0,0 +1,221 @@ +// Code generated by 'go generate' with gen.go. DO NOT EDIT. + +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2022 The Ebitengine Authors + +//go:build !cgo && (darwin || freebsd || linux) + +package fakecgo + +import ( + "syscall" + "unsafe" +) + +// setg_trampoline calls setg with the G provided +func setg_trampoline(setg uintptr, G uintptr) + +// call5 takes fn the C function and 5 arguments and calls the function with those arguments +func call5(fn, a1, a2, a3, a4, a5 uintptr) uintptr + +//go:nosplit +//go:norace +func malloc(size uintptr) unsafe.Pointer { + ret := call5(mallocABI0, uintptr(size), 0, 0, 0, 0) + // this indirection is to avoid go vet complaining about possible misuse of unsafe.Pointer + return *(*unsafe.Pointer)(unsafe.Pointer(&ret)) +} + +//go:nosplit +//go:norace +func free(ptr unsafe.Pointer) { + call5(freeABI0, uintptr(ptr), 0, 0, 0, 0) +} + +//go:nosplit +//go:norace +func setenv(name *byte, value *byte, overwrite int32) int32 { + return int32(call5(setenvABI0, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(value)), uintptr(overwrite), 0, 0)) +} + +//go:nosplit +//go:norace +func unsetenv(name *byte) int32 { + return int32(call5(unsetenvABI0, uintptr(unsafe.Pointer(name)), 0, 0, 0, 0)) +} + +//go:nosplit +//go:norace +func sigfillset(set *sigset_t) int32 { + return int32(call5(sigfillsetABI0, uintptr(unsafe.Pointer(set)), 0, 0, 0, 0)) +} + +//go:nosplit +//go:norace +func nanosleep(ts *syscall.Timespec, rem *syscall.Timespec) int32 { + return int32(call5(nanosleepABI0, uintptr(unsafe.Pointer(ts)), uintptr(unsafe.Pointer(rem)), 0, 0, 0)) +} + +//go:nosplit +//go:norace +func abort() { + call5(abortABI0, 0, 0, 0, 0, 0) +} + +//go:nosplit +//go:norace +func pthread_attr_init(attr *pthread_attr_t) int32 { + return int32(call5(pthread_attr_initABI0, uintptr(unsafe.Pointer(attr)), 0, 0, 0, 0)) +} + +//go:nosplit +//go:norace +func pthread_create(thread *pthread_t, attr *pthread_attr_t, start unsafe.Pointer, arg unsafe.Pointer) int32 { + return int32(call5(pthread_createABI0, uintptr(unsafe.Pointer(thread)), uintptr(unsafe.Pointer(attr)), uintptr(start), uintptr(arg), 0)) +} + +//go:nosplit +//go:norace +func pthread_detach(thread pthread_t) int32 { + return int32(call5(pthread_detachABI0, uintptr(thread), 0, 0, 0, 0)) +} + +//go:nosplit +//go:norace +func pthread_sigmask(how sighow, ign *sigset_t, oset *sigset_t) int32 { + return 
int32(call5(pthread_sigmaskABI0, uintptr(how), uintptr(unsafe.Pointer(ign)), uintptr(unsafe.Pointer(oset)), 0, 0)) +} + +//go:nosplit +//go:norace +func pthread_self() pthread_t { + return pthread_t(call5(pthread_selfABI0, 0, 0, 0, 0, 0)) +} + +//go:nosplit +//go:norace +func pthread_get_stacksize_np(thread pthread_t) size_t { + return size_t(call5(pthread_get_stacksize_npABI0, uintptr(thread), 0, 0, 0, 0)) +} + +//go:nosplit +//go:norace +func pthread_attr_getstacksize(attr *pthread_attr_t, stacksize *size_t) int32 { + return int32(call5(pthread_attr_getstacksizeABI0, uintptr(unsafe.Pointer(attr)), uintptr(unsafe.Pointer(stacksize)), 0, 0, 0)) +} + +//go:nosplit +//go:norace +func pthread_attr_setstacksize(attr *pthread_attr_t, size size_t) int32 { + return int32(call5(pthread_attr_setstacksizeABI0, uintptr(unsafe.Pointer(attr)), uintptr(size), 0, 0, 0)) +} + +//go:nosplit +//go:norace +func pthread_attr_destroy(attr *pthread_attr_t) int32 { + return int32(call5(pthread_attr_destroyABI0, uintptr(unsafe.Pointer(attr)), 0, 0, 0, 0)) +} + +//go:nosplit +//go:norace +func pthread_mutex_lock(mutex *pthread_mutex_t) int32 { + return int32(call5(pthread_mutex_lockABI0, uintptr(unsafe.Pointer(mutex)), 0, 0, 0, 0)) +} + +//go:nosplit +//go:norace +func pthread_mutex_unlock(mutex *pthread_mutex_t) int32 { + return int32(call5(pthread_mutex_unlockABI0, uintptr(unsafe.Pointer(mutex)), 0, 0, 0, 0)) +} + +//go:nosplit +//go:norace +func pthread_cond_broadcast(cond *pthread_cond_t) int32 { + return int32(call5(pthread_cond_broadcastABI0, uintptr(unsafe.Pointer(cond)), 0, 0, 0, 0)) +} + +//go:nosplit +//go:norace +func pthread_setspecific(key pthread_key_t, value unsafe.Pointer) int32 { + return int32(call5(pthread_setspecificABI0, uintptr(key), uintptr(value), 0, 0, 0)) +} + +//go:linkname _malloc _malloc +var _malloc uint8 +var mallocABI0 = uintptr(unsafe.Pointer(&_malloc)) + +//go:linkname _free _free +var _free uint8 +var freeABI0 = uintptr(unsafe.Pointer(&_free)) + +//go:linkname _setenv _setenv +var _setenv uint8 +var setenvABI0 = uintptr(unsafe.Pointer(&_setenv)) + +//go:linkname _unsetenv _unsetenv +var _unsetenv uint8 +var unsetenvABI0 = uintptr(unsafe.Pointer(&_unsetenv)) + +//go:linkname _sigfillset _sigfillset +var _sigfillset uint8 +var sigfillsetABI0 = uintptr(unsafe.Pointer(&_sigfillset)) + +//go:linkname _nanosleep _nanosleep +var _nanosleep uint8 +var nanosleepABI0 = uintptr(unsafe.Pointer(&_nanosleep)) + +//go:linkname _abort _abort +var _abort uint8 +var abortABI0 = uintptr(unsafe.Pointer(&_abort)) + +//go:linkname _pthread_attr_init _pthread_attr_init +var _pthread_attr_init uint8 +var pthread_attr_initABI0 = uintptr(unsafe.Pointer(&_pthread_attr_init)) + +//go:linkname _pthread_create _pthread_create +var _pthread_create uint8 +var pthread_createABI0 = uintptr(unsafe.Pointer(&_pthread_create)) + +//go:linkname _pthread_detach _pthread_detach +var _pthread_detach uint8 +var pthread_detachABI0 = uintptr(unsafe.Pointer(&_pthread_detach)) + +//go:linkname _pthread_sigmask _pthread_sigmask +var _pthread_sigmask uint8 +var pthread_sigmaskABI0 = uintptr(unsafe.Pointer(&_pthread_sigmask)) + +//go:linkname _pthread_self _pthread_self +var _pthread_self uint8 +var pthread_selfABI0 = uintptr(unsafe.Pointer(&_pthread_self)) + +//go:linkname _pthread_get_stacksize_np _pthread_get_stacksize_np +var _pthread_get_stacksize_np uint8 +var pthread_get_stacksize_npABI0 = uintptr(unsafe.Pointer(&_pthread_get_stacksize_np)) + +//go:linkname _pthread_attr_getstacksize _pthread_attr_getstacksize +var 
_pthread_attr_getstacksize uint8 +var pthread_attr_getstacksizeABI0 = uintptr(unsafe.Pointer(&_pthread_attr_getstacksize)) + +//go:linkname _pthread_attr_setstacksize _pthread_attr_setstacksize +var _pthread_attr_setstacksize uint8 +var pthread_attr_setstacksizeABI0 = uintptr(unsafe.Pointer(&_pthread_attr_setstacksize)) + +//go:linkname _pthread_attr_destroy _pthread_attr_destroy +var _pthread_attr_destroy uint8 +var pthread_attr_destroyABI0 = uintptr(unsafe.Pointer(&_pthread_attr_destroy)) + +//go:linkname _pthread_mutex_lock _pthread_mutex_lock +var _pthread_mutex_lock uint8 +var pthread_mutex_lockABI0 = uintptr(unsafe.Pointer(&_pthread_mutex_lock)) + +//go:linkname _pthread_mutex_unlock _pthread_mutex_unlock +var _pthread_mutex_unlock uint8 +var pthread_mutex_unlockABI0 = uintptr(unsafe.Pointer(&_pthread_mutex_unlock)) + +//go:linkname _pthread_cond_broadcast _pthread_cond_broadcast +var _pthread_cond_broadcast uint8 +var pthread_cond_broadcastABI0 = uintptr(unsafe.Pointer(&_pthread_cond_broadcast)) + +//go:linkname _pthread_setspecific _pthread_setspecific +var _pthread_setspecific uint8 +var pthread_setspecificABI0 = uintptr(unsafe.Pointer(&_pthread_setspecific)) diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/symbols_darwin.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/symbols_darwin.go new file mode 100644 index 000000000..54aaa4628 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/symbols_darwin.go @@ -0,0 +1,29 @@ +// Code generated by 'go generate' with gen.go. DO NOT EDIT. + +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2022 The Ebitengine Authors + +//go:build !cgo + +package fakecgo + +//go:cgo_import_dynamic purego_malloc malloc "/usr/lib/libSystem.B.dylib" +//go:cgo_import_dynamic purego_free free "/usr/lib/libSystem.B.dylib" +//go:cgo_import_dynamic purego_setenv setenv "/usr/lib/libSystem.B.dylib" +//go:cgo_import_dynamic purego_unsetenv unsetenv "/usr/lib/libSystem.B.dylib" +//go:cgo_import_dynamic purego_sigfillset sigfillset "/usr/lib/libSystem.B.dylib" +//go:cgo_import_dynamic purego_nanosleep nanosleep "/usr/lib/libSystem.B.dylib" +//go:cgo_import_dynamic purego_abort abort "/usr/lib/libSystem.B.dylib" +//go:cgo_import_dynamic purego_pthread_attr_init pthread_attr_init "/usr/lib/libSystem.B.dylib" +//go:cgo_import_dynamic purego_pthread_create pthread_create "/usr/lib/libSystem.B.dylib" +//go:cgo_import_dynamic purego_pthread_detach pthread_detach "/usr/lib/libSystem.B.dylib" +//go:cgo_import_dynamic purego_pthread_sigmask pthread_sigmask "/usr/lib/libSystem.B.dylib" +//go:cgo_import_dynamic purego_pthread_self pthread_self "/usr/lib/libSystem.B.dylib" +//go:cgo_import_dynamic purego_pthread_get_stacksize_np pthread_get_stacksize_np "/usr/lib/libSystem.B.dylib" +//go:cgo_import_dynamic purego_pthread_attr_getstacksize pthread_attr_getstacksize "/usr/lib/libSystem.B.dylib" +//go:cgo_import_dynamic purego_pthread_attr_setstacksize pthread_attr_setstacksize "/usr/lib/libSystem.B.dylib" +//go:cgo_import_dynamic purego_pthread_attr_destroy pthread_attr_destroy "/usr/lib/libSystem.B.dylib" +//go:cgo_import_dynamic purego_pthread_mutex_lock pthread_mutex_lock "/usr/lib/libSystem.B.dylib" +//go:cgo_import_dynamic purego_pthread_mutex_unlock pthread_mutex_unlock "/usr/lib/libSystem.B.dylib" +//go:cgo_import_dynamic purego_pthread_cond_broadcast pthread_cond_broadcast "/usr/lib/libSystem.B.dylib" +//go:cgo_import_dynamic purego_pthread_setspecific pthread_setspecific "/usr/lib/libSystem.B.dylib" diff 
--git a/vendor/github.com/ebitengine/purego/internal/fakecgo/symbols_freebsd.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/symbols_freebsd.go new file mode 100644 index 000000000..815381197 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/symbols_freebsd.go @@ -0,0 +1,29 @@ +// Code generated by 'go generate' with gen.go. DO NOT EDIT. + +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2022 The Ebitengine Authors + +//go:build !cgo + +package fakecgo + +//go:cgo_import_dynamic purego_malloc malloc "libc.so.7" +//go:cgo_import_dynamic purego_free free "libc.so.7" +//go:cgo_import_dynamic purego_setenv setenv "libc.so.7" +//go:cgo_import_dynamic purego_unsetenv unsetenv "libc.so.7" +//go:cgo_import_dynamic purego_sigfillset sigfillset "libc.so.7" +//go:cgo_import_dynamic purego_nanosleep nanosleep "libc.so.7" +//go:cgo_import_dynamic purego_abort abort "libc.so.7" +//go:cgo_import_dynamic purego_pthread_attr_init pthread_attr_init "libpthread.so" +//go:cgo_import_dynamic purego_pthread_create pthread_create "libpthread.so" +//go:cgo_import_dynamic purego_pthread_detach pthread_detach "libpthread.so" +//go:cgo_import_dynamic purego_pthread_sigmask pthread_sigmask "libpthread.so" +//go:cgo_import_dynamic purego_pthread_self pthread_self "libpthread.so" +//go:cgo_import_dynamic purego_pthread_get_stacksize_np pthread_get_stacksize_np "libpthread.so" +//go:cgo_import_dynamic purego_pthread_attr_getstacksize pthread_attr_getstacksize "libpthread.so" +//go:cgo_import_dynamic purego_pthread_attr_setstacksize pthread_attr_setstacksize "libpthread.so" +//go:cgo_import_dynamic purego_pthread_attr_destroy pthread_attr_destroy "libpthread.so" +//go:cgo_import_dynamic purego_pthread_mutex_lock pthread_mutex_lock "libpthread.so" +//go:cgo_import_dynamic purego_pthread_mutex_unlock pthread_mutex_unlock "libpthread.so" +//go:cgo_import_dynamic purego_pthread_cond_broadcast pthread_cond_broadcast "libpthread.so" +//go:cgo_import_dynamic purego_pthread_setspecific pthread_setspecific "libpthread.so" diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/symbols_linux.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/symbols_linux.go new file mode 100644 index 000000000..180057d01 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/symbols_linux.go @@ -0,0 +1,29 @@ +// Code generated by 'go generate' with gen.go. DO NOT EDIT. 
+ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2022 The Ebitengine Authors + +//go:build !cgo + +package fakecgo + +//go:cgo_import_dynamic purego_malloc malloc "libc.so.6" +//go:cgo_import_dynamic purego_free free "libc.so.6" +//go:cgo_import_dynamic purego_setenv setenv "libc.so.6" +//go:cgo_import_dynamic purego_unsetenv unsetenv "libc.so.6" +//go:cgo_import_dynamic purego_sigfillset sigfillset "libc.so.6" +//go:cgo_import_dynamic purego_nanosleep nanosleep "libc.so.6" +//go:cgo_import_dynamic purego_abort abort "libc.so.6" +//go:cgo_import_dynamic purego_pthread_attr_init pthread_attr_init "libpthread.so.0" +//go:cgo_import_dynamic purego_pthread_create pthread_create "libpthread.so.0" +//go:cgo_import_dynamic purego_pthread_detach pthread_detach "libpthread.so.0" +//go:cgo_import_dynamic purego_pthread_sigmask pthread_sigmask "libpthread.so.0" +//go:cgo_import_dynamic purego_pthread_self pthread_self "libpthread.so.0" +//go:cgo_import_dynamic purego_pthread_get_stacksize_np pthread_get_stacksize_np "libpthread.so.0" +//go:cgo_import_dynamic purego_pthread_attr_getstacksize pthread_attr_getstacksize "libpthread.so.0" +//go:cgo_import_dynamic purego_pthread_attr_setstacksize pthread_attr_setstacksize "libpthread.so.0" +//go:cgo_import_dynamic purego_pthread_attr_destroy pthread_attr_destroy "libpthread.so.0" +//go:cgo_import_dynamic purego_pthread_mutex_lock pthread_mutex_lock "libpthread.so.0" +//go:cgo_import_dynamic purego_pthread_mutex_unlock pthread_mutex_unlock "libpthread.so.0" +//go:cgo_import_dynamic purego_pthread_cond_broadcast pthread_cond_broadcast "libpthread.so.0" +//go:cgo_import_dynamic purego_pthread_setspecific pthread_setspecific "libpthread.so.0" diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/trampolines_amd64.s b/vendor/github.com/ebitengine/purego/internal/fakecgo/trampolines_amd64.s new file mode 100644 index 000000000..c9a3cc09e --- /dev/null +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/trampolines_amd64.s @@ -0,0 +1,104 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2022 The Ebitengine Authors + +//go:build !cgo && (darwin || linux || freebsd) + +/* +trampoline for emulating required C functions for cgo in go (see cgo.go) +(we convert cdecl calling convention to go and vice-versa) + +Since we're called from go and call into C we can cheat a bit with the calling conventions: + - in go all the registers are caller saved + - in C we have a couple of callee saved registers + +=> we can use BX, R12, R13, R14, R15 instead of the stack + +C Calling convention cdecl used here (we only need integer args): +1. arg: DI +2. arg: SI +3. arg: DX +4. arg: CX +5. arg: R8 +6. arg: R9 +We don't need floats with these functions -> AX=0 +return value will be in AX +*/ +#include "textflag.h" +#include "go_asm.h" + +// these trampolines map the gcc ABI to Go ABI and then calls into the Go equivalent functions. 
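+// The argument-taking trampolines move the C argument registers (DI, SI)
+// into Go's register ABI (AX, BX), then load the matching ·*_call variable
+// and call through the function pointer it holds.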
+ +TEXT x_cgo_init_trampoline(SB), NOSPLIT, $16 + MOVQ DI, AX + MOVQ SI, BX + MOVQ ·x_cgo_init_call(SB), DX + MOVQ (DX), CX + CALL CX + RET + +TEXT x_cgo_thread_start_trampoline(SB), NOSPLIT, $8 + MOVQ DI, AX + MOVQ ·x_cgo_thread_start_call(SB), DX + MOVQ (DX), CX + CALL CX + RET + +TEXT x_cgo_setenv_trampoline(SB), NOSPLIT, $8 + MOVQ DI, AX + MOVQ ·x_cgo_setenv_call(SB), DX + MOVQ (DX), CX + CALL CX + RET + +TEXT x_cgo_unsetenv_trampoline(SB), NOSPLIT, $8 + MOVQ DI, AX + MOVQ ·x_cgo_unsetenv_call(SB), DX + MOVQ (DX), CX + CALL CX + RET + +TEXT x_cgo_notify_runtime_init_done_trampoline(SB), NOSPLIT, $0 + CALL ·x_cgo_notify_runtime_init_done(SB) + RET + +TEXT x_cgo_bindm_trampoline(SB), NOSPLIT, $0 + CALL ·x_cgo_bindm(SB) + RET + +// func setg_trampoline(setg uintptr, g uintptr) +TEXT ·setg_trampoline(SB), NOSPLIT, $0-16 + MOVQ G+8(FP), DI + MOVQ setg+0(FP), BX + XORL AX, AX + CALL BX + RET + +TEXT threadentry_trampoline(SB), NOSPLIT, $16 + MOVQ DI, AX + MOVQ ·threadentry_call(SB), DX + MOVQ (DX), CX + CALL CX + RET + +TEXT ·call5(SB), NOSPLIT, $0-56 + MOVQ fn+0(FP), BX + MOVQ a1+8(FP), DI + MOVQ a2+16(FP), SI + MOVQ a3+24(FP), DX + MOVQ a4+32(FP), CX + MOVQ a5+40(FP), R8 + + XORL AX, AX // no floats + + PUSHQ BP // save BP + MOVQ SP, BP // save SP inside BP bc BP is callee-saved + SUBQ $16, SP // allocate space for alignment + ANDQ $-16, SP // align on 16 bytes for SSE + + CALL BX + + MOVQ BP, SP // get SP back + POPQ BP // restore BP + + MOVQ AX, ret+48(FP) + RET diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/trampolines_arm64.s b/vendor/github.com/ebitengine/purego/internal/fakecgo/trampolines_arm64.s new file mode 100644 index 000000000..9dbdbc013 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/trampolines_arm64.s @@ -0,0 +1,72 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2022 The Ebitengine Authors + +//go:build !cgo && (darwin || freebsd || linux) + +#include "textflag.h" +#include "go_asm.h" + +// these trampolines map the gcc ABI to Go ABI and then calls into the Go equivalent functions. 
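+// On arm64 the incoming C arguments (R0, R1) are spilled to the ABI0 stack
+// slots at 8(RSP) and 16(RSP), then the matching ·*_call variable is loaded
+// and the Go function is called through the pointer it holds.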
+ +TEXT x_cgo_init_trampoline(SB), NOSPLIT, $0-0 + MOVD R0, 8(RSP) + MOVD R1, 16(RSP) + MOVD ·x_cgo_init_call(SB), R26 + MOVD (R26), R2 + CALL (R2) + RET + +TEXT x_cgo_thread_start_trampoline(SB), NOSPLIT, $0-0 + MOVD R0, 8(RSP) + MOVD ·x_cgo_thread_start_call(SB), R26 + MOVD (R26), R2 + CALL (R2) + RET + +TEXT x_cgo_setenv_trampoline(SB), NOSPLIT, $0-0 + MOVD R0, 8(RSP) + MOVD ·x_cgo_setenv_call(SB), R26 + MOVD (R26), R2 + CALL (R2) + RET + +TEXT x_cgo_unsetenv_trampoline(SB), NOSPLIT, $0-0 + MOVD R0, 8(RSP) + MOVD ·x_cgo_unsetenv_call(SB), R26 + MOVD (R26), R2 + CALL (R2) + RET + +TEXT x_cgo_notify_runtime_init_done_trampoline(SB), NOSPLIT, $0-0 + CALL ·x_cgo_notify_runtime_init_done(SB) + RET + +TEXT x_cgo_bindm_trampoline(SB), NOSPLIT, $0 + CALL ·x_cgo_bindm(SB) + RET + +// func setg_trampoline(setg uintptr, g uintptr) +TEXT ·setg_trampoline(SB), NOSPLIT, $0-16 + MOVD G+8(FP), R0 + MOVD setg+0(FP), R1 + CALL R1 + RET + +TEXT threadentry_trampoline(SB), NOSPLIT, $0-0 + MOVD R0, 8(RSP) + MOVD ·threadentry_call(SB), R26 + MOVD (R26), R2 + CALL (R2) + MOVD $0, R0 // TODO: get the return value from threadentry + RET + +TEXT ·call5(SB), NOSPLIT, $0-0 + MOVD fn+0(FP), R6 + MOVD a1+8(FP), R0 + MOVD a2+16(FP), R1 + MOVD a3+24(FP), R2 + MOVD a4+32(FP), R3 + MOVD a5+40(FP), R4 + CALL R6 + MOVD R0, ret+48(FP) + RET diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/trampolines_stubs.s b/vendor/github.com/ebitengine/purego/internal/fakecgo/trampolines_stubs.s new file mode 100644 index 000000000..a65b2012c --- /dev/null +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/trampolines_stubs.s @@ -0,0 +1,90 @@ +// Code generated by 'go generate' with gen.go. DO NOT EDIT. + +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2022 The Ebitengine Authors + +//go:build !cgo && (darwin || freebsd || linux) + +#include "textflag.h" + +// these stubs are here because it is not possible to go:linkname directly the C functions on darwin arm64 + +TEXT _malloc(SB), NOSPLIT|NOFRAME, $0-0 + JMP purego_malloc(SB) + RET + +TEXT _free(SB), NOSPLIT|NOFRAME, $0-0 + JMP purego_free(SB) + RET + +TEXT _setenv(SB), NOSPLIT|NOFRAME, $0-0 + JMP purego_setenv(SB) + RET + +TEXT _unsetenv(SB), NOSPLIT|NOFRAME, $0-0 + JMP purego_unsetenv(SB) + RET + +TEXT _sigfillset(SB), NOSPLIT|NOFRAME, $0-0 + JMP purego_sigfillset(SB) + RET + +TEXT _nanosleep(SB), NOSPLIT|NOFRAME, $0-0 + JMP purego_nanosleep(SB) + RET + +TEXT _abort(SB), NOSPLIT|NOFRAME, $0-0 + JMP purego_abort(SB) + RET + +TEXT _pthread_attr_init(SB), NOSPLIT|NOFRAME, $0-0 + JMP purego_pthread_attr_init(SB) + RET + +TEXT _pthread_create(SB), NOSPLIT|NOFRAME, $0-0 + JMP purego_pthread_create(SB) + RET + +TEXT _pthread_detach(SB), NOSPLIT|NOFRAME, $0-0 + JMP purego_pthread_detach(SB) + RET + +TEXT _pthread_sigmask(SB), NOSPLIT|NOFRAME, $0-0 + JMP purego_pthread_sigmask(SB) + RET + +TEXT _pthread_self(SB), NOSPLIT|NOFRAME, $0-0 + JMP purego_pthread_self(SB) + RET + +TEXT _pthread_get_stacksize_np(SB), NOSPLIT|NOFRAME, $0-0 + JMP purego_pthread_get_stacksize_np(SB) + RET + +TEXT _pthread_attr_getstacksize(SB), NOSPLIT|NOFRAME, $0-0 + JMP purego_pthread_attr_getstacksize(SB) + RET + +TEXT _pthread_attr_setstacksize(SB), NOSPLIT|NOFRAME, $0-0 + JMP purego_pthread_attr_setstacksize(SB) + RET + +TEXT _pthread_attr_destroy(SB), NOSPLIT|NOFRAME, $0-0 + JMP purego_pthread_attr_destroy(SB) + RET + +TEXT _pthread_mutex_lock(SB), NOSPLIT|NOFRAME, $0-0 + JMP purego_pthread_mutex_lock(SB) + RET + +TEXT _pthread_mutex_unlock(SB), NOSPLIT|NOFRAME, $0-0 + 
JMP purego_pthread_mutex_unlock(SB) + RET + +TEXT _pthread_cond_broadcast(SB), NOSPLIT|NOFRAME, $0-0 + JMP purego_pthread_cond_broadcast(SB) + RET + +TEXT _pthread_setspecific(SB), NOSPLIT|NOFRAME, $0-0 + JMP purego_pthread_setspecific(SB) + RET diff --git a/vendor/github.com/ebitengine/purego/internal/strings/strings.go b/vendor/github.com/ebitengine/purego/internal/strings/strings.go new file mode 100644 index 000000000..5b0d25225 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/internal/strings/strings.go @@ -0,0 +1,40 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2022 The Ebitengine Authors + +package strings + +import ( + "unsafe" +) + +// hasSuffix tests whether the string s ends with suffix. +func hasSuffix(s, suffix string) bool { + return len(s) >= len(suffix) && s[len(s)-len(suffix):] == suffix +} + +// CString converts a go string to *byte that can be passed to C code. +func CString(name string) *byte { + if hasSuffix(name, "\x00") { + return &(*(*[]byte)(unsafe.Pointer(&name)))[0] + } + b := make([]byte, len(name)+1) + copy(b, name) + return &b[0] +} + +// GoString copies a null-terminated char* to a Go string. +func GoString(c uintptr) string { + // We take the address and then dereference it to trick go vet from creating a possible misuse of unsafe.Pointer + ptr := *(*unsafe.Pointer)(unsafe.Pointer(&c)) + if ptr == nil { + return "" + } + var length int + for { + if *(*byte)(unsafe.Add(ptr, uintptr(length))) == '\x00' { + break + } + length++ + } + return string(unsafe.Slice((*byte)(ptr), length)) +} diff --git a/vendor/github.com/ebitengine/purego/is_ios.go b/vendor/github.com/ebitengine/purego/is_ios.go new file mode 100644 index 000000000..ed31da978 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/is_ios.go @@ -0,0 +1,13 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2022 The Ebitengine Authors + +//go:build !cgo + +package purego + +// if you are getting this error it means that you have +// CGO_ENABLED=0 while trying to build for ios. +// purego does not support this mode yet. +// the fix is to set CGO_ENABLED=1 which will require +// a C compiler. +var _ = _PUREGO_REQUIRES_CGO_ON_IOS diff --git a/vendor/github.com/ebitengine/purego/nocgo.go b/vendor/github.com/ebitengine/purego/nocgo.go new file mode 100644 index 000000000..5b989ea81 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/nocgo.go @@ -0,0 +1,25 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2022 The Ebitengine Authors + +//go:build !cgo && (darwin || freebsd || linux) + +package purego + +// if CGO_ENABLED=0 import fakecgo to setup the Cgo runtime correctly. +// This is required since some frameworks need TLS setup the C way which Go doesn't do. +// We currently don't support ios in fakecgo mode so force Cgo or fail +// +// The way that the Cgo runtime (runtime/cgo) works is by setting some variables found +// in runtime with non-null GCC compiled functions. The variables that are replaced are +// var ( +// iscgo bool // in runtime/cgo.go +// _cgo_init unsafe.Pointer // in runtime/cgo.go +// _cgo_thread_start unsafe.Pointer // in runtime/cgo.go +// _cgo_notify_runtime_init_done unsafe.Pointer // in runtime/cgo.go +// _cgo_setenv unsafe.Pointer // in runtime/env_posix.go +// _cgo_unsetenv unsafe.Pointer // in runtime/env_posix.go +// ) +// importing fakecgo will set these (using //go:linkname) with functions written +// entirely in Go (except for some assembly trampolines to change GCC ABI to Go ABI). 
+// Doing so makes it possible to build applications that call into C without CGO_ENABLED=1. +import _ "github.com/ebitengine/purego/internal/fakecgo" diff --git a/vendor/github.com/ebitengine/purego/struct_amd64.go b/vendor/github.com/ebitengine/purego/struct_amd64.go new file mode 100644 index 000000000..f3514c984 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/struct_amd64.go @@ -0,0 +1,260 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2024 The Ebitengine Authors + +package purego + +import ( + "math" + "reflect" + "unsafe" +) + +func getStruct(outType reflect.Type, syscall syscall15Args) (v reflect.Value) { + outSize := outType.Size() + switch { + case outSize == 0: + return reflect.New(outType).Elem() + case outSize <= 8: + if isAllFloats(outType) { + // 2 float32s or 1 float64s are return in the float register + return reflect.NewAt(outType, unsafe.Pointer(&struct{ a uintptr }{syscall.f1})).Elem() + } + // up to 8 bytes is returned in RAX + return reflect.NewAt(outType, unsafe.Pointer(&struct{ a uintptr }{syscall.a1})).Elem() + case outSize <= 16: + r1, r2 := syscall.a1, syscall.a2 + if isAllFloats(outType) { + r1 = syscall.f1 + r2 = syscall.f2 + } else { + // check first 8 bytes if it's floats + hasFirstFloat := false + f1 := outType.Field(0).Type + if f1.Kind() == reflect.Float64 || f1.Kind() == reflect.Float32 && outType.Field(1).Type.Kind() == reflect.Float32 { + r1 = syscall.f1 + hasFirstFloat = true + } + + // find index of the field that starts the second 8 bytes + var i int + for i = 0; i < outType.NumField(); i++ { + if outType.Field(i).Offset == 8 { + break + } + } + + // check last 8 bytes if they are floats + f1 = outType.Field(i).Type + if f1.Kind() == reflect.Float64 || f1.Kind() == reflect.Float32 && i+1 == outType.NumField() { + r2 = syscall.f1 + } else if hasFirstFloat { + // if the first field was a float then that means the second integer field + // comes from the first integer register + r2 = syscall.a1 + } + } + return reflect.NewAt(outType, unsafe.Pointer(&struct{ a, b uintptr }{r1, r2})).Elem() + default: + // create struct from the Go pointer created above + // weird pointer dereference to circumvent go vet + return reflect.NewAt(outType, *(*unsafe.Pointer)(unsafe.Pointer(&syscall.a1))).Elem() + } +} + +func isAllFloats(ty reflect.Type) bool { + for i := 0; i < ty.NumField(); i++ { + f := ty.Field(i) + switch f.Type.Kind() { + case reflect.Float64, reflect.Float32: + default: + return false + } + } + return true +} + +// https://refspecs.linuxbase.org/elf/x86_64-abi-0.99.pdf +// https://gitlab.com/x86-psABIs/x86-64-ABI +// Class determines where the 8 byte value goes. 
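+// An eightbyte made up solely of floats classifies as _SSE and travels in an
+// XMM register; any integer member ORs in _INTEGER and sends the whole
+// eightbyte to a general-purpose register instead.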
+// Higher value classes win over lower value classes +const ( + _NO_CLASS = 0b0000 + _SSE = 0b0001 + _X87 = 0b0011 // long double not used in Go + _INTEGER = 0b0111 + _MEMORY = 0b1111 +) + +func addStruct(v reflect.Value, numInts, numFloats, numStack *int, addInt, addFloat, addStack func(uintptr), keepAlive []interface{}) []interface{} { + if v.Type().Size() == 0 { + return keepAlive + } + + // if greater than 64 bytes place on stack + if v.Type().Size() > 8*8 { + placeStack(v, addStack) + return keepAlive + } + var ( + savedNumFloats = *numFloats + savedNumInts = *numInts + savedNumStack = *numStack + ) + placeOnStack := postMerger(v.Type()) || !tryPlaceRegister(v, addFloat, addInt) + if placeOnStack { + // reset any values placed in registers + *numFloats = savedNumFloats + *numInts = savedNumInts + *numStack = savedNumStack + placeStack(v, addStack) + } + return keepAlive +} + +func postMerger(t reflect.Type) (passInMemory bool) { + // (c) If the size of the aggregate exceeds two eightbytes and the first eight- byte isn’t SSE or any other + // eightbyte isn’t SSEUP, the whole argument is passed in memory. + if t.Kind() != reflect.Struct { + return false + } + if t.Size() <= 2*8 { + return false + } + return true // Go does not have an SSE/SEEUP type so this is always true +} + +func tryPlaceRegister(v reflect.Value, addFloat func(uintptr), addInt func(uintptr)) (ok bool) { + ok = true + var val uint64 + var shift byte // # of bits to shift + var flushed bool + class := _NO_CLASS + flushIfNeeded := func() { + if flushed { + return + } + flushed = true + if class == _SSE { + addFloat(uintptr(val)) + } else { + addInt(uintptr(val)) + } + val = 0 + shift = 0 + class = _NO_CLASS + } + var place func(v reflect.Value) + place = func(v reflect.Value) { + var numFields int + if v.Kind() == reflect.Struct { + numFields = v.Type().NumField() + } else { + numFields = v.Type().Len() + } + + for i := 0; i < numFields; i++ { + flushed = false + var f reflect.Value + if v.Kind() == reflect.Struct { + f = v.Field(i) + } else { + f = v.Index(i) + } + switch f.Kind() { + case reflect.Struct: + place(f) + case reflect.Bool: + if f.Bool() { + val |= 1 + } + shift += 8 + class |= _INTEGER + case reflect.Pointer: + ok = false + return + case reflect.Int8: + val |= uint64(f.Int()&0xFF) << shift + shift += 8 + class |= _INTEGER + case reflect.Int16: + val |= uint64(f.Int()&0xFFFF) << shift + shift += 16 + class |= _INTEGER + case reflect.Int32: + val |= uint64(f.Int()&0xFFFF_FFFF) << shift + shift += 32 + class |= _INTEGER + case reflect.Int64, reflect.Int: + val = uint64(f.Int()) + shift = 64 + class = _INTEGER + case reflect.Uint8: + val |= f.Uint() << shift + shift += 8 + class |= _INTEGER + case reflect.Uint16: + val |= f.Uint() << shift + shift += 16 + class |= _INTEGER + case reflect.Uint32: + val |= f.Uint() << shift + shift += 32 + class |= _INTEGER + case reflect.Uint64, reflect.Uint: + val = f.Uint() + shift = 64 + class = _INTEGER + case reflect.Float32: + val |= uint64(math.Float32bits(float32(f.Float()))) << shift + shift += 32 + class |= _SSE + case reflect.Float64: + if v.Type().Size() > 16 { + ok = false + return + } + val = uint64(math.Float64bits(f.Float())) + shift = 64 + class = _SSE + case reflect.Array: + place(f) + default: + panic("purego: unsupported kind " + f.Kind().String()) + } + + if shift == 64 { + flushIfNeeded() + } else if shift > 64 { + // Should never happen, but may if we forget to reset shift after flush (or forget to flush), + // better fall apart here, than corrupt 
arguments. + panic("purego: tryPlaceRegisters shift > 64") + } + } + } + + place(v) + flushIfNeeded() + return ok +} + +func placeStack(v reflect.Value, addStack func(uintptr)) { + for i := 0; i < v.Type().NumField(); i++ { + f := v.Field(i) + switch f.Kind() { + case reflect.Pointer: + addStack(f.Pointer()) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + addStack(uintptr(f.Int())) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + addStack(uintptr(f.Uint())) + case reflect.Float32: + addStack(uintptr(math.Float32bits(float32(f.Float())))) + case reflect.Float64: + addStack(uintptr(math.Float64bits(f.Float()))) + case reflect.Struct: + placeStack(f, addStack) + default: + panic("purego: unsupported kind " + f.Kind().String()) + } + } +} diff --git a/vendor/github.com/ebitengine/purego/struct_arm64.go b/vendor/github.com/ebitengine/purego/struct_arm64.go new file mode 100644 index 000000000..11c36bd6e --- /dev/null +++ b/vendor/github.com/ebitengine/purego/struct_arm64.go @@ -0,0 +1,274 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2024 The Ebitengine Authors + +package purego + +import ( + "math" + "reflect" + "unsafe" +) + +func getStruct(outType reflect.Type, syscall syscall15Args) (v reflect.Value) { + outSize := outType.Size() + switch { + case outSize == 0: + return reflect.New(outType).Elem() + case outSize <= 8: + r1 := syscall.a1 + if isAllFloats, numFields := isAllSameFloat(outType); isAllFloats { + r1 = syscall.f1 + if numFields == 2 { + r1 = syscall.f2<<32 | syscall.f1 + } + } + return reflect.NewAt(outType, unsafe.Pointer(&struct{ a uintptr }{r1})).Elem() + case outSize <= 16: + r1, r2 := syscall.a1, syscall.a2 + if isAllFloats, numFields := isAllSameFloat(outType); isAllFloats { + switch numFields { + case 4: + r1 = syscall.f2<<32 | syscall.f1 + r2 = syscall.f4<<32 | syscall.f3 + case 3: + r1 = syscall.f2<<32 | syscall.f1 + r2 = syscall.f3 + case 2: + r1 = syscall.f1 + r2 = syscall.f2 + default: + panic("unreachable") + } + } + return reflect.NewAt(outType, unsafe.Pointer(&struct{ a, b uintptr }{r1, r2})).Elem() + default: + if isAllFloats, numFields := isAllSameFloat(outType); isAllFloats && numFields <= 4 { + switch numFields { + case 4: + return reflect.NewAt(outType, unsafe.Pointer(&struct{ a, b, c, d uintptr }{syscall.f1, syscall.f2, syscall.f3, syscall.f4})).Elem() + case 3: + return reflect.NewAt(outType, unsafe.Pointer(&struct{ a, b, c uintptr }{syscall.f1, syscall.f2, syscall.f3})).Elem() + default: + panic("unreachable") + } + } + // create struct from the Go pointer created in arm64_r8 + // weird pointer dereference to circumvent go vet + return reflect.NewAt(outType, *(*unsafe.Pointer)(unsafe.Pointer(&syscall.arm64_r8))).Elem() + } +} + +// https://github.com/ARM-software/abi-aa/blob/main/sysvabi64/sysvabi64.rst +const ( + _NO_CLASS = 0b00 + _FLOAT = 0b01 + _INT = 0b11 +) + +func addStruct(v reflect.Value, numInts, numFloats, numStack *int, addInt, addFloat, addStack func(uintptr), keepAlive []interface{}) []interface{} { + if v.Type().Size() == 0 { + return keepAlive + } + + if hva, hfa, size := isHVA(v.Type()), isHFA(v.Type()), v.Type().Size(); hva || hfa || size <= 16 { + // if this doesn't fit entirely in registers then + // each element goes onto the stack + if hfa && *numFloats+v.NumField() > numOfFloats { + *numFloats = numOfFloats + } else if hva && *numInts+v.NumField() > numOfIntegerRegisters() { + *numInts = numOfIntegerRegisters() + } + + placeRegisters(v, 
addFloat, addInt) + } else { + keepAlive = placeStack(v, keepAlive, addInt) + } + return keepAlive // the struct was allocated so don't panic +} + +func placeRegisters(v reflect.Value, addFloat func(uintptr), addInt func(uintptr)) { + var val uint64 + var shift byte + var flushed bool + class := _NO_CLASS + var place func(v reflect.Value) + place = func(v reflect.Value) { + var numFields int + if v.Kind() == reflect.Struct { + numFields = v.Type().NumField() + } else { + numFields = v.Type().Len() + } + for k := 0; k < numFields; k++ { + flushed = false + var f reflect.Value + if v.Kind() == reflect.Struct { + f = v.Field(k) + } else { + f = v.Index(k) + } + if shift >= 64 { + shift = 0 + flushed = true + if class == _FLOAT { + addFloat(uintptr(val)) + } else { + addInt(uintptr(val)) + } + } + switch f.Type().Kind() { + case reflect.Struct: + place(f) + case reflect.Bool: + if f.Bool() { + val |= 1 + } + shift += 8 + class |= _INT + case reflect.Uint8: + val |= f.Uint() << shift + shift += 8 + class |= _INT + case reflect.Uint16: + val |= f.Uint() << shift + shift += 16 + class |= _INT + case reflect.Uint32: + val |= f.Uint() << shift + shift += 32 + class |= _INT + case reflect.Uint64: + addInt(uintptr(f.Uint())) + shift = 0 + flushed = true + case reflect.Int8: + val |= uint64(f.Int()&0xFF) << shift + shift += 8 + class |= _INT + case reflect.Int16: + val |= uint64(f.Int()&0xFFFF) << shift + shift += 16 + class |= _INT + case reflect.Int32: + val |= uint64(f.Int()&0xFFFF_FFFF) << shift + shift += 32 + class |= _INT + case reflect.Int64: + addInt(uintptr(f.Int())) + shift = 0 + flushed = true + case reflect.Float32: + if class == _FLOAT { + addFloat(uintptr(val)) + val = 0 + shift = 0 + } + val |= uint64(math.Float32bits(float32(f.Float()))) << shift + shift += 32 + class |= _FLOAT + case reflect.Float64: + addFloat(uintptr(math.Float64bits(float64(f.Float())))) + shift = 0 + flushed = true + case reflect.Array: + place(f) + default: + panic("purego: unsupported kind " + f.Kind().String()) + } + } + } + place(v) + if !flushed { + if class == _FLOAT { + addFloat(uintptr(val)) + } else { + addInt(uintptr(val)) + } + } +} + +func placeStack(v reflect.Value, keepAlive []interface{}, addInt func(uintptr)) []interface{} { + // Struct is too big to be placed in registers. + // Copy to heap and place the pointer in register + ptrStruct := reflect.New(v.Type()) + ptrStruct.Elem().Set(v) + ptr := ptrStruct.Elem().Addr().UnsafePointer() + keepAlive = append(keepAlive, ptr) + addInt(uintptr(ptr)) + return keepAlive +} + +// isHFA reports a Homogeneous Floating-point Aggregate (HFA) which is a Fundamental Data Type that is a +// Floating-Point type and at most four uniquely addressable members (5.9.5.1 in [Arm64 Calling Convention]). +// This type of struct will be placed more compactly than the individual fields. 
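+// For example, struct{ X, Y float32 } is an HFA: it has at most four members,
+// all of the same floating-point type, so each member is passed in its own
+// FP register.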
+// +// [Arm64 Calling Convention]: https://github.com/ARM-software/abi-aa/blob/main/sysvabi64/sysvabi64.rst +func isHFA(t reflect.Type) bool { + // round up struct size to nearest 8 see section B.4 + structSize := roundUpTo8(t.Size()) + if structSize == 0 || t.NumField() > 4 { + return false + } + first := t.Field(0) + switch first.Type.Kind() { + case reflect.Float32, reflect.Float64: + firstKind := first.Type.Kind() + for i := 0; i < t.NumField(); i++ { + if t.Field(i).Type.Kind() != firstKind { + return false + } + } + return true + case reflect.Array: + switch first.Type.Elem().Kind() { + case reflect.Float32, reflect.Float64: + return true + default: + return false + } + case reflect.Struct: + for i := 0; i < first.Type.NumField(); i++ { + if !isHFA(first.Type) { + return false + } + } + return true + default: + return false + } +} + +// isHVA reports a Homogeneous Aggregate with a Fundamental Data Type that is a Short-Vector type +// and at most four uniquely addressable members (5.9.5.2 in [Arm64 Calling Convention]). +// A short vector is a machine type that is composed of repeated instances of one fundamental integral or +// floating-point type. It may be 8 or 16 bytes in total size (5.4 in [Arm64 Calling Convention]). +// This type of struct will be placed more compactly than the individual fields. +// +// [Arm64 Calling Convention]: https://github.com/ARM-software/abi-aa/blob/main/sysvabi64/sysvabi64.rst +func isHVA(t reflect.Type) bool { + // round up struct size to nearest 8 see section B.4 + structSize := roundUpTo8(t.Size()) + if structSize == 0 || (structSize != 8 && structSize != 16) { + return false + } + first := t.Field(0) + switch first.Type.Kind() { + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Int8, reflect.Int16, reflect.Int32: + firstKind := first.Type.Kind() + for i := 0; i < t.NumField(); i++ { + if t.Field(i).Type.Kind() != firstKind { + return false + } + } + return true + case reflect.Array: + switch first.Type.Elem().Kind() { + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Int8, reflect.Int16, reflect.Int32: + return true + default: + return false + } + default: + return false + } +} diff --git a/vendor/github.com/ebitengine/purego/struct_other.go b/vendor/github.com/ebitengine/purego/struct_other.go new file mode 100644 index 000000000..9d42adac8 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/struct_other.go @@ -0,0 +1,16 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2024 The Ebitengine Authors + +//go:build !amd64 && !arm64 + +package purego + +import "reflect" + +func addStruct(v reflect.Value, numInts, numFloats, numStack *int, addInt, addFloat, addStack func(uintptr), keepAlive []interface{}) []interface{} { + panic("purego: struct arguments are not supported") +} + +func getStruct(outType reflect.Type, syscall syscall15Args) (v reflect.Value) { + panic("purego: struct returns are not supported") +} diff --git a/vendor/github.com/ebitengine/purego/sys_amd64.s b/vendor/github.com/ebitengine/purego/sys_amd64.s new file mode 100644 index 000000000..cabde1a58 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/sys_amd64.s @@ -0,0 +1,164 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2022 The Ebitengine Authors + +//go:build darwin || freebsd || linux + +#include "textflag.h" +#include "abi_amd64.h" +#include "go_asm.h" +#include "funcdata.h" + +#define STACK_SIZE 80 +#define PTR_ADDRESS (STACK_SIZE - 8) + +// syscall15X calls a function in libc on behalf of the 
syscall package.
+// syscall15X takes a pointer to a struct like:
+// struct {
+// fn uintptr
+// a1 uintptr
+// a2 uintptr
+// a3 uintptr
+// a4 uintptr
+// a5 uintptr
+// a6 uintptr
+// a7 uintptr
+// a8 uintptr
+// a9 uintptr
+// a10 uintptr
+// a11 uintptr
+// a12 uintptr
+// a13 uintptr
+// a14 uintptr
+// a15 uintptr
+// r1 uintptr
+// r2 uintptr
+// err uintptr
+// }
+// syscall15X must be called on the g0 stack with the
+// C calling convention (use libcCall).
+GLOBL ·syscall15XABI0(SB), NOPTR|RODATA, $8
+DATA ·syscall15XABI0(SB)/8, $syscall15X(SB)
+TEXT syscall15X(SB), NOSPLIT|NOFRAME, $0
+ PUSHQ BP
+ MOVQ SP, BP
+ SUBQ $STACK_SIZE, SP
+ MOVQ DI, PTR_ADDRESS(BP) // save the pointer
+ MOVQ DI, R11
+
+ MOVQ syscall15Args_f1(R11), X0 // f1
+ MOVQ syscall15Args_f2(R11), X1 // f2
+ MOVQ syscall15Args_f3(R11), X2 // f3
+ MOVQ syscall15Args_f4(R11), X3 // f4
+ MOVQ syscall15Args_f5(R11), X4 // f5
+ MOVQ syscall15Args_f6(R11), X5 // f6
+ MOVQ syscall15Args_f7(R11), X6 // f7
+ MOVQ syscall15Args_f8(R11), X7 // f8
+
+ MOVQ syscall15Args_a1(R11), DI // a1
+ MOVQ syscall15Args_a2(R11), SI // a2
+ MOVQ syscall15Args_a3(R11), DX // a3
+ MOVQ syscall15Args_a4(R11), CX // a4
+ MOVQ syscall15Args_a5(R11), R8 // a5
+ MOVQ syscall15Args_a6(R11), R9 // a6
+
+ // push the remaining parameters onto the stack
+ MOVQ syscall15Args_a7(R11), R12
+ MOVQ R12, 0(SP) // push a7
+ MOVQ syscall15Args_a8(R11), R12
+ MOVQ R12, 8(SP) // push a8
+ MOVQ syscall15Args_a9(R11), R12
+ MOVQ R12, 16(SP) // push a9
+ MOVQ syscall15Args_a10(R11), R12
+ MOVQ R12, 24(SP) // push a10
+ MOVQ syscall15Args_a11(R11), R12
+ MOVQ R12, 32(SP) // push a11
+ MOVQ syscall15Args_a12(R11), R12
+ MOVQ R12, 40(SP) // push a12
+ MOVQ syscall15Args_a13(R11), R12
+ MOVQ R12, 48(SP) // push a13
+ MOVQ syscall15Args_a14(R11), R12
+ MOVQ R12, 56(SP) // push a14
+ MOVQ syscall15Args_a15(R11), R12
+ MOVQ R12, 64(SP) // push a15
+ XORL AX, AX // vararg: say "no float args"
+
+ MOVQ syscall15Args_fn(R11), R10 // fn
+ CALL R10
+
+ MOVQ PTR_ADDRESS(BP), DI // get the pointer back
+ MOVQ AX, syscall15Args_a1(DI) // r1
+ MOVQ DX, syscall15Args_a2(DI) // r2
+ MOVQ X0, syscall15Args_f1(DI) // f1
+ MOVQ X1, syscall15Args_f2(DI) // f2
+
+ XORL AX, AX // no error (it's ignored anyway)
+ ADDQ $STACK_SIZE, SP
+ MOVQ BP, SP
+ POPQ BP
+ RET
+
+TEXT callbackasm1(SB), NOSPLIT|NOFRAME, $0
+ MOVQ 0(SP), AX // save the return address to calculate the cb index
+ MOVQ 8(SP), R10 // get the return SP so that we can align register args with stack args
+ ADDQ $8, SP // remove return address from stack, we are not returning to callbackasm, but to its caller.
+
+ // make space for the first six int and eight float arguments below the frame
+ ADJSP $14*8, SP
+ MOVSD X0, (1*8)(SP)
+ MOVSD X1, (2*8)(SP)
+ MOVSD X2, (3*8)(SP)
+ MOVSD X3, (4*8)(SP)
+ MOVSD X4, (5*8)(SP)
+ MOVSD X5, (6*8)(SP)
+ MOVSD X6, (7*8)(SP)
+ MOVSD X7, (8*8)(SP)
+ MOVQ DI, (9*8)(SP)
+ MOVQ SI, (10*8)(SP)
+ MOVQ DX, (11*8)(SP)
+ MOVQ CX, (12*8)(SP)
+ MOVQ R8, (13*8)(SP)
+ MOVQ R9, (14*8)(SP)
+ LEAQ 8(SP), R8 // R8 = address of args vector
+
+ PUSHQ R10 // push the stack pointer below registers
+
+ // Switch from the host ABI to the Go ABI.
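+ // PUSH_REGS_HOST_TO_ABI0 is defined in abi_amd64.h; it saves the
+ // registers that are callee-saved in the host C ABI but scratch in
+ // Go's ABI0, so the Go code entered below is free to clobber them.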
+ PUSH_REGS_HOST_TO_ABI0() + + // determine index into runtime·cbs table + MOVQ $callbackasm(SB), DX + SUBQ DX, AX + MOVQ $0, DX + MOVQ $5, CX // divide by 5 because each call instruction in ·callbacks is 5 bytes long + DIVL CX + SUBQ $1, AX // subtract 1 because return PC is to the next slot + + // Create a struct callbackArgs on our stack to be passed as + // the "frame" to cgocallback and on to callbackWrap. + // $24 to make enough room for the arguments to runtime.cgocallback + SUBQ $(24+callbackArgs__size), SP + MOVQ AX, (24+callbackArgs_index)(SP) // callback index + MOVQ R8, (24+callbackArgs_args)(SP) // address of args vector + MOVQ $0, (24+callbackArgs_result)(SP) // result + LEAQ 24(SP), AX // take the address of callbackArgs + + // Call cgocallback, which will call callbackWrap(frame). + MOVQ ·callbackWrap_call(SB), DI // Get the ABIInternal function pointer + MOVQ (DI), DI // without by using a closure. + MOVQ AX, SI // frame (address of callbackArgs) + MOVQ $0, CX // context + + CALL crosscall2(SB) // runtime.cgocallback(fn, frame, ctxt uintptr) + + // Get callback result. + MOVQ (24+callbackArgs_result)(SP), AX + ADDQ $(24+callbackArgs__size), SP // remove callbackArgs struct + + POP_REGS_HOST_TO_ABI0() + + POPQ R10 // get the SP back + ADJSP $-14*8, SP // remove arguments + + MOVQ R10, 0(SP) + + RET diff --git a/vendor/github.com/ebitengine/purego/sys_arm64.s b/vendor/github.com/ebitengine/purego/sys_arm64.s new file mode 100644 index 000000000..a68fdb99b --- /dev/null +++ b/vendor/github.com/ebitengine/purego/sys_arm64.s @@ -0,0 +1,92 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2022 The Ebitengine Authors + +//go:build darwin || freebsd || linux || windows + +#include "textflag.h" +#include "go_asm.h" +#include "funcdata.h" + +#define STACK_SIZE 64 +#define PTR_ADDRESS (STACK_SIZE - 8) + +// syscall15X calls a function in libc on behalf of the syscall package. +// syscall15X takes a pointer to a struct like: +// struct { +// fn uintptr +// a1 uintptr +// a2 uintptr +// a3 uintptr +// a4 uintptr +// a5 uintptr +// a6 uintptr +// a7 uintptr +// a8 uintptr +// a9 uintptr +// a10 uintptr +// a11 uintptr +// a12 uintptr +// a13 uintptr +// a14 uintptr +// a15 uintptr +// r1 uintptr +// r2 uintptr +// err uintptr +// } +// syscall15X must be called on the g0 stack with the +// C calling convention (use libcCall). 
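+// ·syscall15XABI0 below holds the entry address of syscall15X so that
+// the Go side can hand it to runtime_cgocall (see syscall_sysv.go).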
+GLOBL ·syscall15XABI0(SB), NOPTR|RODATA, $8
+DATA ·syscall15XABI0(SB)/8, $syscall15X(SB)
+TEXT syscall15X(SB), NOSPLIT, $0
+ SUB $STACK_SIZE, RSP // push structure pointer
+ MOVD R0, PTR_ADDRESS(RSP)
+ MOVD R0, R9
+
+ FMOVD syscall15Args_f1(R9), F0 // f1
+ FMOVD syscall15Args_f2(R9), F1 // f2
+ FMOVD syscall15Args_f3(R9), F2 // f3
+ FMOVD syscall15Args_f4(R9), F3 // f4
+ FMOVD syscall15Args_f5(R9), F4 // f5
+ FMOVD syscall15Args_f6(R9), F5 // f6
+ FMOVD syscall15Args_f7(R9), F6 // f7
+ FMOVD syscall15Args_f8(R9), F7 // f8
+
+ MOVD syscall15Args_a1(R9), R0 // a1
+ MOVD syscall15Args_a2(R9), R1 // a2
+ MOVD syscall15Args_a3(R9), R2 // a3
+ MOVD syscall15Args_a4(R9), R3 // a4
+ MOVD syscall15Args_a5(R9), R4 // a5
+ MOVD syscall15Args_a6(R9), R5 // a6
+ MOVD syscall15Args_a7(R9), R6 // a7
+ MOVD syscall15Args_a8(R9), R7 // a8
+ MOVD syscall15Args_arm64_r8(R9), R8 // r8
+
+ MOVD syscall15Args_a9(R9), R10
+ MOVD R10, 0(RSP) // push a9 onto stack
+ MOVD syscall15Args_a10(R9), R10
+ MOVD R10, 8(RSP) // push a10 onto stack
+ MOVD syscall15Args_a11(R9), R10
+ MOVD R10, 16(RSP) // push a11 onto stack
+ MOVD syscall15Args_a12(R9), R10
+ MOVD R10, 24(RSP) // push a12 onto stack
+ MOVD syscall15Args_a13(R9), R10
+ MOVD R10, 32(RSP) // push a13 onto stack
+ MOVD syscall15Args_a14(R9), R10
+ MOVD R10, 40(RSP) // push a14 onto stack
+ MOVD syscall15Args_a15(R9), R10
+ MOVD R10, 48(RSP) // push a15 onto stack
+
+ MOVD syscall15Args_fn(R9), R10 // fn
+ BL (R10)
+
+ MOVD PTR_ADDRESS(RSP), R2 // pop structure pointer
+ ADD $STACK_SIZE, RSP
+
+ MOVD R0, syscall15Args_a1(R2) // save r1
+ MOVD R1, syscall15Args_a2(R2) // save r2
+ FMOVD F0, syscall15Args_f1(R2) // save f0
+ FMOVD F1, syscall15Args_f2(R2) // save f1
+ FMOVD F2, syscall15Args_f3(R2) // save f2
+ FMOVD F3, syscall15Args_f4(R2) // save f3
+
+ RET
diff --git a/vendor/github.com/ebitengine/purego/sys_unix_arm64.s b/vendor/github.com/ebitengine/purego/sys_unix_arm64.s
new file mode 100644
index 000000000..6da06b4d1
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/sys_unix_arm64.s
@@ -0,0 +1,70 @@
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: 2023 The Ebitengine Authors
+
+//go:build darwin || freebsd || linux
+
+#include "textflag.h"
+#include "go_asm.h"
+#include "funcdata.h"
+#include "abi_arm64.h"
+
+TEXT callbackasm1(SB), NOSPLIT|NOFRAME, $0
+ NO_LOCAL_POINTERS
+
+ // On entry, the trampoline in zcallback_darwin_arm64.s left
+ // the callback index in R12 (which is volatile in the C ABI).
+
+ // Save callback register arguments R0-R7 and F0-F7.
+ // We do this at the top of the frame so they're contiguous with stack arguments.
+ SUB $(16*8), RSP, R14
+ FSTPD (F0, F1), (0*8)(R14)
+ FSTPD (F2, F3), (2*8)(R14)
+ FSTPD (F4, F5), (4*8)(R14)
+ FSTPD (F6, F7), (6*8)(R14)
+ STP (R0, R1), (8*8)(R14)
+ STP (R2, R3), (10*8)(R14)
+ STP (R4, R5), (12*8)(R14)
+ STP (R6, R7), (14*8)(R14)
+
+ // Adjust SP by frame size.
+ SUB $(26*8), RSP
+
+ // It is important to save R27 because the Go assembler
+ // uses it for move instructions for a variable.
+ // This line:
+ // MOVD ·callbackWrap_call(SB), R0
+ // Creates the instructions:
+ // ADRP 14335(PC), R27
+ // MOVD 388(27), R0
+ // R27 is a callee-saved register so we are responsible
+ // for ensuring its value doesn't change. So save it and
+ // restore it at the end of this function.
+ // R30 is the link register. crosscall2 doesn't save it
+ // so it's saved here.
+ STP (R27, R30), 0(RSP)
+
+ // Create a struct callbackArgs on our stack.
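+ // Note: the register arguments saved at R14 above lie inside this
+ // 26*8-byte frame, so they stay addressable for the whole call.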
+ MOVD $(callbackArgs__size)(RSP), R13 + MOVD R12, callbackArgs_index(R13) // callback index + MOVD R14, callbackArgs_args(R13) // address of args vector + MOVD ZR, callbackArgs_result(R13) // result + + // Move parameters into registers + // Get the ABIInternal function pointer + // without by using a closure. + MOVD ·callbackWrap_call(SB), R0 + MOVD (R0), R0 // fn unsafe.Pointer + MOVD R13, R1 // frame (&callbackArgs{...}) + MOVD $0, R3 // ctxt uintptr + + BL crosscall2(SB) + + // Get callback result. + MOVD $(callbackArgs__size)(RSP), R13 + MOVD callbackArgs_result(R13), R0 + + // Restore LR and R27 + LDP 0(RSP), (R27, R30) + ADD $(26*8), RSP + + RET diff --git a/vendor/github.com/ebitengine/purego/syscall.go b/vendor/github.com/ebitengine/purego/syscall.go new file mode 100644 index 000000000..c30688dda --- /dev/null +++ b/vendor/github.com/ebitengine/purego/syscall.go @@ -0,0 +1,53 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2022 The Ebitengine Authors + +//go:build darwin || freebsd || linux || windows + +package purego + +// CDecl marks a function as being called using the __cdecl calling convention as defined in +// the [MSDocs] when passed to NewCallback. It must be the first argument to the function. +// This is only useful on 386 Windows, but it is safe to use on other platforms. +// +// [MSDocs]: https://learn.microsoft.com/en-us/cpp/cpp/cdecl?view=msvc-170 +type CDecl struct{} + +const ( + maxArgs = 15 + numOfFloats = 8 // arm64 and amd64 both have 8 float registers +) + +type syscall15Args struct { + fn, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15 uintptr + f1, f2, f3, f4, f5, f6, f7, f8 uintptr + arm64_r8 uintptr +} + +// SyscallN takes fn, a C function pointer and a list of arguments as uintptr. +// There is an internal maximum number of arguments that SyscallN can take. It panics +// when the maximum is exceeded. It returns the result and the libc error code if there is one. +// +// NOTE: SyscallN does not properly call functions that have both integer and float parameters. +// See discussion comment https://github.com/ebiten/purego/pull/1#issuecomment-1128057607 +// for an explanation of why that is. +// +// On amd64, if there are more than 8 floats the 9th and so on will be placed incorrectly on the +// stack. +// +// The pragma go:nosplit is not needed at this function declaration because it uses go:uintptrescapes +// which forces all the objects that the uintptrs point to onto the heap where a stack split won't affect +// their memory location. 
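+//
+// A minimal usage sketch (library path and symbol name assumed; error
+// handling omitted):
+//
+//	libc, _ := purego.Dlopen("libc.so.6", purego.RTLD_NOW|purego.RTLD_GLOBAL)
+//	puts, _ := purego.Dlsym(libc, "puts")
+//	msg, _ := syscall.BytePtrFromString("hello")
+//	purego.SyscallN(puts, uintptr(unsafe.Pointer(msg)))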
+// +//go:uintptrescapes +func SyscallN(fn uintptr, args ...uintptr) (r1, r2, err uintptr) { + if fn == 0 { + panic("purego: fn is nil") + } + if len(args) > maxArgs { + panic("purego: too many arguments to SyscallN") + } + // add padding so there is no out-of-bounds slicing + var tmp [maxArgs]uintptr + copy(tmp[:], args) + return syscall_syscall15X(fn, tmp[0], tmp[1], tmp[2], tmp[3], tmp[4], tmp[5], tmp[6], tmp[7], tmp[8], tmp[9], tmp[10], tmp[11], tmp[12], tmp[13], tmp[14]) +} diff --git a/vendor/github.com/ebitengine/purego/syscall_cgo_linux.go b/vendor/github.com/ebitengine/purego/syscall_cgo_linux.go new file mode 100644 index 000000000..36ee14e3b --- /dev/null +++ b/vendor/github.com/ebitengine/purego/syscall_cgo_linux.go @@ -0,0 +1,21 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2022 The Ebitengine Authors + +//go:build cgo && !(amd64 || arm64) + +package purego + +import ( + "github.com/ebitengine/purego/internal/cgo" +) + +var syscall15XABI0 = uintptr(cgo.Syscall15XABI0) + +//go:nosplit +func syscall_syscall15X(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15 uintptr) (r1, r2, err uintptr) { + return cgo.Syscall15X(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15) +} + +func NewCallback(_ interface{}) uintptr { + panic("purego: NewCallback on Linux is only supported on amd64/arm64") +} diff --git a/vendor/github.com/ebitengine/purego/syscall_sysv.go b/vendor/github.com/ebitengine/purego/syscall_sysv.go new file mode 100644 index 000000000..cce171c8f --- /dev/null +++ b/vendor/github.com/ebitengine/purego/syscall_sysv.go @@ -0,0 +1,223 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2022 The Ebitengine Authors + +//go:build darwin || freebsd || (linux && (amd64 || arm64)) + +package purego + +import ( + "reflect" + "runtime" + "sync" + "unsafe" +) + +var syscall15XABI0 uintptr + +//go:nosplit +func syscall_syscall15X(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15 uintptr) (r1, r2, err uintptr) { + args := syscall15Args{ + fn, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, + a1, a2, a3, a4, a5, a6, a7, a8, + 0, + } + runtime_cgocall(syscall15XABI0, unsafe.Pointer(&args)) + return args.a1, args.a2, 0 +} + +// NewCallback converts a Go function to a function pointer conforming to the C calling convention. +// This is useful when interoperating with C code requiring callbacks. The argument is expected to be a +// function with zero or one uintptr-sized result. The function must not have arguments with size larger than the size +// of uintptr. Only a limited number of callbacks may be created in a single Go process, and any memory allocated +// for these callbacks is never released. At least 2000 callbacks can always be created. Although this function +// provides similar functionality to windows.NewCallback it is distinct. 
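+//
+// A minimal sketch (callback signature assumed):
+//
+//	add := purego.NewCallback(func(a, b uintptr) uintptr { return a + b })
+//
+// The returned value can then be handed to C code that expects a
+// function pointer.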
+func NewCallback(fn interface{}) uintptr { + ty := reflect.TypeOf(fn) + for i := 0; i < ty.NumIn(); i++ { + in := ty.In(i) + if !in.AssignableTo(reflect.TypeOf(CDecl{})) { + continue + } + if i != 0 { + panic("purego: CDecl must be the first argument") + } + } + return compileCallback(fn) +} + +// maxCb is the maximum number of callbacks +// only increase this if you have added more to the callbackasm function +const maxCB = 2000 + +var cbs struct { + lock sync.Mutex + numFn int // the number of functions currently in cbs.funcs + funcs [maxCB]reflect.Value // the saved callbacks +} + +type callbackArgs struct { + index uintptr + // args points to the argument block. + // + // The structure of the arguments goes + // float registers followed by the + // integer registers followed by the stack. + // + // This variable is treated as a continuous + // block of memory containing all of the arguments + // for this callback. + args unsafe.Pointer + // Below are out-args from callbackWrap + result uintptr +} + +func compileCallback(fn interface{}) uintptr { + val := reflect.ValueOf(fn) + if val.Kind() != reflect.Func { + panic("purego: the type must be a function but was not") + } + if val.IsNil() { + panic("purego: function must not be nil") + } + ty := val.Type() + for i := 0; i < ty.NumIn(); i++ { + in := ty.In(i) + switch in.Kind() { + case reflect.Struct: + if i == 0 && in.AssignableTo(reflect.TypeOf(CDecl{})) { + continue + } + fallthrough + case reflect.Interface, reflect.Func, reflect.Slice, + reflect.Chan, reflect.Complex64, reflect.Complex128, + reflect.String, reflect.Map, reflect.Invalid: + panic("purego: unsupported argument type: " + in.Kind().String()) + } + } +output: + switch { + case ty.NumOut() == 1: + switch ty.Out(0).Kind() { + case reflect.Pointer, reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, + reflect.Bool, reflect.UnsafePointer: + break output + } + panic("purego: unsupported return type: " + ty.String()) + case ty.NumOut() > 1: + panic("purego: callbacks can only have one return") + } + cbs.lock.Lock() + defer cbs.lock.Unlock() + if cbs.numFn >= maxCB { + panic("purego: the maximum number of callbacks has been reached") + } + cbs.funcs[cbs.numFn] = val + cbs.numFn++ + return callbackasmAddr(cbs.numFn - 1) +} + +const ptrSize = unsafe.Sizeof((*int)(nil)) + +const callbackMaxFrame = 64 * ptrSize + +// callbackasm is implemented in zcallback_GOOS_GOARCH.s +// +//go:linkname __callbackasm callbackasm +var __callbackasm byte +var callbackasmABI0 = uintptr(unsafe.Pointer(&__callbackasm)) + +// callbackWrap_call allows the calling of the ABIInternal wrapper +// which is required for runtime.cgocallback without the +// tag which is only allowed in the runtime. +// This closure is used inside sys_darwin_GOARCH.s +var callbackWrap_call = callbackWrap + +// callbackWrap is called by assembly code which determines which Go function to call. +// This function takes the arguments and passes them to the Go function and returns the result. 
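+// For example, on arm64 (8 integer and 8 float registers) a callback
+// func(a float64, b uintptr) reads a from frame[0] and b from frame[8],
+// while a ninth integer argument would be read from the stack area
+// starting at frame[16].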
+func callbackWrap(a *callbackArgs) {
+ cbs.lock.Lock()
+ fn := cbs.funcs[a.index]
+ cbs.lock.Unlock()
+ fnType := fn.Type()
+ args := make([]reflect.Value, fnType.NumIn())
+ frame := (*[callbackMaxFrame]uintptr)(a.args)
+ var floatsN int // floatsN represents the number of float arguments processed
+ var intsN int // intsN represents the number of integer arguments processed
+ // stack points to the index into frame of the current stack element.
+ // The stack begins after the float and integer registers.
+ stack := numOfIntegerRegisters() + numOfFloats
+ for i := range args {
+ var pos int
+ switch fnType.In(i).Kind() {
+ case reflect.Float32, reflect.Float64:
+ if floatsN >= numOfFloats {
+ pos = stack
+ stack++
+ } else {
+ pos = floatsN
+ }
+ floatsN++
+ case reflect.Struct:
+ // This is the CDecl field
+ args[i] = reflect.Zero(fnType.In(i))
+ continue
+ default:
+ if intsN >= numOfIntegerRegisters() {
+ pos = stack
+ stack++
+ } else {
+ // the integers begin after the floats in frame
+ pos = intsN + numOfFloats
+ }
+ intsN++
+ }
+ args[i] = reflect.NewAt(fnType.In(i), unsafe.Pointer(&frame[pos])).Elem()
+ }
+ ret := fn.Call(args)
+ if len(ret) > 0 {
+ switch k := ret[0].Kind(); k {
+ case reflect.Uint, reflect.Uint64, reflect.Uint32, reflect.Uint16, reflect.Uint8, reflect.Uintptr:
+ a.result = uintptr(ret[0].Uint())
+ case reflect.Int, reflect.Int64, reflect.Int32, reflect.Int16, reflect.Int8:
+ a.result = uintptr(ret[0].Int())
+ case reflect.Bool:
+ if ret[0].Bool() {
+ a.result = 1
+ } else {
+ a.result = 0
+ }
+ case reflect.Pointer:
+ a.result = ret[0].Pointer()
+ case reflect.UnsafePointer:
+ a.result = ret[0].Pointer()
+ default:
+ panic("purego: unsupported kind: " + k.String())
+ }
+ }
+}
+
+// callbackasmAddr returns the address of the runtime.callbackasm
+// function adjusted by i.
+// On x86 and amd64, runtime.callbackasm is a series of CALL instructions,
+// and we want the callback to arrive at the
+// corresponding call instruction instead of the start of
+// runtime.callbackasm.
+// On ARM, runtime.callbackasm is a series of mov and branch instructions.
+// R12 is loaded with the callback index. Each entry is two instructions,
+// hence 8 bytes.
+func callbackasmAddr(i int) uintptr {
+ var entrySize int
+ switch runtime.GOARCH {
+ default:
+ panic("purego: unsupported architecture")
+ case "386", "amd64":
+ entrySize = 5
+ case "arm", "arm64":
+ // On ARM and ARM64, each entry is a MOV instruction
+ // followed by a branch instruction
+ entrySize = 8
+ }
+ return callbackasmABI0 + uintptr(i*entrySize)
+}
diff --git a/vendor/github.com/ebitengine/purego/syscall_windows.go b/vendor/github.com/ebitengine/purego/syscall_windows.go
new file mode 100644
index 000000000..5fbfcabfd
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/syscall_windows.go
@@ -0,0 +1,46 @@
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: 2022 The Ebitengine Authors
+
+package purego
+
+import (
+ "reflect"
+ "syscall"
+)
+
+var syscall15XABI0 uintptr
+
+func syscall_syscall15X(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15 uintptr) (r1, r2, err uintptr) {
+ r1, r2, errno := syscall.Syscall15(fn, 15, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15)
+ return r1, r2, uintptr(errno)
+}
+
+// NewCallback converts a Go function to a function pointer conforming to the stdcall calling convention.
+// This is useful when interoperating with Windows code requiring callbacks. The argument is expected to be a
+// function with one uintptr-sized result.
The function must not have arguments with size larger than the +// size of uintptr. Only a limited number of callbacks may be created in a single Go process, and any memory +// allocated for these callbacks is never released. Between NewCallback and NewCallbackCDecl, at least 1024 +// callbacks can always be created. Although this function is similiar to the darwin version it may act +// differently. +func NewCallback(fn interface{}) uintptr { + isCDecl := false + ty := reflect.TypeOf(fn) + for i := 0; i < ty.NumIn(); i++ { + in := ty.In(i) + if !in.AssignableTo(reflect.TypeOf(CDecl{})) { + continue + } + if i != 0 { + panic("purego: CDecl must be the first argument") + } + isCDecl = true + } + if isCDecl { + return syscall.NewCallbackCDecl(fn) + } + return syscall.NewCallback(fn) +} + +func loadSymbol(handle uintptr, name string) (uintptr, error) { + return syscall.GetProcAddress(syscall.Handle(handle), name) +} diff --git a/vendor/github.com/ebitengine/purego/zcallback_amd64.s b/vendor/github.com/ebitengine/purego/zcallback_amd64.s new file mode 100644 index 000000000..6a778bfca --- /dev/null +++ b/vendor/github.com/ebitengine/purego/zcallback_amd64.s @@ -0,0 +1,2014 @@ +// Code generated by wincallback.go using 'go generate'. DO NOT EDIT. + +//go:build darwin || freebsd || linux + +// runtime·callbackasm is called by external code to +// execute Go implemented callback function. It is not +// called from the start, instead runtime·compilecallback +// always returns address into runtime·callbackasm offset +// appropriately so different callbacks start with different +// CALL instruction in runtime·callbackasm. This determines +// which Go callback function is executed later on. +#include "textflag.h" + +TEXT callbackasm(SB), NOSPLIT|NOFRAME, $0 + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + 
CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + 
CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + 
CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + 
CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + 
CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + 
CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + 
CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + 
+	CALL callbackasm1(SB)
+	[... identical CALL callbackasm1(SB) entries continue for each remaining callback slot in the generated amd64 table ...]
+	CALL callbackasm1(SB)
+	CALL callbackasm1(SB)
diff --git a/vendor/github.com/ebitengine/purego/zcallback_arm64.s b/vendor/github.com/ebitengine/purego/zcallback_arm64.s
new file mode 100644
index 000000000..c079b8038
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/zcallback_arm64.s
@@ -0,0 +1,4014 @@
+// Code generated by wincallback.go using 'go generate'. DO NOT EDIT.
+
+//go:build darwin || freebsd || linux
+
+// External code calls into callbackasm at an offset corresponding
+// to the callback index. Callbackasm is a table of MOV and B instructions.
+// The MOV instruction loads R12 with the callback index, and the
+// B instruction branches to callbackasm1.
+// callbackasm1 takes the callback index from R12 and
+// indexes into an array that stores information about each callback.
+// It then calls the Go implementation for that callback.
+#include "textflag.h"
+
+TEXT callbackasm(SB), NOSPLIT|NOFRAME, $0
+	MOVD $0, R12
+	B callbackasm1(SB)
+	MOVD $1, R12
+	B callbackasm1(SB)
+	MOVD $2, R12
+	B callbackasm1(SB)
+	[... MOVD $n, R12 / B callbackasm1(SB) pairs continue for n = 3 through 1369 ...]
+	MOVD $1370, R12
+	B
callbackasm1(SB) + MOVD $1371, R12 + B callbackasm1(SB) + MOVD $1372, R12 + B callbackasm1(SB) + MOVD $1373, R12 + B callbackasm1(SB) + MOVD $1374, R12 + B callbackasm1(SB) + MOVD $1375, R12 + B callbackasm1(SB) + MOVD $1376, R12 + B callbackasm1(SB) + MOVD $1377, R12 + B callbackasm1(SB) + MOVD $1378, R12 + B callbackasm1(SB) + MOVD $1379, R12 + B callbackasm1(SB) + MOVD $1380, R12 + B callbackasm1(SB) + MOVD $1381, R12 + B callbackasm1(SB) + MOVD $1382, R12 + B callbackasm1(SB) + MOVD $1383, R12 + B callbackasm1(SB) + MOVD $1384, R12 + B callbackasm1(SB) + MOVD $1385, R12 + B callbackasm1(SB) + MOVD $1386, R12 + B callbackasm1(SB) + MOVD $1387, R12 + B callbackasm1(SB) + MOVD $1388, R12 + B callbackasm1(SB) + MOVD $1389, R12 + B callbackasm1(SB) + MOVD $1390, R12 + B callbackasm1(SB) + MOVD $1391, R12 + B callbackasm1(SB) + MOVD $1392, R12 + B callbackasm1(SB) + MOVD $1393, R12 + B callbackasm1(SB) + MOVD $1394, R12 + B callbackasm1(SB) + MOVD $1395, R12 + B callbackasm1(SB) + MOVD $1396, R12 + B callbackasm1(SB) + MOVD $1397, R12 + B callbackasm1(SB) + MOVD $1398, R12 + B callbackasm1(SB) + MOVD $1399, R12 + B callbackasm1(SB) + MOVD $1400, R12 + B callbackasm1(SB) + MOVD $1401, R12 + B callbackasm1(SB) + MOVD $1402, R12 + B callbackasm1(SB) + MOVD $1403, R12 + B callbackasm1(SB) + MOVD $1404, R12 + B callbackasm1(SB) + MOVD $1405, R12 + B callbackasm1(SB) + MOVD $1406, R12 + B callbackasm1(SB) + MOVD $1407, R12 + B callbackasm1(SB) + MOVD $1408, R12 + B callbackasm1(SB) + MOVD $1409, R12 + B callbackasm1(SB) + MOVD $1410, R12 + B callbackasm1(SB) + MOVD $1411, R12 + B callbackasm1(SB) + MOVD $1412, R12 + B callbackasm1(SB) + MOVD $1413, R12 + B callbackasm1(SB) + MOVD $1414, R12 + B callbackasm1(SB) + MOVD $1415, R12 + B callbackasm1(SB) + MOVD $1416, R12 + B callbackasm1(SB) + MOVD $1417, R12 + B callbackasm1(SB) + MOVD $1418, R12 + B callbackasm1(SB) + MOVD $1419, R12 + B callbackasm1(SB) + MOVD $1420, R12 + B callbackasm1(SB) + MOVD $1421, R12 + B callbackasm1(SB) + MOVD $1422, R12 + B callbackasm1(SB) + MOVD $1423, R12 + B callbackasm1(SB) + MOVD $1424, R12 + B callbackasm1(SB) + MOVD $1425, R12 + B callbackasm1(SB) + MOVD $1426, R12 + B callbackasm1(SB) + MOVD $1427, R12 + B callbackasm1(SB) + MOVD $1428, R12 + B callbackasm1(SB) + MOVD $1429, R12 + B callbackasm1(SB) + MOVD $1430, R12 + B callbackasm1(SB) + MOVD $1431, R12 + B callbackasm1(SB) + MOVD $1432, R12 + B callbackasm1(SB) + MOVD $1433, R12 + B callbackasm1(SB) + MOVD $1434, R12 + B callbackasm1(SB) + MOVD $1435, R12 + B callbackasm1(SB) + MOVD $1436, R12 + B callbackasm1(SB) + MOVD $1437, R12 + B callbackasm1(SB) + MOVD $1438, R12 + B callbackasm1(SB) + MOVD $1439, R12 + B callbackasm1(SB) + MOVD $1440, R12 + B callbackasm1(SB) + MOVD $1441, R12 + B callbackasm1(SB) + MOVD $1442, R12 + B callbackasm1(SB) + MOVD $1443, R12 + B callbackasm1(SB) + MOVD $1444, R12 + B callbackasm1(SB) + MOVD $1445, R12 + B callbackasm1(SB) + MOVD $1446, R12 + B callbackasm1(SB) + MOVD $1447, R12 + B callbackasm1(SB) + MOVD $1448, R12 + B callbackasm1(SB) + MOVD $1449, R12 + B callbackasm1(SB) + MOVD $1450, R12 + B callbackasm1(SB) + MOVD $1451, R12 + B callbackasm1(SB) + MOVD $1452, R12 + B callbackasm1(SB) + MOVD $1453, R12 + B callbackasm1(SB) + MOVD $1454, R12 + B callbackasm1(SB) + MOVD $1455, R12 + B callbackasm1(SB) + MOVD $1456, R12 + B callbackasm1(SB) + MOVD $1457, R12 + B callbackasm1(SB) + MOVD $1458, R12 + B callbackasm1(SB) + MOVD $1459, R12 + B callbackasm1(SB) + MOVD $1460, R12 + B callbackasm1(SB) + MOVD $1461, R12 + B 
callbackasm1(SB) + MOVD $1462, R12 + B callbackasm1(SB) + MOVD $1463, R12 + B callbackasm1(SB) + MOVD $1464, R12 + B callbackasm1(SB) + MOVD $1465, R12 + B callbackasm1(SB) + MOVD $1466, R12 + B callbackasm1(SB) + MOVD $1467, R12 + B callbackasm1(SB) + MOVD $1468, R12 + B callbackasm1(SB) + MOVD $1469, R12 + B callbackasm1(SB) + MOVD $1470, R12 + B callbackasm1(SB) + MOVD $1471, R12 + B callbackasm1(SB) + MOVD $1472, R12 + B callbackasm1(SB) + MOVD $1473, R12 + B callbackasm1(SB) + MOVD $1474, R12 + B callbackasm1(SB) + MOVD $1475, R12 + B callbackasm1(SB) + MOVD $1476, R12 + B callbackasm1(SB) + MOVD $1477, R12 + B callbackasm1(SB) + MOVD $1478, R12 + B callbackasm1(SB) + MOVD $1479, R12 + B callbackasm1(SB) + MOVD $1480, R12 + B callbackasm1(SB) + MOVD $1481, R12 + B callbackasm1(SB) + MOVD $1482, R12 + B callbackasm1(SB) + MOVD $1483, R12 + B callbackasm1(SB) + MOVD $1484, R12 + B callbackasm1(SB) + MOVD $1485, R12 + B callbackasm1(SB) + MOVD $1486, R12 + B callbackasm1(SB) + MOVD $1487, R12 + B callbackasm1(SB) + MOVD $1488, R12 + B callbackasm1(SB) + MOVD $1489, R12 + B callbackasm1(SB) + MOVD $1490, R12 + B callbackasm1(SB) + MOVD $1491, R12 + B callbackasm1(SB) + MOVD $1492, R12 + B callbackasm1(SB) + MOVD $1493, R12 + B callbackasm1(SB) + MOVD $1494, R12 + B callbackasm1(SB) + MOVD $1495, R12 + B callbackasm1(SB) + MOVD $1496, R12 + B callbackasm1(SB) + MOVD $1497, R12 + B callbackasm1(SB) + MOVD $1498, R12 + B callbackasm1(SB) + MOVD $1499, R12 + B callbackasm1(SB) + MOVD $1500, R12 + B callbackasm1(SB) + MOVD $1501, R12 + B callbackasm1(SB) + MOVD $1502, R12 + B callbackasm1(SB) + MOVD $1503, R12 + B callbackasm1(SB) + MOVD $1504, R12 + B callbackasm1(SB) + MOVD $1505, R12 + B callbackasm1(SB) + MOVD $1506, R12 + B callbackasm1(SB) + MOVD $1507, R12 + B callbackasm1(SB) + MOVD $1508, R12 + B callbackasm1(SB) + MOVD $1509, R12 + B callbackasm1(SB) + MOVD $1510, R12 + B callbackasm1(SB) + MOVD $1511, R12 + B callbackasm1(SB) + MOVD $1512, R12 + B callbackasm1(SB) + MOVD $1513, R12 + B callbackasm1(SB) + MOVD $1514, R12 + B callbackasm1(SB) + MOVD $1515, R12 + B callbackasm1(SB) + MOVD $1516, R12 + B callbackasm1(SB) + MOVD $1517, R12 + B callbackasm1(SB) + MOVD $1518, R12 + B callbackasm1(SB) + MOVD $1519, R12 + B callbackasm1(SB) + MOVD $1520, R12 + B callbackasm1(SB) + MOVD $1521, R12 + B callbackasm1(SB) + MOVD $1522, R12 + B callbackasm1(SB) + MOVD $1523, R12 + B callbackasm1(SB) + MOVD $1524, R12 + B callbackasm1(SB) + MOVD $1525, R12 + B callbackasm1(SB) + MOVD $1526, R12 + B callbackasm1(SB) + MOVD $1527, R12 + B callbackasm1(SB) + MOVD $1528, R12 + B callbackasm1(SB) + MOVD $1529, R12 + B callbackasm1(SB) + MOVD $1530, R12 + B callbackasm1(SB) + MOVD $1531, R12 + B callbackasm1(SB) + MOVD $1532, R12 + B callbackasm1(SB) + MOVD $1533, R12 + B callbackasm1(SB) + MOVD $1534, R12 + B callbackasm1(SB) + MOVD $1535, R12 + B callbackasm1(SB) + MOVD $1536, R12 + B callbackasm1(SB) + MOVD $1537, R12 + B callbackasm1(SB) + MOVD $1538, R12 + B callbackasm1(SB) + MOVD $1539, R12 + B callbackasm1(SB) + MOVD $1540, R12 + B callbackasm1(SB) + MOVD $1541, R12 + B callbackasm1(SB) + MOVD $1542, R12 + B callbackasm1(SB) + MOVD $1543, R12 + B callbackasm1(SB) + MOVD $1544, R12 + B callbackasm1(SB) + MOVD $1545, R12 + B callbackasm1(SB) + MOVD $1546, R12 + B callbackasm1(SB) + MOVD $1547, R12 + B callbackasm1(SB) + MOVD $1548, R12 + B callbackasm1(SB) + MOVD $1549, R12 + B callbackasm1(SB) + MOVD $1550, R12 + B callbackasm1(SB) + MOVD $1551, R12 + B callbackasm1(SB) + MOVD $1552, R12 + B 
callbackasm1(SB) + MOVD $1553, R12 + B callbackasm1(SB) + MOVD $1554, R12 + B callbackasm1(SB) + MOVD $1555, R12 + B callbackasm1(SB) + MOVD $1556, R12 + B callbackasm1(SB) + MOVD $1557, R12 + B callbackasm1(SB) + MOVD $1558, R12 + B callbackasm1(SB) + MOVD $1559, R12 + B callbackasm1(SB) + MOVD $1560, R12 + B callbackasm1(SB) + MOVD $1561, R12 + B callbackasm1(SB) + MOVD $1562, R12 + B callbackasm1(SB) + MOVD $1563, R12 + B callbackasm1(SB) + MOVD $1564, R12 + B callbackasm1(SB) + MOVD $1565, R12 + B callbackasm1(SB) + MOVD $1566, R12 + B callbackasm1(SB) + MOVD $1567, R12 + B callbackasm1(SB) + MOVD $1568, R12 + B callbackasm1(SB) + MOVD $1569, R12 + B callbackasm1(SB) + MOVD $1570, R12 + B callbackasm1(SB) + MOVD $1571, R12 + B callbackasm1(SB) + MOVD $1572, R12 + B callbackasm1(SB) + MOVD $1573, R12 + B callbackasm1(SB) + MOVD $1574, R12 + B callbackasm1(SB) + MOVD $1575, R12 + B callbackasm1(SB) + MOVD $1576, R12 + B callbackasm1(SB) + MOVD $1577, R12 + B callbackasm1(SB) + MOVD $1578, R12 + B callbackasm1(SB) + MOVD $1579, R12 + B callbackasm1(SB) + MOVD $1580, R12 + B callbackasm1(SB) + MOVD $1581, R12 + B callbackasm1(SB) + MOVD $1582, R12 + B callbackasm1(SB) + MOVD $1583, R12 + B callbackasm1(SB) + MOVD $1584, R12 + B callbackasm1(SB) + MOVD $1585, R12 + B callbackasm1(SB) + MOVD $1586, R12 + B callbackasm1(SB) + MOVD $1587, R12 + B callbackasm1(SB) + MOVD $1588, R12 + B callbackasm1(SB) + MOVD $1589, R12 + B callbackasm1(SB) + MOVD $1590, R12 + B callbackasm1(SB) + MOVD $1591, R12 + B callbackasm1(SB) + MOVD $1592, R12 + B callbackasm1(SB) + MOVD $1593, R12 + B callbackasm1(SB) + MOVD $1594, R12 + B callbackasm1(SB) + MOVD $1595, R12 + B callbackasm1(SB) + MOVD $1596, R12 + B callbackasm1(SB) + MOVD $1597, R12 + B callbackasm1(SB) + MOVD $1598, R12 + B callbackasm1(SB) + MOVD $1599, R12 + B callbackasm1(SB) + MOVD $1600, R12 + B callbackasm1(SB) + MOVD $1601, R12 + B callbackasm1(SB) + MOVD $1602, R12 + B callbackasm1(SB) + MOVD $1603, R12 + B callbackasm1(SB) + MOVD $1604, R12 + B callbackasm1(SB) + MOVD $1605, R12 + B callbackasm1(SB) + MOVD $1606, R12 + B callbackasm1(SB) + MOVD $1607, R12 + B callbackasm1(SB) + MOVD $1608, R12 + B callbackasm1(SB) + MOVD $1609, R12 + B callbackasm1(SB) + MOVD $1610, R12 + B callbackasm1(SB) + MOVD $1611, R12 + B callbackasm1(SB) + MOVD $1612, R12 + B callbackasm1(SB) + MOVD $1613, R12 + B callbackasm1(SB) + MOVD $1614, R12 + B callbackasm1(SB) + MOVD $1615, R12 + B callbackasm1(SB) + MOVD $1616, R12 + B callbackasm1(SB) + MOVD $1617, R12 + B callbackasm1(SB) + MOVD $1618, R12 + B callbackasm1(SB) + MOVD $1619, R12 + B callbackasm1(SB) + MOVD $1620, R12 + B callbackasm1(SB) + MOVD $1621, R12 + B callbackasm1(SB) + MOVD $1622, R12 + B callbackasm1(SB) + MOVD $1623, R12 + B callbackasm1(SB) + MOVD $1624, R12 + B callbackasm1(SB) + MOVD $1625, R12 + B callbackasm1(SB) + MOVD $1626, R12 + B callbackasm1(SB) + MOVD $1627, R12 + B callbackasm1(SB) + MOVD $1628, R12 + B callbackasm1(SB) + MOVD $1629, R12 + B callbackasm1(SB) + MOVD $1630, R12 + B callbackasm1(SB) + MOVD $1631, R12 + B callbackasm1(SB) + MOVD $1632, R12 + B callbackasm1(SB) + MOVD $1633, R12 + B callbackasm1(SB) + MOVD $1634, R12 + B callbackasm1(SB) + MOVD $1635, R12 + B callbackasm1(SB) + MOVD $1636, R12 + B callbackasm1(SB) + MOVD $1637, R12 + B callbackasm1(SB) + MOVD $1638, R12 + B callbackasm1(SB) + MOVD $1639, R12 + B callbackasm1(SB) + MOVD $1640, R12 + B callbackasm1(SB) + MOVD $1641, R12 + B callbackasm1(SB) + MOVD $1642, R12 + B callbackasm1(SB) + MOVD $1643, R12 + B 
callbackasm1(SB) + MOVD $1644, R12 + B callbackasm1(SB) + MOVD $1645, R12 + B callbackasm1(SB) + MOVD $1646, R12 + B callbackasm1(SB) + MOVD $1647, R12 + B callbackasm1(SB) + MOVD $1648, R12 + B callbackasm1(SB) + MOVD $1649, R12 + B callbackasm1(SB) + MOVD $1650, R12 + B callbackasm1(SB) + MOVD $1651, R12 + B callbackasm1(SB) + MOVD $1652, R12 + B callbackasm1(SB) + MOVD $1653, R12 + B callbackasm1(SB) + MOVD $1654, R12 + B callbackasm1(SB) + MOVD $1655, R12 + B callbackasm1(SB) + MOVD $1656, R12 + B callbackasm1(SB) + MOVD $1657, R12 + B callbackasm1(SB) + MOVD $1658, R12 + B callbackasm1(SB) + MOVD $1659, R12 + B callbackasm1(SB) + MOVD $1660, R12 + B callbackasm1(SB) + MOVD $1661, R12 + B callbackasm1(SB) + MOVD $1662, R12 + B callbackasm1(SB) + MOVD $1663, R12 + B callbackasm1(SB) + MOVD $1664, R12 + B callbackasm1(SB) + MOVD $1665, R12 + B callbackasm1(SB) + MOVD $1666, R12 + B callbackasm1(SB) + MOVD $1667, R12 + B callbackasm1(SB) + MOVD $1668, R12 + B callbackasm1(SB) + MOVD $1669, R12 + B callbackasm1(SB) + MOVD $1670, R12 + B callbackasm1(SB) + MOVD $1671, R12 + B callbackasm1(SB) + MOVD $1672, R12 + B callbackasm1(SB) + MOVD $1673, R12 + B callbackasm1(SB) + MOVD $1674, R12 + B callbackasm1(SB) + MOVD $1675, R12 + B callbackasm1(SB) + MOVD $1676, R12 + B callbackasm1(SB) + MOVD $1677, R12 + B callbackasm1(SB) + MOVD $1678, R12 + B callbackasm1(SB) + MOVD $1679, R12 + B callbackasm1(SB) + MOVD $1680, R12 + B callbackasm1(SB) + MOVD $1681, R12 + B callbackasm1(SB) + MOVD $1682, R12 + B callbackasm1(SB) + MOVD $1683, R12 + B callbackasm1(SB) + MOVD $1684, R12 + B callbackasm1(SB) + MOVD $1685, R12 + B callbackasm1(SB) + MOVD $1686, R12 + B callbackasm1(SB) + MOVD $1687, R12 + B callbackasm1(SB) + MOVD $1688, R12 + B callbackasm1(SB) + MOVD $1689, R12 + B callbackasm1(SB) + MOVD $1690, R12 + B callbackasm1(SB) + MOVD $1691, R12 + B callbackasm1(SB) + MOVD $1692, R12 + B callbackasm1(SB) + MOVD $1693, R12 + B callbackasm1(SB) + MOVD $1694, R12 + B callbackasm1(SB) + MOVD $1695, R12 + B callbackasm1(SB) + MOVD $1696, R12 + B callbackasm1(SB) + MOVD $1697, R12 + B callbackasm1(SB) + MOVD $1698, R12 + B callbackasm1(SB) + MOVD $1699, R12 + B callbackasm1(SB) + MOVD $1700, R12 + B callbackasm1(SB) + MOVD $1701, R12 + B callbackasm1(SB) + MOVD $1702, R12 + B callbackasm1(SB) + MOVD $1703, R12 + B callbackasm1(SB) + MOVD $1704, R12 + B callbackasm1(SB) + MOVD $1705, R12 + B callbackasm1(SB) + MOVD $1706, R12 + B callbackasm1(SB) + MOVD $1707, R12 + B callbackasm1(SB) + MOVD $1708, R12 + B callbackasm1(SB) + MOVD $1709, R12 + B callbackasm1(SB) + MOVD $1710, R12 + B callbackasm1(SB) + MOVD $1711, R12 + B callbackasm1(SB) + MOVD $1712, R12 + B callbackasm1(SB) + MOVD $1713, R12 + B callbackasm1(SB) + MOVD $1714, R12 + B callbackasm1(SB) + MOVD $1715, R12 + B callbackasm1(SB) + MOVD $1716, R12 + B callbackasm1(SB) + MOVD $1717, R12 + B callbackasm1(SB) + MOVD $1718, R12 + B callbackasm1(SB) + MOVD $1719, R12 + B callbackasm1(SB) + MOVD $1720, R12 + B callbackasm1(SB) + MOVD $1721, R12 + B callbackasm1(SB) + MOVD $1722, R12 + B callbackasm1(SB) + MOVD $1723, R12 + B callbackasm1(SB) + MOVD $1724, R12 + B callbackasm1(SB) + MOVD $1725, R12 + B callbackasm1(SB) + MOVD $1726, R12 + B callbackasm1(SB) + MOVD $1727, R12 + B callbackasm1(SB) + MOVD $1728, R12 + B callbackasm1(SB) + MOVD $1729, R12 + B callbackasm1(SB) + MOVD $1730, R12 + B callbackasm1(SB) + MOVD $1731, R12 + B callbackasm1(SB) + MOVD $1732, R12 + B callbackasm1(SB) + MOVD $1733, R12 + B callbackasm1(SB) + MOVD $1734, R12 + B 
callbackasm1(SB) + MOVD $1735, R12 + B callbackasm1(SB) + MOVD $1736, R12 + B callbackasm1(SB) + MOVD $1737, R12 + B callbackasm1(SB) + MOVD $1738, R12 + B callbackasm1(SB) + MOVD $1739, R12 + B callbackasm1(SB) + MOVD $1740, R12 + B callbackasm1(SB) + MOVD $1741, R12 + B callbackasm1(SB) + MOVD $1742, R12 + B callbackasm1(SB) + MOVD $1743, R12 + B callbackasm1(SB) + MOVD $1744, R12 + B callbackasm1(SB) + MOVD $1745, R12 + B callbackasm1(SB) + MOVD $1746, R12 + B callbackasm1(SB) + MOVD $1747, R12 + B callbackasm1(SB) + MOVD $1748, R12 + B callbackasm1(SB) + MOVD $1749, R12 + B callbackasm1(SB) + MOVD $1750, R12 + B callbackasm1(SB) + MOVD $1751, R12 + B callbackasm1(SB) + MOVD $1752, R12 + B callbackasm1(SB) + MOVD $1753, R12 + B callbackasm1(SB) + MOVD $1754, R12 + B callbackasm1(SB) + MOVD $1755, R12 + B callbackasm1(SB) + MOVD $1756, R12 + B callbackasm1(SB) + MOVD $1757, R12 + B callbackasm1(SB) + MOVD $1758, R12 + B callbackasm1(SB) + MOVD $1759, R12 + B callbackasm1(SB) + MOVD $1760, R12 + B callbackasm1(SB) + MOVD $1761, R12 + B callbackasm1(SB) + MOVD $1762, R12 + B callbackasm1(SB) + MOVD $1763, R12 + B callbackasm1(SB) + MOVD $1764, R12 + B callbackasm1(SB) + MOVD $1765, R12 + B callbackasm1(SB) + MOVD $1766, R12 + B callbackasm1(SB) + MOVD $1767, R12 + B callbackasm1(SB) + MOVD $1768, R12 + B callbackasm1(SB) + MOVD $1769, R12 + B callbackasm1(SB) + MOVD $1770, R12 + B callbackasm1(SB) + MOVD $1771, R12 + B callbackasm1(SB) + MOVD $1772, R12 + B callbackasm1(SB) + MOVD $1773, R12 + B callbackasm1(SB) + MOVD $1774, R12 + B callbackasm1(SB) + MOVD $1775, R12 + B callbackasm1(SB) + MOVD $1776, R12 + B callbackasm1(SB) + MOVD $1777, R12 + B callbackasm1(SB) + MOVD $1778, R12 + B callbackasm1(SB) + MOVD $1779, R12 + B callbackasm1(SB) + MOVD $1780, R12 + B callbackasm1(SB) + MOVD $1781, R12 + B callbackasm1(SB) + MOVD $1782, R12 + B callbackasm1(SB) + MOVD $1783, R12 + B callbackasm1(SB) + MOVD $1784, R12 + B callbackasm1(SB) + MOVD $1785, R12 + B callbackasm1(SB) + MOVD $1786, R12 + B callbackasm1(SB) + MOVD $1787, R12 + B callbackasm1(SB) + MOVD $1788, R12 + B callbackasm1(SB) + MOVD $1789, R12 + B callbackasm1(SB) + MOVD $1790, R12 + B callbackasm1(SB) + MOVD $1791, R12 + B callbackasm1(SB) + MOVD $1792, R12 + B callbackasm1(SB) + MOVD $1793, R12 + B callbackasm1(SB) + MOVD $1794, R12 + B callbackasm1(SB) + MOVD $1795, R12 + B callbackasm1(SB) + MOVD $1796, R12 + B callbackasm1(SB) + MOVD $1797, R12 + B callbackasm1(SB) + MOVD $1798, R12 + B callbackasm1(SB) + MOVD $1799, R12 + B callbackasm1(SB) + MOVD $1800, R12 + B callbackasm1(SB) + MOVD $1801, R12 + B callbackasm1(SB) + MOVD $1802, R12 + B callbackasm1(SB) + MOVD $1803, R12 + B callbackasm1(SB) + MOVD $1804, R12 + B callbackasm1(SB) + MOVD $1805, R12 + B callbackasm1(SB) + MOVD $1806, R12 + B callbackasm1(SB) + MOVD $1807, R12 + B callbackasm1(SB) + MOVD $1808, R12 + B callbackasm1(SB) + MOVD $1809, R12 + B callbackasm1(SB) + MOVD $1810, R12 + B callbackasm1(SB) + MOVD $1811, R12 + B callbackasm1(SB) + MOVD $1812, R12 + B callbackasm1(SB) + MOVD $1813, R12 + B callbackasm1(SB) + MOVD $1814, R12 + B callbackasm1(SB) + MOVD $1815, R12 + B callbackasm1(SB) + MOVD $1816, R12 + B callbackasm1(SB) + MOVD $1817, R12 + B callbackasm1(SB) + MOVD $1818, R12 + B callbackasm1(SB) + MOVD $1819, R12 + B callbackasm1(SB) + MOVD $1820, R12 + B callbackasm1(SB) + MOVD $1821, R12 + B callbackasm1(SB) + MOVD $1822, R12 + B callbackasm1(SB) + MOVD $1823, R12 + B callbackasm1(SB) + MOVD $1824, R12 + B callbackasm1(SB) + MOVD $1825, R12 + B 
callbackasm1(SB) + MOVD $1826, R12 + B callbackasm1(SB) + MOVD $1827, R12 + B callbackasm1(SB) + MOVD $1828, R12 + B callbackasm1(SB) + MOVD $1829, R12 + B callbackasm1(SB) + MOVD $1830, R12 + B callbackasm1(SB) + MOVD $1831, R12 + B callbackasm1(SB) + MOVD $1832, R12 + B callbackasm1(SB) + MOVD $1833, R12 + B callbackasm1(SB) + MOVD $1834, R12 + B callbackasm1(SB) + MOVD $1835, R12 + B callbackasm1(SB) + MOVD $1836, R12 + B callbackasm1(SB) + MOVD $1837, R12 + B callbackasm1(SB) + MOVD $1838, R12 + B callbackasm1(SB) + MOVD $1839, R12 + B callbackasm1(SB) + MOVD $1840, R12 + B callbackasm1(SB) + MOVD $1841, R12 + B callbackasm1(SB) + MOVD $1842, R12 + B callbackasm1(SB) + MOVD $1843, R12 + B callbackasm1(SB) + MOVD $1844, R12 + B callbackasm1(SB) + MOVD $1845, R12 + B callbackasm1(SB) + MOVD $1846, R12 + B callbackasm1(SB) + MOVD $1847, R12 + B callbackasm1(SB) + MOVD $1848, R12 + B callbackasm1(SB) + MOVD $1849, R12 + B callbackasm1(SB) + MOVD $1850, R12 + B callbackasm1(SB) + MOVD $1851, R12 + B callbackasm1(SB) + MOVD $1852, R12 + B callbackasm1(SB) + MOVD $1853, R12 + B callbackasm1(SB) + MOVD $1854, R12 + B callbackasm1(SB) + MOVD $1855, R12 + B callbackasm1(SB) + MOVD $1856, R12 + B callbackasm1(SB) + MOVD $1857, R12 + B callbackasm1(SB) + MOVD $1858, R12 + B callbackasm1(SB) + MOVD $1859, R12 + B callbackasm1(SB) + MOVD $1860, R12 + B callbackasm1(SB) + MOVD $1861, R12 + B callbackasm1(SB) + MOVD $1862, R12 + B callbackasm1(SB) + MOVD $1863, R12 + B callbackasm1(SB) + MOVD $1864, R12 + B callbackasm1(SB) + MOVD $1865, R12 + B callbackasm1(SB) + MOVD $1866, R12 + B callbackasm1(SB) + MOVD $1867, R12 + B callbackasm1(SB) + MOVD $1868, R12 + B callbackasm1(SB) + MOVD $1869, R12 + B callbackasm1(SB) + MOVD $1870, R12 + B callbackasm1(SB) + MOVD $1871, R12 + B callbackasm1(SB) + MOVD $1872, R12 + B callbackasm1(SB) + MOVD $1873, R12 + B callbackasm1(SB) + MOVD $1874, R12 + B callbackasm1(SB) + MOVD $1875, R12 + B callbackasm1(SB) + MOVD $1876, R12 + B callbackasm1(SB) + MOVD $1877, R12 + B callbackasm1(SB) + MOVD $1878, R12 + B callbackasm1(SB) + MOVD $1879, R12 + B callbackasm1(SB) + MOVD $1880, R12 + B callbackasm1(SB) + MOVD $1881, R12 + B callbackasm1(SB) + MOVD $1882, R12 + B callbackasm1(SB) + MOVD $1883, R12 + B callbackasm1(SB) + MOVD $1884, R12 + B callbackasm1(SB) + MOVD $1885, R12 + B callbackasm1(SB) + MOVD $1886, R12 + B callbackasm1(SB) + MOVD $1887, R12 + B callbackasm1(SB) + MOVD $1888, R12 + B callbackasm1(SB) + MOVD $1889, R12 + B callbackasm1(SB) + MOVD $1890, R12 + B callbackasm1(SB) + MOVD $1891, R12 + B callbackasm1(SB) + MOVD $1892, R12 + B callbackasm1(SB) + MOVD $1893, R12 + B callbackasm1(SB) + MOVD $1894, R12 + B callbackasm1(SB) + MOVD $1895, R12 + B callbackasm1(SB) + MOVD $1896, R12 + B callbackasm1(SB) + MOVD $1897, R12 + B callbackasm1(SB) + MOVD $1898, R12 + B callbackasm1(SB) + MOVD $1899, R12 + B callbackasm1(SB) + MOVD $1900, R12 + B callbackasm1(SB) + MOVD $1901, R12 + B callbackasm1(SB) + MOVD $1902, R12 + B callbackasm1(SB) + MOVD $1903, R12 + B callbackasm1(SB) + MOVD $1904, R12 + B callbackasm1(SB) + MOVD $1905, R12 + B callbackasm1(SB) + MOVD $1906, R12 + B callbackasm1(SB) + MOVD $1907, R12 + B callbackasm1(SB) + MOVD $1908, R12 + B callbackasm1(SB) + MOVD $1909, R12 + B callbackasm1(SB) + MOVD $1910, R12 + B callbackasm1(SB) + MOVD $1911, R12 + B callbackasm1(SB) + MOVD $1912, R12 + B callbackasm1(SB) + MOVD $1913, R12 + B callbackasm1(SB) + MOVD $1914, R12 + B callbackasm1(SB) + MOVD $1915, R12 + B callbackasm1(SB) + MOVD $1916, R12 + B 
callbackasm1(SB) + MOVD $1917, R12 + B callbackasm1(SB) + MOVD $1918, R12 + B callbackasm1(SB) + MOVD $1919, R12 + B callbackasm1(SB) + MOVD $1920, R12 + B callbackasm1(SB) + MOVD $1921, R12 + B callbackasm1(SB) + MOVD $1922, R12 + B callbackasm1(SB) + MOVD $1923, R12 + B callbackasm1(SB) + MOVD $1924, R12 + B callbackasm1(SB) + MOVD $1925, R12 + B callbackasm1(SB) + MOVD $1926, R12 + B callbackasm1(SB) + MOVD $1927, R12 + B callbackasm1(SB) + MOVD $1928, R12 + B callbackasm1(SB) + MOVD $1929, R12 + B callbackasm1(SB) + MOVD $1930, R12 + B callbackasm1(SB) + MOVD $1931, R12 + B callbackasm1(SB) + MOVD $1932, R12 + B callbackasm1(SB) + MOVD $1933, R12 + B callbackasm1(SB) + MOVD $1934, R12 + B callbackasm1(SB) + MOVD $1935, R12 + B callbackasm1(SB) + MOVD $1936, R12 + B callbackasm1(SB) + MOVD $1937, R12 + B callbackasm1(SB) + MOVD $1938, R12 + B callbackasm1(SB) + MOVD $1939, R12 + B callbackasm1(SB) + MOVD $1940, R12 + B callbackasm1(SB) + MOVD $1941, R12 + B callbackasm1(SB) + MOVD $1942, R12 + B callbackasm1(SB) + MOVD $1943, R12 + B callbackasm1(SB) + MOVD $1944, R12 + B callbackasm1(SB) + MOVD $1945, R12 + B callbackasm1(SB) + MOVD $1946, R12 + B callbackasm1(SB) + MOVD $1947, R12 + B callbackasm1(SB) + MOVD $1948, R12 + B callbackasm1(SB) + MOVD $1949, R12 + B callbackasm1(SB) + MOVD $1950, R12 + B callbackasm1(SB) + MOVD $1951, R12 + B callbackasm1(SB) + MOVD $1952, R12 + B callbackasm1(SB) + MOVD $1953, R12 + B callbackasm1(SB) + MOVD $1954, R12 + B callbackasm1(SB) + MOVD $1955, R12 + B callbackasm1(SB) + MOVD $1956, R12 + B callbackasm1(SB) + MOVD $1957, R12 + B callbackasm1(SB) + MOVD $1958, R12 + B callbackasm1(SB) + MOVD $1959, R12 + B callbackasm1(SB) + MOVD $1960, R12 + B callbackasm1(SB) + MOVD $1961, R12 + B callbackasm1(SB) + MOVD $1962, R12 + B callbackasm1(SB) + MOVD $1963, R12 + B callbackasm1(SB) + MOVD $1964, R12 + B callbackasm1(SB) + MOVD $1965, R12 + B callbackasm1(SB) + MOVD $1966, R12 + B callbackasm1(SB) + MOVD $1967, R12 + B callbackasm1(SB) + MOVD $1968, R12 + B callbackasm1(SB) + MOVD $1969, R12 + B callbackasm1(SB) + MOVD $1970, R12 + B callbackasm1(SB) + MOVD $1971, R12 + B callbackasm1(SB) + MOVD $1972, R12 + B callbackasm1(SB) + MOVD $1973, R12 + B callbackasm1(SB) + MOVD $1974, R12 + B callbackasm1(SB) + MOVD $1975, R12 + B callbackasm1(SB) + MOVD $1976, R12 + B callbackasm1(SB) + MOVD $1977, R12 + B callbackasm1(SB) + MOVD $1978, R12 + B callbackasm1(SB) + MOVD $1979, R12 + B callbackasm1(SB) + MOVD $1980, R12 + B callbackasm1(SB) + MOVD $1981, R12 + B callbackasm1(SB) + MOVD $1982, R12 + B callbackasm1(SB) + MOVD $1983, R12 + B callbackasm1(SB) + MOVD $1984, R12 + B callbackasm1(SB) + MOVD $1985, R12 + B callbackasm1(SB) + MOVD $1986, R12 + B callbackasm1(SB) + MOVD $1987, R12 + B callbackasm1(SB) + MOVD $1988, R12 + B callbackasm1(SB) + MOVD $1989, R12 + B callbackasm1(SB) + MOVD $1990, R12 + B callbackasm1(SB) + MOVD $1991, R12 + B callbackasm1(SB) + MOVD $1992, R12 + B callbackasm1(SB) + MOVD $1993, R12 + B callbackasm1(SB) + MOVD $1994, R12 + B callbackasm1(SB) + MOVD $1995, R12 + B callbackasm1(SB) + MOVD $1996, R12 + B callbackasm1(SB) + MOVD $1997, R12 + B callbackasm1(SB) + MOVD $1998, R12 + B callbackasm1(SB) + MOVD $1999, R12 + B callbackasm1(SB) diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_aix_nocgo.go b/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_aix_nocgo.go deleted file mode 100644 index a77b4dbb7..000000000 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_aix_nocgo.go +++ /dev/null @@ -1,92 +0,0 @@ -//go:build aix && 
diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_darwin.go b/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_darwin.go
deleted file mode 100644
index 41f395e5e..000000000
--- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_darwin.go
+++ /dev/null
@@ -1,117 +0,0 @@
-//go:build darwin
-// +build darwin
-
-package cpu
	[... remaining deleted lines condensed: ClocksPerSec defaults to 128 and is corrected from sysconf(SC_CLK_TCK) in init; TimesWithContext dispatches to perCPUTimes or allCPUTimes; InfoWithContext fills an InfoStat from the machdep.cpu.* sysctls (brand string, family, model, stepping, feature flags, core count, cache size, vendor) and takes Mhz from m1cpu.PCoreHz on Apple silicon or from the static hw.cpufrequency value otherwise; CountsWithContext reads hw.logicalcpu or hw.physicalcpu ...]
diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_darwin_cgo.go b/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_darwin_cgo.go
deleted file mode 100644
index 1d5f0772e..000000000
--- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_darwin_cgo.go
+++ /dev/null
@@ -1,111 +0,0 @@
-//go:build darwin && cgo
-// +build darwin,cgo
-
-package cpu
	[... remaining deleted lines condensed: cgo code borrowed from influxdb/telegraf; perCPUTimes calls host_processor_info(PROCESSOR_CPU_LOAD_INFO), binary.Reads each CPU's tick array out of the returned buffer, scales the ticks by ClocksPerSec, and frees the buffer with vm_deallocate; allCPUTimes calls host_statistics(HOST_CPU_LOAD_INFO) and builds the cpu-total TimesStat the same way ...]
diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_darwin_nocgo.go b/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_darwin_nocgo.go
deleted file mode 100644
index e067e99f9..000000000
--- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_darwin_nocgo.go
+++ /dev/null
@@ -1,14 +0,0 @@
-//go:build darwin && !cgo
-// +build darwin,!cgo
-
-package cpu
-
-import "github.com/shirou/gopsutil/v3/internal/common"
-
-func perCPUTimes() ([]TimesStat, error) {
-	return []TimesStat{}, common.ErrNotImplementedError
-}
-
-func allCPUTimes() ([]TimesStat, error) {
-	return []TimesStat{}, common.ErrNotImplementedError
-}
diff --git a/vendor/github.com/shirou/gopsutil/v3/internal/common/common_darwin.go b/vendor/github.com/shirou/gopsutil/v3/internal/common/common_darwin.go
deleted file mode 100644
index f1a784597..000000000
--- a/vendor/github.com/shirou/gopsutil/v3/internal/common/common_darwin.go
+++ /dev/null
@@ -1,66 +0,0 @@
-//go:build darwin
-// +build darwin
-
-package common
	[... remaining deleted lines condensed: DoSysctrlWithContext runs `sysctl -n <mib>` with a sanitized environment and returns the whitespace-split output fields; CallSyscall invokes syscall 202 (SYS___SYSCTL) twice via unix.Syscall6, first to learn the required buffer length and then to fill the buffer ...]
diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_darwin.go b/vendor/github.com/shirou/gopsutil/v3/mem/mem_darwin.go
deleted file mode 100644
index a05a0faba..000000000
--- a/vendor/github.com/shirou/gopsutil/v3/mem/mem_darwin.go
+++ /dev/null
@@ -1,72 +0,0 @@
-//go:build darwin
-// +build darwin
-
-package mem
	[... remaining deleted lines condensed: getHwMemsize reads the hw.memsize sysctl; a swapUsage struct mirrors xsw_usage from sys/sysctl.h; SwapMemoryWithContext reads the raw vm.swapusage sysctl (expecting exactly 32 bytes), casts it to swapUsage, and derives Total, Used, Free, and UsedPercent; SwapDevices returns common.ErrNotImplementedError ...]
diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_darwin_cgo.go b/vendor/github.com/shirou/gopsutil/v3/mem/mem_darwin_cgo.go
deleted file mode 100644
index e5da7dcdb..000000000
--- a/vendor/github.com/shirou/gopsutil/v3/mem/mem_darwin_cgo.go
+++ /dev/null
@@ -1,58 +0,0 @@
-//go:build darwin && cgo
-// +build darwin,cgo
-
-package mem
	[... remaining deleted lines condensed: VirtualMemoryWithContext calls host_statistics(HOST_VM_INFO), converts the page counts using vm_kernel_page_size, and treats inactive plus free pages as Available against the hw.memsize total ...]
diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_darwin_nocgo.go b/vendor/github.com/shirou/gopsutil/v3/mem/mem_darwin_nocgo.go
deleted file mode 100644
index c93931680..000000000
--- a/vendor/github.com/shirou/gopsutil/v3/mem/mem_darwin_nocgo.go
+++ /dev/null
@@ -1,89 +0,0 @@
-//go:build darwin && !cgo
-// +build darwin,!cgo
-
-package mem
	[... remaining deleted lines condensed: parseVMStat scans `vm_stat` output for the "Pages free", "Pages active", "Pages inactive", and "Pages wired down" rows and multiplies each count by unix.Getpagesize(); VirtualMemoryWithContext combines that with getHwMemsize, computing Available = Free + Inactive and Used = Total - Available ...]
diff --git a/vendor/github.com/shirou/gopsutil/v3/net/net_fallback.go b/vendor/github.com/shirou/gopsutil/v3/net/net_fallback.go
deleted file mode 100644
index e136be1ba..000000000
--- a/vendor/github.com/shirou/gopsutil/v3/net/net_fallback.go
+++ /dev/null
@@ -1,93 +0,0 @@
-//go:build !aix && !darwin && !linux && !freebsd && !openbsd && !windows && !solaris
-// +build !aix,!darwin,!linux,!freebsd,!openbsd,!windows,!solaris
-
-package net
	[... remaining deleted lines condensed: stub implementations for unsupported platforms; IOCounters, FilterCounters, ConntrackStats, ProtoCounters, Connections, and every Max, Pid, and WithoutUids variant return common.ErrNotImplementedError ...]
diff --git a/vendor/github.com/shirou/gopsutil/v3/net/net_linux_111.go b/vendor/github.com/shirou/gopsutil/v3/net/net_linux_111.go
deleted file mode 100644
index bd5c95871..000000000
--- a/vendor/github.com/shirou/gopsutil/v3/net/net_linux_111.go
+++ /dev/null
@@ -1,12 +0,0 @@
-//go:build !go1.16
-// +build !go1.16
-
-package net
-
-import (
-	"os"
-)
-
-func readDir(f *os.File, max int) ([]os.FileInfo, error) {
-	return f.Readdir(max)
-}
diff --git a/vendor/github.com/shirou/gopsutil/v3/net/net_linux_116.go b/vendor/github.com/shirou/gopsutil/v3/net/net_linux_116.go
deleted file mode 100644
index a45072e92..000000000
--- a/vendor/github.com/shirou/gopsutil/v3/net/net_linux_116.go
+++ /dev/null
@@ -1,12 +0,0 @@
-//go:build go1.16
-// +build go1.16
-
-package net
-
-import (
-	"os"
-)
-
-func readDir(f *os.File, max int) ([]os.DirEntry, error) {
-	return f.ReadDir(max)
-}
diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_bsd.go b/vendor/github.com/shirou/gopsutil/v3/process/process_bsd.go
deleted file mode 100644
index 263829ffa..000000000
--- a/vendor/github.com/shirou/gopsutil/v3/process/process_bsd.go
+++ /dev/null
@@ -1,76 +0,0 @@
-//go:build darwin || freebsd || openbsd
-// +build darwin freebsd openbsd
-
-package process
	[... remaining deleted lines condensed: shared BSD stubs in which Tgid, IOnice, Rlimit, RlimitUsage, NumCtxSwitches, NumFDs, CPUAffinity, MemoryInfoEx, PageFaults, OpenFiles, MemoryMaps, Threads, and Environ all return common.ErrNotImplementedError; parseKinfoProc decodes a KinfoProc from a little-endian byte buffer ...]
diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_darwin.go b/vendor/github.com/shirou/gopsutil/v3/process/process_darwin.go
deleted file mode 100644
index 176661cbd..000000000
--- a/vendor/github.com/shirou/gopsutil/v3/process/process_darwin.go
+++ /dev/null
@@ -1,325 +0,0 @@
-//go:build darwin
-// +build darwin
-
-package process
	[... remaining deleted lines condensed: kern.proc sysctl constants copied from sys/sysctl.h; clockTicks defaults to 100 and is corrected from sysconf(SC_CLK_TCK); pidsWithContext lists PIDs via unix.SysctlKinfoProcSlice("kern.proc.all"); Ppid, Name, createTime, Uids, Gids, and Nice read fields of the KinfoProc; Status and Foreground shell out to ps; Groups, Terminal, and IOCounters are unimplemented; convertCPUTimes converts ps-style h:mm:ss.cc time strings into seconds using clockTicks; the source text is cut off partway through this 325-line file ...]
*Process) ChildrenWithContext(ctx context.Context) ([]*Process, error) {
- pids, err := common.CallPgrepWithContext(ctx, invoke, p.Pid)
- if err != nil {
- return nil, err
- }
- ret := make([]*Process, 0, len(pids))
- for _, pid := range pids {
- np, err := NewProcessWithContext(ctx, pid)
- if err != nil {
- return nil, err
- }
- ret = append(ret, np)
- }
- return ret, nil
-}
-
-func (p *Process) ConnectionsWithContext(ctx context.Context) ([]net.ConnectionStat, error) {
- return net.ConnectionsPidWithContext(ctx, "all", p.Pid)
-}
-
-func (p *Process) ConnectionsMaxWithContext(ctx context.Context, max int) ([]net.ConnectionStat, error) {
- return net.ConnectionsPidMaxWithContext(ctx, "all", p.Pid, max)
-}
-
-func ProcessesWithContext(ctx context.Context) ([]*Process, error) {
- out := []*Process{}
-
- pids, err := PidsWithContext(ctx)
- if err != nil {
- return out, err
- }
-
- for _, pid := range pids {
- p, err := NewProcessWithContext(ctx, pid)
- if err != nil {
- continue
- }
- out = append(out, p)
- }
-
- return out, nil
-}
-
-// Returns a proc as defined here:
-// http://unix.superglobalmegacorp.com/Net2/newsrc/sys/kinfo_proc.h.html
-func (p *Process) getKProc() (*unix.KinfoProc, error) {
- return unix.SysctlKinfoProc("kern.proc.pid", int(p.Pid))
-}
-
-// callPsWithContext runs the ps command and returns its output with the
-// header line removed and each remaining line split on spaces; the caller is
-// responsible for any further parsing (so a valid ps format argument must be
-// passed). If pid is 0, information for all processes is returned.
-func callPsWithContext(ctx context.Context, arg string, pid int32, threadOption bool, nameOption bool) ([][]string, error) {
- var cmd []string
- if pid == 0 { // will get from all processes.
- cmd = []string{"-ax", "-o", arg}
- } else if threadOption {
- cmd = []string{"-x", "-o", arg, "-M", "-p", strconv.Itoa(int(pid))}
- } else {
- cmd = []string{"-x", "-o", arg, "-p", strconv.Itoa(int(pid))}
- }
- if nameOption {
- cmd = append(cmd, "-c")
- }
- out, err := invoke.CommandWithContext(ctx, "ps", cmd...)
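// For illustration: for pid 123 the assembled invocation is roughly
// `ps -x -o <arg> -p 123`. threadOption adds -M, so each thread of the
// process yields its own output row (NumThreadsWithContext counts those
// rows), and nameOption appends -c, which reduces the "command" column to
// the bare executable name instead of the full command line.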
- if err != nil { - return [][]string{}, err - } - lines := strings.Split(string(out), "\n") - - var ret [][]string - for _, l := range lines[1:] { - var lr []string - if nameOption { - lr = append(lr, l) - } else { - for _, r := range strings.Split(l, " ") { - if r == "" { - continue - } - lr = append(lr, strings.TrimSpace(r)) - } - } - if len(lr) != 0 { - ret = append(ret, lr) - } - } - - return ret, nil -} diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_darwin_cgo.go b/vendor/github.com/shirou/gopsutil/v3/process/process_darwin_cgo.go deleted file mode 100644 index 858f08e7a..000000000 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_darwin_cgo.go +++ /dev/null @@ -1,222 +0,0 @@ -//go:build darwin && cgo -// +build darwin,cgo - -package process - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -import "C" - -import ( - "bytes" - "context" - "fmt" - "strings" - "syscall" - "unsafe" - - "github.com/shirou/gopsutil/v3/cpu" -) - -var ( - argMax int - timescaleToNanoSeconds float64 -) - -func init() { - argMax = getArgMax() - timescaleToNanoSeconds = getTimeScaleToNanoSeconds() -} - -func getArgMax() int { - var ( - mib = [...]C.int{C.CTL_KERN, C.KERN_ARGMAX} - argmax C.int - size C.size_t = C.ulong(unsafe.Sizeof(argmax)) - ) - retval := C.sysctl(&mib[0], 2, unsafe.Pointer(&argmax), &size, C.NULL, 0) - if retval == 0 { - return int(argmax) - } - return 0 -} - -func getTimeScaleToNanoSeconds() float64 { - var timeBaseInfo C.struct_mach_timebase_info - - C.mach_timebase_info(&timeBaseInfo) - - return float64(timeBaseInfo.numer) / float64(timeBaseInfo.denom) -} - -func (p *Process) ExeWithContext(ctx context.Context) (string, error) { - var c C.char // need a var for unsafe.Sizeof need a var - const bufsize = C.PROC_PIDPATHINFO_MAXSIZE * unsafe.Sizeof(c) - buffer := (*C.char)(C.malloc(C.size_t(bufsize))) - defer C.free(unsafe.Pointer(buffer)) - - ret, err := C.proc_pidpath(C.int(p.Pid), unsafe.Pointer(buffer), C.uint32_t(bufsize)) - if err != nil { - return "", err - } - if ret <= 0 { - return "", fmt.Errorf("unknown error: proc_pidpath returned %d", ret) - } - - return C.GoString(buffer), nil -} - -// CwdWithContext retrieves the Current Working Directory for the given process. -// It uses the proc_pidinfo from libproc and will only work for processes the -// EUID can access. Otherwise "operation not permitted" will be returned as the -// error. -// Note: This might also work for other *BSD OSs. 
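// The implementation below allocates a proc_vnodepathinfo struct, has the
// kernel fill it via proc_pidinfo(pid, PROC_PIDVNODEPATHINFO, ...), and
// reads the current working directory from pvi_cdir.vip_path; a
// syscall.EPERM result is translated into ErrorNotPermitted for the caller.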
-func (p *Process) CwdWithContext(ctx context.Context) (string, error) { - const vpiSize = C.sizeof_struct_proc_vnodepathinfo - vpi := (*C.struct_proc_vnodepathinfo)(C.malloc(vpiSize)) - defer C.free(unsafe.Pointer(vpi)) - ret, err := C.proc_pidinfo(C.int(p.Pid), C.PROC_PIDVNODEPATHINFO, 0, unsafe.Pointer(vpi), vpiSize) - if err != nil { - // fmt.Printf("ret: %d %T\n", ret, err) - if err == syscall.EPERM { - return "", ErrorNotPermitted - } - return "", err - } - if ret <= 0 { - return "", fmt.Errorf("unknown error: proc_pidinfo returned %d", ret) - } - if ret != C.sizeof_struct_proc_vnodepathinfo { - return "", fmt.Errorf("too few bytes; expected %d, got %d", vpiSize, ret) - } - return C.GoString(&vpi.pvi_cdir.vip_path[0]), err -} - -func procArgs(pid int32) ([]byte, int, error) { - var ( - mib = [...]C.int{C.CTL_KERN, C.KERN_PROCARGS2, C.int(pid)} - size C.size_t = C.ulong(argMax) - nargs C.int - result []byte - ) - procargs := (*C.char)(C.malloc(C.ulong(argMax))) - defer C.free(unsafe.Pointer(procargs)) - retval, err := C.sysctl(&mib[0], 3, unsafe.Pointer(procargs), &size, C.NULL, 0) - if retval == 0 { - C.memcpy(unsafe.Pointer(&nargs), unsafe.Pointer(procargs), C.sizeof_int) - result = C.GoBytes(unsafe.Pointer(procargs), C.int(size)) - // fmt.Printf("size: %d %d\n%s\n", size, nargs, hex.Dump(result)) - return result, int(nargs), nil - } - return nil, 0, err -} - -func (p *Process) CmdlineSliceWithContext(ctx context.Context) ([]string, error) { - return p.cmdlineSliceWithContext(ctx, true) -} - -func (p *Process) cmdlineSliceWithContext(ctx context.Context, fallback bool) ([]string, error) { - pargs, nargs, err := procArgs(p.Pid) - if err != nil { - return nil, err - } - // The first bytes hold the nargs int, skip it. - args := bytes.Split((pargs)[C.sizeof_int:], []byte{0}) - var argStr string - // The first element is the actual binary/command path. - // command := args[0] - var argSlice []string - // var envSlice []string - // All other, non-zero elements are arguments. The first "nargs" elements - // are the arguments. Everything else in the slice is then the environment - // of the process. 
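// In short, the KERN_PROCARGS2 buffer is laid out as: a C int holding nargs,
// the executable path, then NUL-separated strings, of which the first nargs
// non-empty entries are argv and the remainder is the environment. The loop
// below collects the argv entries and stops once nargs strings have been
// taken.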
- for _, arg := range args[1:] { - argStr = string(arg[:]) - if len(argStr) > 0 { - if nargs > 0 { - argSlice = append(argSlice, argStr) - nargs-- - continue - } - break - // envSlice = append(envSlice, argStr) - } - } - return argSlice, err -} - -// cmdNameWithContext returns the command name (including spaces) without any arguments -func (p *Process) cmdNameWithContext(ctx context.Context) (string, error) { - r, err := p.cmdlineSliceWithContext(ctx, false) - if err != nil { - return "", err - } - - if len(r) == 0 { - return "", nil - } - - return r[0], err -} - -func (p *Process) CmdlineWithContext(ctx context.Context) (string, error) { - r, err := p.CmdlineSliceWithContext(ctx) - if err != nil { - return "", err - } - return strings.Join(r, " "), err -} - -func (p *Process) NumThreadsWithContext(ctx context.Context) (int32, error) { - const tiSize = C.sizeof_struct_proc_taskinfo - ti := (*C.struct_proc_taskinfo)(C.malloc(tiSize)) - defer C.free(unsafe.Pointer(ti)) - - _, err := C.proc_pidinfo(C.int(p.Pid), C.PROC_PIDTASKINFO, 0, unsafe.Pointer(ti), tiSize) - if err != nil { - return 0, err - } - - return int32(ti.pti_threadnum), nil -} - -func (p *Process) TimesWithContext(ctx context.Context) (*cpu.TimesStat, error) { - const tiSize = C.sizeof_struct_proc_taskinfo - ti := (*C.struct_proc_taskinfo)(C.malloc(tiSize)) - defer C.free(unsafe.Pointer(ti)) - - _, err := C.proc_pidinfo(C.int(p.Pid), C.PROC_PIDTASKINFO, 0, unsafe.Pointer(ti), tiSize) - if err != nil { - return nil, err - } - - ret := &cpu.TimesStat{ - CPU: "cpu", - User: float64(ti.pti_total_user) * timescaleToNanoSeconds / 1e9, - System: float64(ti.pti_total_system) * timescaleToNanoSeconds / 1e9, - } - return ret, nil -} - -func (p *Process) MemoryInfoWithContext(ctx context.Context) (*MemoryInfoStat, error) { - const tiSize = C.sizeof_struct_proc_taskinfo - ti := (*C.struct_proc_taskinfo)(C.malloc(tiSize)) - defer C.free(unsafe.Pointer(ti)) - - _, err := C.proc_pidinfo(C.int(p.Pid), C.PROC_PIDTASKINFO, 0, unsafe.Pointer(ti), tiSize) - if err != nil { - return nil, err - } - - ret := &MemoryInfoStat{ - RSS: uint64(ti.pti_resident_size), - VMS: uint64(ti.pti_virtual_size), - Swap: uint64(ti.pti_pageins), - } - return ret, nil -} diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_darwin_nocgo.go b/vendor/github.com/shirou/gopsutil/v3/process/process_darwin_nocgo.go deleted file mode 100644 index bc1d357df..000000000 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_darwin_nocgo.go +++ /dev/null @@ -1,127 +0,0 @@ -//go:build darwin && !cgo -// +build darwin,!cgo - -package process - -import ( - "context" - "fmt" - "strconv" - "strings" - - "github.com/shirou/gopsutil/v3/cpu" - "github.com/shirou/gopsutil/v3/internal/common" -) - -func (p *Process) CwdWithContext(ctx context.Context) (string, error) { - return "", common.ErrNotImplementedError -} - -func (p *Process) ExeWithContext(ctx context.Context) (string, error) { - out, err := invoke.CommandWithContext(ctx, "lsof", "-p", strconv.Itoa(int(p.Pid)), "-Fpfn") - if err != nil { - return "", fmt.Errorf("bad call to lsof: %s", err) - } - txtFound := 0 - lines := strings.Split(string(out), "\n") - for i := 1; i < len(lines); i++ { - if lines[i] == "ftxt" { - txtFound++ - if txtFound == 2 { - return lines[i-1][1:], nil - } - } - } - return "", fmt.Errorf("missing txt data returned by lsof") -} - -func (p *Process) CmdlineWithContext(ctx context.Context) (string, error) { - r, err := callPsWithContext(ctx, "command", p.Pid, false, false) - if err != nil { - 
return "", err - } - return strings.Join(r[0], " "), err -} - -func (p *Process) cmdNameWithContext(ctx context.Context) (string, error) { - r, err := callPsWithContext(ctx, "command", p.Pid, false, true) - if err != nil { - return "", err - } - if len(r) > 0 && len(r[0]) > 0 { - return r[0][0], err - } - - return "", err -} - -// CmdlineSliceWithContext returns the command line arguments of the process as a slice with each -// element being an argument. Because of current deficiencies in the way that the command -// line arguments are found, single arguments that have spaces in the will actually be -// reported as two separate items. In order to do something better CGO would be needed -// to use the native darwin functions. -func (p *Process) CmdlineSliceWithContext(ctx context.Context) ([]string, error) { - r, err := callPsWithContext(ctx, "command", p.Pid, false, false) - if err != nil { - return nil, err - } - return r[0], err -} - -func (p *Process) NumThreadsWithContext(ctx context.Context) (int32, error) { - r, err := callPsWithContext(ctx, "utime,stime", p.Pid, true, false) - if err != nil { - return 0, err - } - return int32(len(r)), nil -} - -func (p *Process) TimesWithContext(ctx context.Context) (*cpu.TimesStat, error) { - r, err := callPsWithContext(ctx, "utime,stime", p.Pid, false, false) - if err != nil { - return nil, err - } - - utime, err := convertCPUTimes(r[0][0]) - if err != nil { - return nil, err - } - stime, err := convertCPUTimes(r[0][1]) - if err != nil { - return nil, err - } - - ret := &cpu.TimesStat{ - CPU: "cpu", - User: utime, - System: stime, - } - return ret, nil -} - -func (p *Process) MemoryInfoWithContext(ctx context.Context) (*MemoryInfoStat, error) { - r, err := callPsWithContext(ctx, "rss,vsize,pagein", p.Pid, false, false) - if err != nil { - return nil, err - } - rss, err := strconv.Atoi(r[0][0]) - if err != nil { - return nil, err - } - vms, err := strconv.Atoi(r[0][1]) - if err != nil { - return nil, err - } - pagein, err := strconv.Atoi(r[0][2]) - if err != nil { - return nil, err - } - - ret := &MemoryInfoStat{ - RSS: uint64(rss) * 1024, - VMS: uint64(vms) * 1024, - Swap: uint64(pagein), - } - - return ret, nil -} diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_fallback.go b/vendor/github.com/shirou/gopsutil/v3/process/process_fallback.go deleted file mode 100644 index 1a5d0c4b4..000000000 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_fallback.go +++ /dev/null @@ -1,203 +0,0 @@ -//go:build !darwin && !linux && !freebsd && !openbsd && !windows && !solaris && !plan9 -// +build !darwin,!linux,!freebsd,!openbsd,!windows,!solaris,!plan9 - -package process - -import ( - "context" - "syscall" - - "github.com/shirou/gopsutil/v3/cpu" - "github.com/shirou/gopsutil/v3/internal/common" - "github.com/shirou/gopsutil/v3/net" -) - -type Signal = syscall.Signal - -type MemoryMapsStat struct { - Path string `json:"path"` - Rss uint64 `json:"rss"` - Size uint64 `json:"size"` - Pss uint64 `json:"pss"` - SharedClean uint64 `json:"sharedClean"` - SharedDirty uint64 `json:"sharedDirty"` - PrivateClean uint64 `json:"privateClean"` - PrivateDirty uint64 `json:"privateDirty"` - Referenced uint64 `json:"referenced"` - Anonymous uint64 `json:"anonymous"` - Swap uint64 `json:"swap"` -} - -type MemoryInfoExStat struct{} - -func pidsWithContext(ctx context.Context) ([]int32, error) { - return nil, common.ErrNotImplementedError -} - -func ProcessesWithContext(ctx context.Context) ([]*Process, error) { - return nil, common.ErrNotImplementedError 
-} - -func PidExistsWithContext(ctx context.Context, pid int32) (bool, error) { - return false, common.ErrNotImplementedError -} - -func (p *Process) PpidWithContext(ctx context.Context) (int32, error) { - return 0, common.ErrNotImplementedError -} - -func (p *Process) NameWithContext(ctx context.Context) (string, error) { - return "", common.ErrNotImplementedError -} - -func (p *Process) TgidWithContext(ctx context.Context) (int32, error) { - return 0, common.ErrNotImplementedError -} - -func (p *Process) ExeWithContext(ctx context.Context) (string, error) { - return "", common.ErrNotImplementedError -} - -func (p *Process) CmdlineWithContext(ctx context.Context) (string, error) { - return "", common.ErrNotImplementedError -} - -func (p *Process) CmdlineSliceWithContext(ctx context.Context) ([]string, error) { - return nil, common.ErrNotImplementedError -} - -func (p *Process) createTimeWithContext(ctx context.Context) (int64, error) { - return 0, common.ErrNotImplementedError -} - -func (p *Process) CwdWithContext(ctx context.Context) (string, error) { - return "", common.ErrNotImplementedError -} - -func (p *Process) StatusWithContext(ctx context.Context) ([]string, error) { - return []string{""}, common.ErrNotImplementedError -} - -func (p *Process) ForegroundWithContext(ctx context.Context) (bool, error) { - return false, common.ErrNotImplementedError -} - -func (p *Process) UidsWithContext(ctx context.Context) ([]int32, error) { - return nil, common.ErrNotImplementedError -} - -func (p *Process) GidsWithContext(ctx context.Context) ([]int32, error) { - return nil, common.ErrNotImplementedError -} - -func (p *Process) GroupsWithContext(ctx context.Context) ([]int32, error) { - return nil, common.ErrNotImplementedError -} - -func (p *Process) TerminalWithContext(ctx context.Context) (string, error) { - return "", common.ErrNotImplementedError -} - -func (p *Process) NiceWithContext(ctx context.Context) (int32, error) { - return 0, common.ErrNotImplementedError -} - -func (p *Process) IOniceWithContext(ctx context.Context) (int32, error) { - return 0, common.ErrNotImplementedError -} - -func (p *Process) RlimitWithContext(ctx context.Context) ([]RlimitStat, error) { - return nil, common.ErrNotImplementedError -} - -func (p *Process) RlimitUsageWithContext(ctx context.Context, gatherUsed bool) ([]RlimitStat, error) { - return nil, common.ErrNotImplementedError -} - -func (p *Process) IOCountersWithContext(ctx context.Context) (*IOCountersStat, error) { - return nil, common.ErrNotImplementedError -} - -func (p *Process) NumCtxSwitchesWithContext(ctx context.Context) (*NumCtxSwitchesStat, error) { - return nil, common.ErrNotImplementedError -} - -func (p *Process) NumFDsWithContext(ctx context.Context) (int32, error) { - return 0, common.ErrNotImplementedError -} - -func (p *Process) NumThreadsWithContext(ctx context.Context) (int32, error) { - return 0, common.ErrNotImplementedError -} - -func (p *Process) ThreadsWithContext(ctx context.Context) (map[int32]*cpu.TimesStat, error) { - return nil, common.ErrNotImplementedError -} - -func (p *Process) TimesWithContext(ctx context.Context) (*cpu.TimesStat, error) { - return nil, common.ErrNotImplementedError -} - -func (p *Process) CPUAffinityWithContext(ctx context.Context) ([]int32, error) { - return nil, common.ErrNotImplementedError -} - -func (p *Process) MemoryInfoWithContext(ctx context.Context) (*MemoryInfoStat, error) { - return nil, common.ErrNotImplementedError -} - -func (p *Process) MemoryInfoExWithContext(ctx context.Context) 
(*MemoryInfoExStat, error) { - return nil, common.ErrNotImplementedError -} - -func (p *Process) PageFaultsWithContext(ctx context.Context) (*PageFaultsStat, error) { - return nil, common.ErrNotImplementedError -} - -func (p *Process) ChildrenWithContext(ctx context.Context) ([]*Process, error) { - return nil, common.ErrNotImplementedError -} - -func (p *Process) OpenFilesWithContext(ctx context.Context) ([]OpenFilesStat, error) { - return nil, common.ErrNotImplementedError -} - -func (p *Process) ConnectionsWithContext(ctx context.Context) ([]net.ConnectionStat, error) { - return nil, common.ErrNotImplementedError -} - -func (p *Process) ConnectionsMaxWithContext(ctx context.Context, max int) ([]net.ConnectionStat, error) { - return nil, common.ErrNotImplementedError -} - -func (p *Process) MemoryMapsWithContext(ctx context.Context, grouped bool) (*[]MemoryMapsStat, error) { - return nil, common.ErrNotImplementedError -} - -func (p *Process) SendSignalWithContext(ctx context.Context, sig Signal) error { - return common.ErrNotImplementedError -} - -func (p *Process) SuspendWithContext(ctx context.Context) error { - return common.ErrNotImplementedError -} - -func (p *Process) ResumeWithContext(ctx context.Context) error { - return common.ErrNotImplementedError -} - -func (p *Process) TerminateWithContext(ctx context.Context) error { - return common.ErrNotImplementedError -} - -func (p *Process) KillWithContext(ctx context.Context) error { - return common.ErrNotImplementedError -} - -func (p *Process) UsernameWithContext(ctx context.Context) (string, error) { - return "", common.ErrNotImplementedError -} - -func (p *Process) EnvironWithContext(ctx context.Context) ([]string, error) { - return nil, common.ErrNotImplementedError -} diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_freebsd_amd64.go b/vendor/github.com/shirou/gopsutil/v3/process/process_freebsd_amd64.go deleted file mode 100644 index 560e627d2..000000000 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_freebsd_amd64.go +++ /dev/null @@ -1,192 +0,0 @@ -// Created by cgo -godefs - DO NOT EDIT -// cgo -godefs types_freebsd.go - -package process - -const ( - CTLKern = 1 - KernProc = 14 - KernProcPID = 1 - KernProcProc = 8 - KernProcPathname = 12 - KernProcArgs = 7 -) - -const ( - sizeofPtr = 0x8 - sizeofShort = 0x2 - sizeofInt = 0x4 - sizeofLong = 0x8 - sizeofLongLong = 0x8 -) - -const ( - sizeOfKinfoVmentry = 0x488 - sizeOfKinfoProc = 0x440 -) - -const ( - SIDL = 1 - SRUN = 2 - SSLEEP = 3 - SSTOP = 4 - SZOMB = 5 - SWAIT = 6 - SLOCK = 7 -) - -type ( - _C_short int16 - _C_int int32 - _C_long int64 - _C_long_long int64 -) - -type Timespec struct { - Sec int64 - Nsec int64 -} - -type Timeval struct { - Sec int64 - Usec int64 -} - -type Rusage struct { - Utime Timeval - Stime Timeval - Maxrss int64 - Ixrss int64 - Idrss int64 - Isrss int64 - Minflt int64 - Majflt int64 - Nswap int64 - Inblock int64 - Oublock int64 - Msgsnd int64 - Msgrcv int64 - Nsignals int64 - Nvcsw int64 - Nivcsw int64 -} - -type Rlimit struct { - Cur int64 - Max int64 -} - -type KinfoProc struct { - Structsize int32 - Layout int32 - Args int64 /* pargs */ - Paddr int64 /* proc */ - Addr int64 /* user */ - Tracep int64 /* vnode */ - Textvp int64 /* vnode */ - Fd int64 /* filedesc */ - Vmspace int64 /* vmspace */ - Wchan int64 - Pid int32 - Ppid int32 - Pgid int32 - Tpgid int32 - Sid int32 - Tsid int32 - Jobc int16 - Spare_short1 int16 - Tdev uint32 - Siglist [16]byte /* sigset */ - Sigmask [16]byte /* sigset */ - Sigignore [16]byte /* sigset 
*/ - Sigcatch [16]byte /* sigset */ - Uid uint32 - Ruid uint32 - Svuid uint32 - Rgid uint32 - Svgid uint32 - Ngroups int16 - Spare_short2 int16 - Groups [16]uint32 - Size uint64 - Rssize int64 - Swrss int64 - Tsize int64 - Dsize int64 - Ssize int64 - Xstat uint16 - Acflag uint16 - Pctcpu uint32 - Estcpu uint32 - Slptime uint32 - Swtime uint32 - Cow uint32 - Runtime uint64 - Start Timeval - Childtime Timeval - Flag int64 - Kiflag int64 - Traceflag int32 - Stat int8 - Nice int8 - Lock int8 - Rqindex int8 - Oncpu uint8 - Lastcpu uint8 - Tdname [17]int8 - Wmesg [9]int8 - Login [18]int8 - Lockname [9]int8 - Comm [20]int8 - Emul [17]int8 - Loginclass [18]int8 - Sparestrings [50]int8 - Spareints [7]int32 - Flag2 int32 - Fibnum int32 - Cr_flags uint32 - Jid int32 - Numthreads int32 - Tid int32 - Pri Priority - Rusage Rusage - Rusage_ch Rusage - Pcb int64 /* pcb */ - Kstack int64 - Udata int64 - Tdaddr int64 /* thread */ - Spareptrs [6]int64 - Sparelongs [12]int64 - Sflag int64 - Tdflags int64 -} - -type Priority struct { - Class uint8 - Level uint8 - Native uint8 - User uint8 -} - -type KinfoVmentry struct { - Structsize int32 - Type int32 - Start uint64 - End uint64 - Offset uint64 - Vn_fileid uint64 - Vn_fsid uint32 - Flags int32 - Resident int32 - Private_resident int32 - Protection int32 - Ref_count int32 - Shadow_count int32 - Vn_type int32 - Vn_size uint64 - Vn_rdev uint32 - Vn_mode uint16 - Status uint16 - X_kve_ispare [12]int32 - Path [1024]int8 -} diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_plan9.go b/vendor/github.com/shirou/gopsutil/v3/process/process_plan9.go deleted file mode 100644 index bc4bc062a..000000000 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_plan9.go +++ /dev/null @@ -1,203 +0,0 @@ -//go:build plan9 -// +build plan9 - -package process - -import ( - "context" - "syscall" - - "github.com/shirou/gopsutil/v3/cpu" - "github.com/shirou/gopsutil/v3/internal/common" - "github.com/shirou/gopsutil/v3/net" -) - -type Signal = syscall.Note - -type MemoryMapsStat struct { - Path string `json:"path"` - Rss uint64 `json:"rss"` - Size uint64 `json:"size"` - Pss uint64 `json:"pss"` - SharedClean uint64 `json:"sharedClean"` - SharedDirty uint64 `json:"sharedDirty"` - PrivateClean uint64 `json:"privateClean"` - PrivateDirty uint64 `json:"privateDirty"` - Referenced uint64 `json:"referenced"` - Anonymous uint64 `json:"anonymous"` - Swap uint64 `json:"swap"` -} - -type MemoryInfoExStat struct{} - -func pidsWithContext(ctx context.Context) ([]int32, error) { - return nil, common.ErrNotImplementedError -} - -func ProcessesWithContext(ctx context.Context) ([]*Process, error) { - return nil, common.ErrNotImplementedError -} - -func PidExistsWithContext(ctx context.Context, pid int32) (bool, error) { - return false, common.ErrNotImplementedError -} - -func (p *Process) PpidWithContext(ctx context.Context) (int32, error) { - return 0, common.ErrNotImplementedError -} - -func (p *Process) NameWithContext(ctx context.Context) (string, error) { - return "", common.ErrNotImplementedError -} - -func (p *Process) TgidWithContext(ctx context.Context) (int32, error) { - return 0, common.ErrNotImplementedError -} - -func (p *Process) ExeWithContext(ctx context.Context) (string, error) { - return "", common.ErrNotImplementedError -} - -func (p *Process) CmdlineWithContext(ctx context.Context) (string, error) { - return "", common.ErrNotImplementedError -} - -func (p *Process) CmdlineSliceWithContext(ctx context.Context) ([]string, error) { - return nil, 
common.ErrNotImplementedError -} - -func (p *Process) createTimeWithContext(ctx context.Context) (int64, error) { - return 0, common.ErrNotImplementedError -} - -func (p *Process) CwdWithContext(ctx context.Context) (string, error) { - return "", common.ErrNotImplementedError -} - -func (p *Process) StatusWithContext(ctx context.Context) ([]string, error) { - return []string{""}, common.ErrNotImplementedError -} - -func (p *Process) ForegroundWithContext(ctx context.Context) (bool, error) { - return false, common.ErrNotImplementedError -} - -func (p *Process) UidsWithContext(ctx context.Context) ([]int32, error) { - return nil, common.ErrNotImplementedError -} - -func (p *Process) GidsWithContext(ctx context.Context) ([]int32, error) { - return nil, common.ErrNotImplementedError -} - -func (p *Process) GroupsWithContext(ctx context.Context) ([]int32, error) { - return nil, common.ErrNotImplementedError -} - -func (p *Process) TerminalWithContext(ctx context.Context) (string, error) { - return "", common.ErrNotImplementedError -} - -func (p *Process) NiceWithContext(ctx context.Context) (int32, error) { - return 0, common.ErrNotImplementedError -} - -func (p *Process) IOniceWithContext(ctx context.Context) (int32, error) { - return 0, common.ErrNotImplementedError -} - -func (p *Process) RlimitWithContext(ctx context.Context) ([]RlimitStat, error) { - return nil, common.ErrNotImplementedError -} - -func (p *Process) RlimitUsageWithContext(ctx context.Context, gatherUsed bool) ([]RlimitStat, error) { - return nil, common.ErrNotImplementedError -} - -func (p *Process) IOCountersWithContext(ctx context.Context) (*IOCountersStat, error) { - return nil, common.ErrNotImplementedError -} - -func (p *Process) NumCtxSwitchesWithContext(ctx context.Context) (*NumCtxSwitchesStat, error) { - return nil, common.ErrNotImplementedError -} - -func (p *Process) NumFDsWithContext(ctx context.Context) (int32, error) { - return 0, common.ErrNotImplementedError -} - -func (p *Process) NumThreadsWithContext(ctx context.Context) (int32, error) { - return 0, common.ErrNotImplementedError -} - -func (p *Process) ThreadsWithContext(ctx context.Context) (map[int32]*cpu.TimesStat, error) { - return nil, common.ErrNotImplementedError -} - -func (p *Process) TimesWithContext(ctx context.Context) (*cpu.TimesStat, error) { - return nil, common.ErrNotImplementedError -} - -func (p *Process) CPUAffinityWithContext(ctx context.Context) ([]int32, error) { - return nil, common.ErrNotImplementedError -} - -func (p *Process) MemoryInfoWithContext(ctx context.Context) (*MemoryInfoStat, error) { - return nil, common.ErrNotImplementedError -} - -func (p *Process) MemoryInfoExWithContext(ctx context.Context) (*MemoryInfoExStat, error) { - return nil, common.ErrNotImplementedError -} - -func (p *Process) PageFaultsWithContext(ctx context.Context) (*PageFaultsStat, error) { - return nil, common.ErrNotImplementedError -} - -func (p *Process) ChildrenWithContext(ctx context.Context) ([]*Process, error) { - return nil, common.ErrNotImplementedError -} - -func (p *Process) OpenFilesWithContext(ctx context.Context) ([]OpenFilesStat, error) { - return nil, common.ErrNotImplementedError -} - -func (p *Process) ConnectionsWithContext(ctx context.Context) ([]net.ConnectionStat, error) { - return nil, common.ErrNotImplementedError -} - -func (p *Process) ConnectionsMaxWithContext(ctx context.Context, max int) ([]net.ConnectionStat, error) { - return nil, common.ErrNotImplementedError -} - -func (p *Process) MemoryMapsWithContext(ctx 
context.Context, grouped bool) (*[]MemoryMapsStat, error) { - return nil, common.ErrNotImplementedError -} - -func (p *Process) SendSignalWithContext(ctx context.Context, sig Signal) error { - return common.ErrNotImplementedError -} - -func (p *Process) SuspendWithContext(ctx context.Context) error { - return common.ErrNotImplementedError -} - -func (p *Process) ResumeWithContext(ctx context.Context) error { - return common.ErrNotImplementedError -} - -func (p *Process) TerminateWithContext(ctx context.Context) error { - return common.ErrNotImplementedError -} - -func (p *Process) KillWithContext(ctx context.Context) error { - return common.ErrNotImplementedError -} - -func (p *Process) UsernameWithContext(ctx context.Context) (string, error) { - return "", common.ErrNotImplementedError -} - -func (p *Process) EnvironWithContext(ctx context.Context) ([]string, error) { - return nil, common.ErrNotImplementedError -} diff --git a/vendor/github.com/shirou/gopsutil/v3/LICENSE b/vendor/github.com/shirou/gopsutil/v4/LICENSE similarity index 100% rename from vendor/github.com/shirou/gopsutil/v3/LICENSE rename to vendor/github.com/shirou/gopsutil/v4/LICENSE diff --git a/vendor/github.com/shirou/gopsutil/v3/common/env.go b/vendor/github.com/shirou/gopsutil/v4/common/env.go similarity index 51% rename from vendor/github.com/shirou/gopsutil/v3/common/env.go rename to vendor/github.com/shirou/gopsutil/v4/common/env.go index 4b5f4980c..47e471c40 100644 --- a/vendor/github.com/shirou/gopsutil/v3/common/env.go +++ b/vendor/github.com/shirou/gopsutil/v4/common/env.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause package common type EnvKeyType string @@ -11,13 +12,14 @@ type EnvKeyType string var EnvKey = EnvKeyType("env") const ( - HostProcEnvKey EnvKeyType = "HOST_PROC" - HostSysEnvKey EnvKeyType = "HOST_SYS" - HostEtcEnvKey EnvKeyType = "HOST_ETC" - HostVarEnvKey EnvKeyType = "HOST_VAR" - HostRunEnvKey EnvKeyType = "HOST_RUN" - HostDevEnvKey EnvKeyType = "HOST_DEV" - HostRootEnvKey EnvKeyType = "HOST_ROOT" + HostProcEnvKey EnvKeyType = "HOST_PROC" + HostSysEnvKey EnvKeyType = "HOST_SYS" + HostEtcEnvKey EnvKeyType = "HOST_ETC" + HostVarEnvKey EnvKeyType = "HOST_VAR" + HostRunEnvKey EnvKeyType = "HOST_RUN" + HostDevEnvKey EnvKeyType = "HOST_DEV" + HostRootEnvKey EnvKeyType = "HOST_ROOT" + HostProcMountinfo EnvKeyType = "HOST_PROC_MOUNTINFO" ) type EnvMap map[EnvKeyType]string diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu.go similarity index 96% rename from vendor/github.com/shirou/gopsutil/v3/cpu/cpu.go rename to vendor/github.com/shirou/gopsutil/v4/cpu/cpu.go index 83bc23d45..9bc3dfb51 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu.go +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu.go @@ -1,8 +1,10 @@ +// SPDX-License-Identifier: BSD-3-Clause package cpu import ( "context" "encoding/json" + "errors" "fmt" "math" "runtime" @@ -11,7 +13,7 @@ import ( "sync" "time" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" ) // TimesStat contains the amounts of time the CPU has spent performing different @@ -194,7 +196,7 @@ func percentUsedFromLastCallWithContext(ctx context.Context, percpu bool) ([]flo } if lastTimes == nil { - return nil, fmt.Errorf("error getting times for cpu percent. lastTimes was nil") + return nil, errors.New("error getting times for cpu percent. 
lastTimes was nil") } return calculateAllBusy(lastTimes, cpuTimes) } diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_aix.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_aix.go similarity index 85% rename from vendor/github.com/shirou/gopsutil/v3/cpu/cpu_aix.go rename to vendor/github.com/shirou/gopsutil/v4/cpu/cpu_aix.go index 1439d1d79..bc766bd4f 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_aix.go +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_aix.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build aix -// +build aix package cpu diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_aix_cgo.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_aix_cgo.go similarity index 96% rename from vendor/github.com/shirou/gopsutil/v3/cpu/cpu_aix_cgo.go rename to vendor/github.com/shirou/gopsutil/v4/cpu/cpu_aix_cgo.go index 9c1e70b17..559dc5fea 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_aix_cgo.go +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_aix_cgo.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build aix && cgo -// +build aix,cgo package cpu diff --git a/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_aix_nocgo.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_aix_nocgo.go new file mode 100644 index 000000000..981e32e51 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_aix_nocgo.go @@ -0,0 +1,157 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build aix && !cgo + +package cpu + +import ( + "context" + "strconv" + "strings" + + "github.com/shirou/gopsutil/v4/internal/common" +) + +func TimesWithContext(ctx context.Context, percpu bool) ([]TimesStat, error) { + var ret []TimesStat + if percpu { + perOut, err := invoke.CommandWithContext(ctx, "sar", "-u", "-P", "ALL", "10", "1") + if err != nil { + return nil, err + } + lines := strings.Split(string(perOut), "\n") + if len(lines) < 6 { + return []TimesStat{}, common.ErrNotImplementedError + } + + hp := strings.Fields(lines[5]) // headers + for l := 6; l < len(lines)-1; l++ { + ct := &TimesStat{} + v := strings.Fields(lines[l]) // values + for i, header := range hp { + // We're done in any of these use cases + if i >= len(v) || v[0] == "-" { + break + } + + // Position variable for v + pos := i + // There is a missing field at the beginning of all but the first line + // so adjust the position + if l > 6 { + pos = i - 1 + } + // We don't want invalid positions + if pos < 0 { + continue + } + + if t, err := strconv.ParseFloat(v[pos], 64); err == nil { + switch header { + case `cpu`: + ct.CPU = strconv.FormatFloat(t, 'f', -1, 64) + case `%usr`: + ct.User = t + case `%sys`: + ct.System = t + case `%wio`: + ct.Iowait = t + case `%idle`: + ct.Idle = t + } + } + } + // Valid CPU data, so append it + ret = append(ret, *ct) + } + } else { + out, err := invoke.CommandWithContext(ctx, "sar", "-u", "10", "1") + if err != nil { + return nil, err + } + lines := strings.Split(string(out), "\n") + if len(lines) < 5 { + return []TimesStat{}, common.ErrNotImplementedError + } + + ct := &TimesStat{CPU: "cpu-total"} + h := strings.Fields(lines[len(lines)-3]) // headers + v := strings.Fields(lines[len(lines)-2]) // values + for i, header := range h { + if t, err := strconv.ParseFloat(v[i], 64); err == nil { + switch header { + case `%usr`: + ct.User = t + case `%sys`: + ct.System = t + case `%wio`: + ct.Iowait = t + case `%idle`: + ct.Idle = t + } + } + } + + ret = append(ret, *ct) + } + + return ret, nil +} + +func InfoWithContext(ctx context.Context) ([]InfoStat, error) 
{ + out, err := invoke.CommandWithContext(ctx, "prtconf") + if err != nil { + return nil, err + } + + ret := InfoStat{} + for _, line := range strings.Split(string(out), "\n") { + switch { + case strings.HasPrefix(line, "Number Of Processors:"): + p := strings.Fields(line) + if len(p) > 3 { + if t, err := strconv.ParseUint(p[3], 10, 64); err == nil { + ret.Cores = int32(t) + } + } + case strings.HasPrefix(line, "Processor Clock Speed:"): + p := strings.Fields(line) + if len(p) > 4 { + if t, err := strconv.ParseFloat(p[3], 64); err == nil { + switch strings.ToUpper(p[4]) { + case "MHZ": + ret.Mhz = t + case "GHZ": + ret.Mhz = t * 1000.0 + case "KHZ": + ret.Mhz = t / 1000.0 + default: + ret.Mhz = t + } + } + } + case strings.HasPrefix(line, "System Model:"): + p := strings.Split(string(line), ":") + if p != nil { + ret.VendorID = strings.TrimSpace(p[1]) + } + case strings.HasPrefix(line, "Processor Type:"): + p := strings.Split(string(line), ":") + if p != nil { + c := strings.Split(string(p[1]), "_") + if c != nil { + ret.Family = strings.TrimSpace(c[0]) + ret.Model = strings.TrimSpace(c[1]) + } + } + } + } + return []InfoStat{ret}, nil +} + +func CountsWithContext(ctx context.Context, _ bool) (int, error) { + info, err := InfoWithContext(ctx) + if err == nil { + return int(info[0].Cores), nil + } + return 0, err +} diff --git a/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_darwin.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_darwin.go new file mode 100644 index 000000000..c61a470fb --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_darwin.go @@ -0,0 +1,203 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build darwin + +package cpu + +import ( + "context" + "errors" + "fmt" + "strconv" + "strings" + "unsafe" + + "github.com/tklauser/go-sysconf" + "golang.org/x/sys/unix" + + "github.com/shirou/gopsutil/v4/internal/common" +) + +// sys/resource.h +const ( + CPUser = 0 + cpNice = 1 + cpSys = 2 + cpIntr = 3 + cpIdle = 4 + cpUStates = 5 +) + +// mach/machine.h +const ( + cpuStateUser = 0 + cpuStateSystem = 1 + cpuStateIdle = 2 + cpuStateNice = 3 + cpuStateMax = 4 +) + +// mach/processor_info.h +const ( + processorCpuLoadInfo = 2 //nolint:revive //FIXME +) + +type hostCpuLoadInfoData struct { //nolint:revive //FIXME + cpuTicks [cpuStateMax]uint32 +} + +// default value. 
from time.h +var ClocksPerSec = float64(128) + +func init() { + clkTck, err := sysconf.Sysconf(sysconf.SC_CLK_TCK) + // ignore errors + if err == nil { + ClocksPerSec = float64(clkTck) + } +} + +func Times(percpu bool) ([]TimesStat, error) { + return TimesWithContext(context.Background(), percpu) +} + +func TimesWithContext(_ context.Context, percpu bool) ([]TimesStat, error) { + lib, err := common.NewLibrary(common.System) + if err != nil { + return nil, err + } + defer lib.Close() + + if percpu { + return perCPUTimes(lib) + } + + return allCPUTimes(lib) +} + +// Returns only one CPUInfoStat on FreeBSD +func Info() ([]InfoStat, error) { + return InfoWithContext(context.Background()) +} + +func InfoWithContext(_ context.Context) ([]InfoStat, error) { + var ret []InfoStat + + c := InfoStat{} + c.ModelName, _ = unix.Sysctl("machdep.cpu.brand_string") + family, _ := unix.SysctlUint32("machdep.cpu.family") + c.Family = strconv.FormatUint(uint64(family), 10) + model, _ := unix.SysctlUint32("machdep.cpu.model") + c.Model = strconv.FormatUint(uint64(model), 10) + stepping, _ := unix.SysctlUint32("machdep.cpu.stepping") + c.Stepping = int32(stepping) + features, err := unix.Sysctl("machdep.cpu.features") + if err == nil { + for _, v := range strings.Fields(features) { + c.Flags = append(c.Flags, strings.ToLower(v)) + } + } + leaf7Features, err := unix.Sysctl("machdep.cpu.leaf7_features") + if err == nil { + for _, v := range strings.Fields(leaf7Features) { + c.Flags = append(c.Flags, strings.ToLower(v)) + } + } + extfeatures, err := unix.Sysctl("machdep.cpu.extfeatures") + if err == nil { + for _, v := range strings.Fields(extfeatures) { + c.Flags = append(c.Flags, strings.ToLower(v)) + } + } + cores, _ := unix.SysctlUint32("machdep.cpu.core_count") + c.Cores = int32(cores) + cacheSize, _ := unix.SysctlUint32("machdep.cpu.cache.size") + c.CacheSize = int32(cacheSize) + c.VendorID, _ = unix.Sysctl("machdep.cpu.vendor") + + v, err := getFrequency() + if err == nil { + c.Mhz = v + } + + return append(ret, c), nil +} + +func CountsWithContext(_ context.Context, logical bool) (int, error) { + var cpuArgument string + if logical { + cpuArgument = "hw.logicalcpu" + } else { + cpuArgument = "hw.physicalcpu" + } + + count, err := unix.SysctlUint32(cpuArgument) + if err != nil { + return 0, err + } + + return int(count), nil +} + +func perCPUTimes(machLib *common.Library) ([]TimesStat, error) { + machHostSelf := common.GetFunc[common.MachHostSelfFunc](machLib, common.MachHostSelfSym) + machTaskSelf := common.GetFunc[common.MachTaskSelfFunc](machLib, common.MachTaskSelfSym) + hostProcessorInfo := common.GetFunc[common.HostProcessorInfoFunc](machLib, common.HostProcessorInfoSym) + vmDeallocate := common.GetFunc[common.VMDeallocateFunc](machLib, common.VMDeallocateSym) + + var count, ncpu uint32 + var cpuload *hostCpuLoadInfoData + + status := hostProcessorInfo(machHostSelf(), processorCpuLoadInfo, &ncpu, uintptr(unsafe.Pointer(&cpuload)), &count) + + if status != common.KERN_SUCCESS { + return nil, fmt.Errorf("host_processor_info error=%d", status) + } + + if cpuload == nil { + return nil, errors.New("host_processor_info returned nil cpuload") + } + + defer vmDeallocate(machTaskSelf(), uintptr(unsafe.Pointer(cpuload)), uintptr(ncpu)) + + ret := []TimesStat{} + loads := unsafe.Slice(cpuload, ncpu) + + for i := 0; i < int(ncpu); i++ { + c := TimesStat{ + CPU: fmt.Sprintf("cpu%d", i), + User: float64(loads[i].cpuTicks[cpuStateUser]) / ClocksPerSec, + System: float64(loads[i].cpuTicks[cpuStateSystem]) / 
ClocksPerSec, + Nice: float64(loads[i].cpuTicks[cpuStateNice]) / ClocksPerSec, + Idle: float64(loads[i].cpuTicks[cpuStateIdle]) / ClocksPerSec, + } + + ret = append(ret, c) + } + + return ret, nil +} + +func allCPUTimes(machLib *common.Library) ([]TimesStat, error) { + machHostSelf := common.GetFunc[common.MachHostSelfFunc](machLib, common.MachHostSelfSym) + hostStatistics := common.GetFunc[common.HostStatisticsFunc](machLib, common.HostStatisticsSym) + + var cpuload hostCpuLoadInfoData + count := uint32(cpuStateMax) + + status := hostStatistics(machHostSelf(), common.HOST_CPU_LOAD_INFO, + uintptr(unsafe.Pointer(&cpuload)), &count) + + if status != common.KERN_SUCCESS { + return nil, fmt.Errorf("host_statistics error=%d", status) + } + + c := TimesStat{ + CPU: "cpu-total", + User: float64(cpuload.cpuTicks[cpuStateUser]) / ClocksPerSec, + System: float64(cpuload.cpuTicks[cpuStateSystem]) / ClocksPerSec, + Nice: float64(cpuload.cpuTicks[cpuStateNice]) / ClocksPerSec, + Idle: float64(cpuload.cpuTicks[cpuStateIdle]) / ClocksPerSec, + } + + return []TimesStat{c}, nil +} diff --git a/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_darwin_arm64.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_darwin_arm64.go new file mode 100644 index 000000000..8e69d7cb1 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_darwin_arm64.go @@ -0,0 +1,80 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build darwin && arm64 + +package cpu + +import ( + "encoding/binary" + "fmt" + "unsafe" + + "github.com/shirou/gopsutil/v4/internal/common" +) + +// https://github.com/shoenig/go-m1cpu/blob/v0.1.6/cpu.go +func getFrequency() (float64, error) { + ioKit, err := common.NewLibrary(common.IOKit) + if err != nil { + return 0, err + } + defer ioKit.Close() + + coreFoundation, err := common.NewLibrary(common.CoreFoundation) + if err != nil { + return 0, err + } + defer coreFoundation.Close() + + ioServiceMatching := common.GetFunc[common.IOServiceMatchingFunc](ioKit, common.IOServiceMatchingSym) + ioServiceGetMatchingServices := common.GetFunc[common.IOServiceGetMatchingServicesFunc](ioKit, common.IOServiceGetMatchingServicesSym) + ioIteratorNext := common.GetFunc[common.IOIteratorNextFunc](ioKit, common.IOIteratorNextSym) + ioRegistryEntryGetName := common.GetFunc[common.IORegistryEntryGetNameFunc](ioKit, common.IORegistryEntryGetNameSym) + ioRegistryEntryCreateCFProperty := common.GetFunc[common.IORegistryEntryCreateCFPropertyFunc](ioKit, common.IORegistryEntryCreateCFPropertySym) + ioObjectRelease := common.GetFunc[common.IOObjectReleaseFunc](ioKit, common.IOObjectReleaseSym) + + cfStringCreateWithCString := common.GetFunc[common.CFStringCreateWithCStringFunc](coreFoundation, common.CFStringCreateWithCStringSym) + cfDataGetLength := common.GetFunc[common.CFDataGetLengthFunc](coreFoundation, common.CFDataGetLengthSym) + cfDataGetBytePtr := common.GetFunc[common.CFDataGetBytePtrFunc](coreFoundation, common.CFDataGetBytePtrSym) + cfRelease := common.GetFunc[common.CFReleaseFunc](coreFoundation, common.CFReleaseSym) + + matching := ioServiceMatching("AppleARMIODevice") + + var iterator uint32 + if status := ioServiceGetMatchingServices(common.KIOMainPortDefault, uintptr(matching), &iterator); status != common.KERN_SUCCESS { + return 0.0, fmt.Errorf("IOServiceGetMatchingServices error=%d", status) + } + defer ioObjectRelease(iterator) + + pCorekey := cfStringCreateWithCString(common.KCFAllocatorDefault, "voltage-states5-sram", common.KCFStringEncodingUTF8) + defer cfRelease(uintptr(pCorekey)) + + var pCoreHz 
uint32 + for { + service := ioIteratorNext(iterator) + if service <= 0 { + break + } + + buf := common.NewCStr(512) + ioRegistryEntryGetName(service, buf) + + if buf.GoString() == "pmgr" { + pCoreRef := ioRegistryEntryCreateCFProperty(service, uintptr(pCorekey), common.KCFAllocatorDefault, common.KNilOptions) + length := cfDataGetLength(uintptr(pCoreRef)) + data := cfDataGetBytePtr(uintptr(pCoreRef)) + + // composite uint32 from the byte array + buf := unsafe.Slice((*byte)(data), length) + + // combine the bytes into a uint32 value + b := buf[length-8 : length-4] + pCoreHz = binary.LittleEndian.Uint32(b) + ioObjectRelease(service) + break + } + + ioObjectRelease(service) + } + + return float64(pCoreHz / 1_000_000), nil +} diff --git a/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_darwin_fallback.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_darwin_fallback.go new file mode 100644 index 000000000..b9e52aba1 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_darwin_fallback.go @@ -0,0 +1,13 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build darwin && !arm64 + +package cpu + +import "golang.org/x/sys/unix" + +func getFrequency() (float64, error) { + // Use the rated frequency of the CPU. This is a static value and does not + // account for low power or Turbo Boost modes. + cpuFrequency, err := unix.SysctlUint64("hw.cpufrequency") + return float64(cpuFrequency) / 1000000.0, err +} diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_dragonfly.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_dragonfly.go similarity index 91% rename from vendor/github.com/shirou/gopsutil/v3/cpu/cpu_dragonfly.go rename to vendor/github.com/shirou/gopsutil/v4/cpu/cpu_dragonfly.go index fef53e5dc..8232c483c 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_dragonfly.go +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_dragonfly.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause package cpu import ( @@ -10,9 +11,10 @@ import ( "strings" "unsafe" - "github.com/shirou/gopsutil/v3/internal/common" "github.com/tklauser/go-sysconf" "golang.org/x/sys/unix" + + "github.com/shirou/gopsutil/v4/internal/common" ) var ( @@ -49,7 +51,7 @@ func Times(percpu bool) ([]TimesStat, error) { return TimesWithContext(context.Background(), percpu) } -func TimesWithContext(ctx context.Context, percpu bool) ([]TimesStat, error) { +func TimesWithContext(_ context.Context, percpu bool) ([]TimesStat, error) { if percpu { buf, err := unix.SysctlRaw("kern.cp_times") if err != nil { @@ -90,7 +92,7 @@ func Info() ([]InfoStat, error) { return InfoWithContext(context.Background()) } -func InfoWithContext(ctx context.Context) ([]InfoStat, error) { +func InfoWithContext(_ context.Context) ([]InfoStat, error) { const dmesgBoot = "/var/run/dmesg.boot" c, err := parseDmesgBoot(dmesgBoot) @@ -134,7 +136,7 @@ func parseDmesgBoot(fileName string) (InfoStat, error) { c.VendorID = matches[1] t, err := strconv.ParseInt(matches[2], 10, 32) if err != nil { - return c, fmt.Errorf("unable to parse DragonflyBSD CPU stepping information from %q: %v", line, err) + return c, fmt.Errorf("unable to parse DragonflyBSD CPU stepping information from %q: %w", line, err) } c.Stepping = int32(t) } else if matches := featuresMatch.FindStringSubmatch(line); matches != nil { @@ -151,6 +153,6 @@ func parseDmesgBoot(fileName string) (InfoStat, error) { return c, nil } -func CountsWithContext(ctx context.Context, logical bool) (int, error) { +func CountsWithContext(_ context.Context, _ bool) (int, error) { return runtime.NumCPU(), 
nil } diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_dragonfly_amd64.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_dragonfly_amd64.go similarity index 71% rename from vendor/github.com/shirou/gopsutil/v3/cpu/cpu_dragonfly_amd64.go rename to vendor/github.com/shirou/gopsutil/v4/cpu/cpu_dragonfly_amd64.go index 57e14528d..25ececa68 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_dragonfly_amd64.go +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_dragonfly_amd64.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause package cpu type cpuTimes struct { diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_fallback.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_fallback.go similarity index 83% rename from vendor/github.com/shirou/gopsutil/v3/cpu/cpu_fallback.go rename to vendor/github.com/shirou/gopsutil/v4/cpu/cpu_fallback.go index 089f603c8..245c1ec98 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_fallback.go +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_fallback.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build !darwin && !linux && !freebsd && !openbsd && !netbsd && !solaris && !windows && !dragonfly && !plan9 && !aix -// +build !darwin,!linux,!freebsd,!openbsd,!netbsd,!solaris,!windows,!dragonfly,!plan9,!aix package cpu @@ -7,7 +7,7 @@ import ( "context" "runtime" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" ) func Times(percpu bool) ([]TimesStat, error) { diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_freebsd.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_freebsd.go similarity index 91% rename from vendor/github.com/shirou/gopsutil/v3/cpu/cpu_freebsd.go rename to vendor/github.com/shirou/gopsutil/v4/cpu/cpu_freebsd.go index d3f47353c..107b574f8 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_freebsd.go +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_freebsd.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause package cpu import ( @@ -10,9 +11,10 @@ import ( "strings" "unsafe" - "github.com/shirou/gopsutil/v3/internal/common" "github.com/tklauser/go-sysconf" "golang.org/x/sys/unix" + + "github.com/shirou/gopsutil/v4/internal/common" ) var ( @@ -50,7 +52,7 @@ func Times(percpu bool) ([]TimesStat, error) { return TimesWithContext(context.Background(), percpu) } -func TimesWithContext(ctx context.Context, percpu bool) ([]TimesStat, error) { +func TimesWithContext(_ context.Context, percpu bool) ([]TimesStat, error) { if percpu { buf, err := unix.SysctlRaw("kern.cp_times") if err != nil { @@ -91,7 +93,7 @@ func Info() ([]InfoStat, error) { return InfoWithContext(context.Background()) } -func InfoWithContext(ctx context.Context) ([]InfoStat, error) { +func InfoWithContext(_ context.Context) ([]InfoStat, error) { const dmesgBoot = "/var/run/dmesg.boot" c, num, err := parseDmesgBoot(dmesgBoot) @@ -135,7 +137,7 @@ func parseDmesgBoot(fileName string) (InfoStat, int, error) { c.Model = matches[4] t, err := strconv.ParseInt(matches[5], 10, 32) if err != nil { - return c, 0, fmt.Errorf("unable to parse FreeBSD CPU stepping information from %q: %v", line, err) + return c, 0, fmt.Errorf("unable to parse FreeBSD CPU stepping information from %q: %w", line, err) } c.Stepping = int32(t) } else if matches := featuresMatch.FindStringSubmatch(line); matches != nil { @@ -149,12 +151,12 @@ func parseDmesgBoot(fileName string) (InfoStat, int, error) { } else if matches := cpuCores.FindStringSubmatch(line); matches != nil { t, err := 
strconv.ParseInt(matches[1], 10, 32) if err != nil { - return c, 0, fmt.Errorf("unable to parse FreeBSD CPU Nums from %q: %v", line, err) + return c, 0, fmt.Errorf("unable to parse FreeBSD CPU Nums from %q: %w", line, err) } cpuNum = int(t) t2, err := strconv.ParseInt(matches[2], 10, 32) if err != nil { - return c, 0, fmt.Errorf("unable to parse FreeBSD CPU cores from %q: %v", line, err) + return c, 0, fmt.Errorf("unable to parse FreeBSD CPU cores from %q: %w", line, err) } c.Cores = int32(t2) } @@ -163,6 +165,6 @@ func parseDmesgBoot(fileName string) (InfoStat, int, error) { return c, cpuNum, nil } -func CountsWithContext(ctx context.Context, logical bool) (int, error) { +func CountsWithContext(_ context.Context, _ bool) (int, error) { return runtime.NumCPU(), nil } diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_freebsd_386.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_freebsd_386.go similarity index 71% rename from vendor/github.com/shirou/gopsutil/v3/cpu/cpu_freebsd_386.go rename to vendor/github.com/shirou/gopsutil/v4/cpu/cpu_freebsd_386.go index 8b7f4c321..e4799bcf5 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_freebsd_386.go +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_freebsd_386.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause package cpu type cpuTimes struct { diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_freebsd_amd64.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_freebsd_amd64.go similarity index 71% rename from vendor/github.com/shirou/gopsutil/v3/cpu/cpu_freebsd_amd64.go rename to vendor/github.com/shirou/gopsutil/v4/cpu/cpu_freebsd_amd64.go index 57e14528d..25ececa68 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_freebsd_amd64.go +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_freebsd_amd64.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause package cpu type cpuTimes struct { diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_freebsd_arm.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_freebsd_arm.go similarity index 71% rename from vendor/github.com/shirou/gopsutil/v3/cpu/cpu_freebsd_arm.go rename to vendor/github.com/shirou/gopsutil/v4/cpu/cpu_freebsd_arm.go index 8b7f4c321..e4799bcf5 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_freebsd_arm.go +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_freebsd_arm.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause package cpu type cpuTimes struct { diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_freebsd_arm64.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_freebsd_arm64.go similarity index 71% rename from vendor/github.com/shirou/gopsutil/v3/cpu/cpu_freebsd_arm64.go rename to vendor/github.com/shirou/gopsutil/v4/cpu/cpu_freebsd_arm64.go index 57e14528d..25ececa68 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_freebsd_arm64.go +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_freebsd_arm64.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause package cpu type cpuTimes struct { diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_linux.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_linux.go similarity index 97% rename from vendor/github.com/shirou/gopsutil/v3/cpu/cpu_linux.go rename to vendor/github.com/shirou/gopsutil/v4/cpu/cpu_linux.go index da467e2dd..c6ec17e9c 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_linux.go +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_linux.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build linux -// +build linux 
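// Note the pattern repeated across these hunks: v4 adds an SPDX license
// identifier and drops the legacy "// +build" constraint line, keeping only
// the "//go:build" form introduced in Go 1.17; the old line is needed only
// by pre-1.17 toolchains.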
package cpu @@ -13,7 +13,7 @@ import ( "github.com/tklauser/go-sysconf" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" ) var ClocksPerSec = float64(100) @@ -135,7 +135,7 @@ func finishCPUInfo(ctx context.Context, c *InfoStat) { var err error var value float64 - if len(c.CoreID) == 0 { + if c.CoreID == "" { lines, err = common.ReadLines(sysCPUPath(ctx, c.CPU, "topology/core_id")) if err == nil { c.CoreID = lines[0] @@ -157,7 +157,7 @@ func finishCPUInfo(ctx context.Context, c *InfoStat) { } c.Mhz = value / 1000.0 // value is in kHz if c.Mhz > 9999 { - c.Mhz = c.Mhz / 1000.0 // value in Hz + c.Mhz /= 1000.0 // value in Hz } } @@ -395,7 +395,7 @@ func CountsWithContext(ctx context.Context, logical bool) (int, error) { for _, line := range lines { line = strings.ToLower(line) if strings.HasPrefix(line, "processor") { - _, err = strconv.Atoi(strings.TrimSpace(line[strings.IndexByte(line, ':')+1:])) + _, err = strconv.ParseInt(strings.TrimSpace(line[strings.IndexByte(line, ':')+1:]), 10, 32) if err == nil { ret++ } @@ -464,11 +464,11 @@ func CountsWithContext(ctx context.Context, logical bool) (int, error) { } fields[0] = strings.TrimSpace(fields[0]) if fields[0] == "physical id" || fields[0] == "cpu cores" { - val, err := strconv.Atoi(strings.TrimSpace(fields[1])) + val, err := strconv.ParseInt(strings.TrimSpace(fields[1]), 10, 32) if err != nil { continue } - currentInfo[fields[0]] = val + currentInfo[fields[0]] = int(val) } } ret := 0 diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_netbsd.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_netbsd.go similarity index 87% rename from vendor/github.com/shirou/gopsutil/v3/cpu/cpu_netbsd.go rename to vendor/github.com/shirou/gopsutil/v4/cpu/cpu_netbsd.go index 1f66be342..a1dc14d21 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_netbsd.go +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_netbsd.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build netbsd -// +build netbsd package cpu @@ -9,9 +9,10 @@ import ( "runtime" "unsafe" - "github.com/shirou/gopsutil/v3/internal/common" "github.com/tklauser/go-sysconf" "golang.org/x/sys/unix" + + "github.com/shirou/gopsutil/v4/internal/common" ) const ( @@ -35,7 +36,7 @@ func Times(percpu bool) ([]TimesStat, error) { return TimesWithContext(context.Background(), percpu) } -func TimesWithContext(ctx context.Context, percpu bool) (ret []TimesStat, err error) { +func TimesWithContext(_ context.Context, percpu bool) (ret []TimesStat, err error) { if !percpu { mib := []int32{ctlKern, kernCpTime} buf, _, err := common.CallSyscall(mib) @@ -56,7 +57,7 @@ func TimesWithContext(ctx context.Context, percpu bool) (ret []TimesStat, err er ncpu, err := unix.SysctlUint32("hw.ncpu") if err != nil { - return + return ret, err } var i uint32 @@ -86,7 +87,7 @@ func Info() ([]InfoStat, error) { return InfoWithContext(context.Background()) } -func InfoWithContext(ctx context.Context) ([]InfoStat, error) { +func InfoWithContext(_ context.Context) ([]InfoStat, error) { var ret []InfoStat var err error @@ -114,6 +115,6 @@ func InfoWithContext(ctx context.Context) ([]InfoStat, error) { return append(ret, c), nil } -func CountsWithContext(ctx context.Context, logical bool) (int, error) { +func CountsWithContext(_ context.Context, _ bool) (int, error) { return runtime.NumCPU(), nil } diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_netbsd_amd64.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_netbsd_amd64.go similarity index 71% rename 
from vendor/github.com/shirou/gopsutil/v3/cpu/cpu_netbsd_amd64.go rename to vendor/github.com/shirou/gopsutil/v4/cpu/cpu_netbsd_amd64.go index 57e14528d..25ececa68 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_netbsd_amd64.go +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_netbsd_amd64.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause package cpu type cpuTimes struct { diff --git a/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_netbsd_arm.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_netbsd_arm.go new file mode 100644 index 000000000..e4799bcf5 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_netbsd_arm.go @@ -0,0 +1,10 @@ +// SPDX-License-Identifier: BSD-3-Clause +package cpu + +type cpuTimes struct { + User uint32 + Nice uint32 + Sys uint32 + Intr uint32 + Idle uint32 +} diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_netbsd_arm64.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_netbsd_arm64.go similarity index 71% rename from vendor/github.com/shirou/gopsutil/v3/cpu/cpu_netbsd_arm64.go rename to vendor/github.com/shirou/gopsutil/v4/cpu/cpu_netbsd_arm64.go index 57e14528d..25ececa68 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_netbsd_arm64.go +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_netbsd_arm64.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause package cpu type cpuTimes struct { diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_openbsd.go similarity index 88% rename from vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd.go rename to vendor/github.com/shirou/gopsutil/v4/cpu/cpu_openbsd.go index fe3329030..4ab02d03b 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd.go +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_openbsd.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build openbsd -// +build openbsd package cpu @@ -9,9 +9,10 @@ import ( "runtime" "unsafe" - "github.com/shirou/gopsutil/v3/internal/common" "github.com/tklauser/go-sysconf" "golang.org/x/sys/unix" + + "github.com/shirou/gopsutil/v4/internal/common" ) const ( @@ -53,7 +54,7 @@ func Times(percpu bool) ([]TimesStat, error) { return TimesWithContext(context.Background(), percpu) } -func TimesWithContext(ctx context.Context, percpu bool) (ret []TimesStat, err error) { +func TimesWithContext(_ context.Context, percpu bool) (ret []TimesStat, err error) { if !percpu { mib := []int32{ctlKern, kernCpTime} buf, _, err := common.CallSyscall(mib) @@ -74,7 +75,7 @@ func TimesWithContext(ctx context.Context, percpu bool) (ret []TimesStat, err er ncpu, err := unix.SysctlUint32("hw.ncpu") if err != nil { - return + return ret, err } var i uint32 @@ -107,7 +108,7 @@ func Info() ([]InfoStat, error) { return InfoWithContext(context.Background()) } -func InfoWithContext(ctx context.Context) ([]InfoStat, error) { +func InfoWithContext(_ context.Context) ([]InfoStat, error) { var ret []InfoStat var err error @@ -132,6 +133,6 @@ func InfoWithContext(ctx context.Context) ([]InfoStat, error) { return append(ret, c), nil } -func CountsWithContext(ctx context.Context, logical bool) (int, error) { +func CountsWithContext(_ context.Context, _ bool) (int, error) { return runtime.NumCPU(), nil } diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd_386.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_openbsd_386.go similarity index 73% rename from vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd_386.go rename to 
vendor/github.com/shirou/gopsutil/v4/cpu/cpu_openbsd_386.go index 5e878399a..40a6f43e4 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd_386.go +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_openbsd_386.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause package cpu type cpuTimes struct { diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd_amd64.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_openbsd_amd64.go similarity index 73% rename from vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd_amd64.go rename to vendor/github.com/shirou/gopsutil/v4/cpu/cpu_openbsd_amd64.go index d659058cd..464156d54 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd_amd64.go +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_openbsd_amd64.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause package cpu type cpuTimes struct { diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd_arm.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_openbsd_arm.go similarity index 73% rename from vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd_arm.go rename to vendor/github.com/shirou/gopsutil/v4/cpu/cpu_openbsd_arm.go index 5e878399a..40a6f43e4 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd_arm.go +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_openbsd_arm.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause package cpu type cpuTimes struct { diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd_arm64.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_openbsd_arm64.go similarity index 73% rename from vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd_arm64.go rename to vendor/github.com/shirou/gopsutil/v4/cpu/cpu_openbsd_arm64.go index d659058cd..464156d54 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd_arm64.go +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_openbsd_arm64.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause package cpu type cpuTimes struct { diff --git a/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_openbsd_riscv64.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_openbsd_riscv64.go new file mode 100644 index 000000000..464156d54 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_openbsd_riscv64.go @@ -0,0 +1,11 @@ +// SPDX-License-Identifier: BSD-3-Clause +package cpu + +type cpuTimes struct { + User uint64 + Nice uint64 + Sys uint64 + Spin uint64 + Intr uint64 + Idle uint64 +} diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_plan9.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_plan9.go similarity index 73% rename from vendor/github.com/shirou/gopsutil/v3/cpu/cpu_plan9.go rename to vendor/github.com/shirou/gopsutil/v4/cpu/cpu_plan9.go index a2e99d8c0..02ad3f747 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_plan9.go +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_plan9.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build plan9 -// +build plan9 package cpu @@ -9,14 +9,15 @@ import ( "runtime" stats "github.com/lufia/plan9stats" - "github.com/shirou/gopsutil/v3/internal/common" + + "github.com/shirou/gopsutil/v4/internal/common" ) func Times(percpu bool) ([]TimesStat, error) { return TimesWithContext(context.Background(), percpu) } -func TimesWithContext(ctx context.Context, percpu bool) ([]TimesStat, error) { +func TimesWithContext(ctx context.Context, _ bool) ([]TimesStat, error) { // BUG: percpu flag is not supported yet. 
root := os.Getenv("HOST_ROOT") c, err := stats.ReadCPUType(ctx, stats.WithRootDir(root)) @@ -41,10 +42,10 @@ func Info() ([]InfoStat, error) { return InfoWithContext(context.Background()) } -func InfoWithContext(ctx context.Context) ([]InfoStat, error) { +func InfoWithContext(_ context.Context) ([]InfoStat, error) { return []InfoStat{}, common.ErrNotImplementedError } -func CountsWithContext(ctx context.Context, logical bool) (int, error) { +func CountsWithContext(_ context.Context, _ bool) (int, error) { return runtime.NumCPU(), nil } diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_solaris.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_solaris.go similarity index 84% rename from vendor/github.com/shirou/gopsutil/v3/cpu/cpu_solaris.go rename to vendor/github.com/shirou/gopsutil/v4/cpu/cpu_solaris.go index 4231ad168..9494e3c3f 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_solaris.go +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_solaris.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause package cpu import ( @@ -41,7 +42,7 @@ var kstatSplit = regexp.MustCompile(`[:\s]+`) func TimesWithContext(ctx context.Context, percpu bool) ([]TimesStat, error) { kstatSysOut, err := invoke.CommandWithContext(ctx, "kstat", "-p", "cpu_stat:*:*:/^idle$|^user$|^kernel$|^iowait$|^swap$/") if err != nil { - return nil, fmt.Errorf("cannot execute kstat: %s", err) + return nil, fmt.Errorf("cannot execute kstat: %w", err) } cpu := make(map[float64]float64) idle := make(map[float64]float64) @@ -56,31 +57,31 @@ func TimesWithContext(ctx context.Context, percpu bool) ([]TimesStat, error) { } cpuNumber, err := strconv.ParseFloat(fields[1], 64) if err != nil { - return nil, fmt.Errorf("cannot parse cpu number: %s", err) + return nil, fmt.Errorf("cannot parse cpu number: %w", err) } cpu[cpuNumber] = cpuNumber switch fields[3] { case "idle": idle[cpuNumber], err = strconv.ParseFloat(fields[4], 64) if err != nil { - return nil, fmt.Errorf("cannot parse idle: %s", err) + return nil, fmt.Errorf("cannot parse idle: %w", err) } case "user": user[cpuNumber], err = strconv.ParseFloat(fields[4], 64) if err != nil { - return nil, fmt.Errorf("cannot parse user: %s", err) + return nil, fmt.Errorf("cannot parse user: %w", err) } case "kernel": kern[cpuNumber], err = strconv.ParseFloat(fields[4], 64) if err != nil { - return nil, fmt.Errorf("cannot parse kernel: %s", err) + return nil, fmt.Errorf("cannot parse kernel: %w", err) } case "iowait": iowt[cpuNumber], err = strconv.ParseFloat(fields[4], 64) if err != nil { - return nil, fmt.Errorf("cannot parse iowait: %s", err) + return nil, fmt.Errorf("cannot parse iowait: %w", err) } - //not sure how this translates, don't report, add to kernel, something else? + // not sure how this translates, don't report, add to kernel, something else? 
/*case "swap": swap[cpuNumber], err = strconv.ParseFloat(fields[4], 64) if err != nil { @@ -120,27 +121,27 @@ func Info() ([]InfoStat, error) { func InfoWithContext(ctx context.Context) ([]InfoStat, error) { psrInfoOut, err := invoke.CommandWithContext(ctx, "psrinfo", "-p", "-v") if err != nil { - return nil, fmt.Errorf("cannot execute psrinfo: %s", err) + return nil, fmt.Errorf("cannot execute psrinfo: %w", err) } procs, err := parseProcessorInfo(string(psrInfoOut)) if err != nil { - return nil, fmt.Errorf("error parsing psrinfo output: %s", err) + return nil, fmt.Errorf("error parsing psrinfo output: %w", err) } isaInfoOut, err := invoke.CommandWithContext(ctx, "isainfo", "-b", "-v") if err != nil { - return nil, fmt.Errorf("cannot execute isainfo: %s", err) + return nil, fmt.Errorf("cannot execute isainfo: %w", err) } flags, err := parseISAInfo(string(isaInfoOut)) if err != nil { - return nil, fmt.Errorf("error parsing isainfo output: %s", err) + return nil, fmt.Errorf("error parsing isainfo output: %w", err) } result := make([]InfoStat, 0, len(flags)) - for _, proc := range procs { - procWithFlags := proc + for i := range procs { + procWithFlags := procs[i] procWithFlags.Flags = flags result = append(result, procWithFlags) } @@ -148,7 +149,7 @@ func InfoWithContext(ctx context.Context) ([]InfoStat, error) { return result, nil } -var flagsMatch = regexp.MustCompile(`[\w\.]+`) +var flagsMatch = regexp.MustCompile(`[\w.]+`) func parseISAInfo(cmdOutput string) ([]string, error) { words := flagsMatch.FindAllString(cmdOutput, -1) @@ -158,10 +159,7 @@ func parseISAInfo(cmdOutput string) ([]string, error) { return nil, errors.New("attempted to parse invalid isainfo output") } - flags := make([]string, len(words)-4) - for i, val := range words[4:] { - flags[i] = val - } + flags := words[4:] sort.Strings(flags) return flags, nil @@ -193,7 +191,7 @@ func parseProcessorInfo(cmdOutput string) ([]InfoStat, error) { if physicalCPU[psrStepOffset] != "" { stepParsed, err := strconv.ParseInt(physicalCPU[psrStepOffset], 10, 32) if err != nil { - return nil, fmt.Errorf("cannot parse value %q for step as 32-bit integer: %s", physicalCPU[9], err) + return nil, fmt.Errorf("cannot parse value %q for step as 32-bit integer: %w", physicalCPU[9], err) } step = int32(stepParsed) } @@ -201,7 +199,7 @@ func parseProcessorInfo(cmdOutput string) ([]InfoStat, error) { if physicalCPU[psrClockOffset] != "" { clockParsed, err := strconv.ParseInt(physicalCPU[psrClockOffset], 10, 64) if err != nil { - return nil, fmt.Errorf("cannot parse value %q for clock as 32-bit integer: %s", physicalCPU[10], err) + return nil, fmt.Errorf("cannot parse value %q for clock as 32-bit integer: %w", physicalCPU[10], err) } clock = float64(clockParsed) } @@ -213,7 +211,7 @@ func parseProcessorInfo(cmdOutput string) ([]InfoStat, error) { case physicalCPU[psrNumCoresOffset] != "": numCores, err = strconv.ParseInt(physicalCPU[psrNumCoresOffset], 10, 32) if err != nil { - return nil, fmt.Errorf("cannot parse value %q for core count as 32-bit integer: %s", physicalCPU[1], err) + return nil, fmt.Errorf("cannot parse value %q for core count as 32-bit integer: %w", physicalCPU[1], err) } for i := 0; i < int(numCores); i++ { @@ -234,12 +232,12 @@ func parseProcessorInfo(cmdOutput string) ([]InfoStat, error) { case physicalCPU[psrNumCoresHTOffset] != "": numCores, err = strconv.ParseInt(physicalCPU[psrNumCoresHTOffset], 10, 32) if err != nil { - return nil, fmt.Errorf("cannot parse value %q for core count as 32-bit integer: %s", physicalCPU[3], err) + 
return nil, fmt.Errorf("cannot parse value %q for core count as 32-bit integer: %w", physicalCPU[3], err) } numHT, err = strconv.ParseInt(physicalCPU[psrNumHTOffset], 10, 32) if err != nil { - return nil, fmt.Errorf("cannot parse value %q for hyperthread count as 32-bit integer: %s", physicalCPU[4], err) + return nil, fmt.Errorf("cannot parse value %q for hyperthread count as 32-bit integer: %w", physicalCPU[4], err) } for i := 0; i < int(numCores); i++ { @@ -264,6 +262,6 @@ func parseProcessorInfo(cmdOutput string) ([]InfoStat, error) { return result, nil } -func CountsWithContext(ctx context.Context, logical bool) (int, error) { +func CountsWithContext(_ context.Context, _ bool) (int, error) { return runtime.NumCPU(), nil } diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_windows.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_windows.go similarity index 63% rename from vendor/github.com/shirou/gopsutil/v3/cpu/cpu_windows.go rename to vendor/github.com/shirou/gopsutil/v4/cpu/cpu_windows.go index e10612fd1..3f4416bfd 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_windows.go +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_windows.go @@ -1,23 +1,27 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build windows -// +build windows package cpu import ( "context" + "errors" "fmt" + "strconv" "unsafe" - "github.com/shirou/gopsutil/v3/internal/common" "github.com/yusufpapurcu/wmi" "golang.org/x/sys/windows" + + "github.com/shirou/gopsutil/v4/internal/common" ) var ( - procGetNativeSystemInfo = common.Modkernel32.NewProc("GetNativeSystemInfo") + procGetNativeSystemInfo = common.Modkernel32.NewProc("GetNativeSystemInfo") + procGetLogicalProcessorInformationEx = common.Modkernel32.NewProc("GetLogicalProcessorInformationEx") ) -type win32_Processor struct { +type win32_Processor struct { //nolint:revive //FIXME Family uint16 Manufacturer string Name string @@ -33,13 +37,13 @@ type win32_Processor struct { // https://docs.microsoft.com/en-us/windows/desktop/api/winternl/nf-winternl-ntquerysysteminformation#system_processor_performance_information // additional fields documented here // https://www.geoffchappell.com/studies/windows/km/ntoskrnl/api/ex/sysinfo/processor_performance.htm -type win32_SystemProcessorPerformanceInformation struct { - IdleTime int64 // idle time in 100ns (this is not a filetime). - KernelTime int64 // kernel time in 100ns. kernel time includes idle time. (this is not a filetime). - UserTime int64 // usertime in 100ns (this is not a filetime). - DpcTime int64 // dpc time in 100ns (this is not a filetime). - InterruptTime int64 // interrupt time in 100ns - InterruptCount uint32 +type win32_SystemProcessorPerformanceInformation struct { //nolint:revive //FIXME + IdleTime int64 // idle time in 100ns (this is not a filetime). + KernelTime int64 // kernel time in 100ns. kernel time includes idle time. (this is not a filetime). + UserTime int64 // usertime in 100ns (this is not a filetime). + DpcTime int64 // dpc time in 100ns (this is not a filetime). 
+ InterruptTime int64 // interrupt time in 100ns + InterruptCount uint64 // ULONG needs to be uint64 } const ( @@ -47,10 +51,10 @@ const ( // systemProcessorPerformanceInformationClass information class to query with NTQuerySystemInformation // https://processhacker.sourceforge.io/doc/ntexapi_8h.html#ad5d815b48e8f4da1ef2eb7a2f18a54e0 - win32_SystemProcessorPerformanceInformationClass = 8 + win32_SystemProcessorPerformanceInformationClass = 8 //nolint:revive //FIXME // size of systemProcessorPerformanceInfoSize in memory - win32_SystemProcessorPerformanceInfoSize = uint32(unsafe.Sizeof(win32_SystemProcessorPerformanceInformation{})) + win32_SystemProcessorPerformanceInfoSize = uint32(unsafe.Sizeof(win32_SystemProcessorPerformanceInformation{})) //nolint:revive //FIXME ) // Times returns times stat per cpu and combined for all CPUs @@ -58,7 +62,7 @@ func Times(percpu bool) ([]TimesStat, error) { return TimesWithContext(context.Background(), percpu) } -func TimesWithContext(ctx context.Context, percpu bool) ([]TimesStat, error) { +func TimesWithContext(_ context.Context, percpu bool) ([]TimesStat, error) { if percpu { return perCPUTimes() } @@ -67,12 +71,14 @@ func TimesWithContext(ctx context.Context, percpu bool) ([]TimesStat, error) { var lpIdleTime common.FILETIME var lpKernelTime common.FILETIME var lpUserTime common.FILETIME - r, _, _ := common.ProcGetSystemTimes.Call( + // GetSystemTimes returns 0 for error, in which case we check err, + // see https://pkg.go.dev/golang.org/x/sys/windows#LazyProc.Call + r, _, err := common.ProcGetSystemTimes.Call( uintptr(unsafe.Pointer(&lpIdleTime)), uintptr(unsafe.Pointer(&lpKernelTime)), uintptr(unsafe.Pointer(&lpUserTime))) if r == 0 { - return ret, windows.GetLastError() + return nil, err } LOT := float64(0.0000001) @@ -112,7 +118,7 @@ func InfoWithContext(ctx context.Context) ([]InfoStat, error) { cpu := InfoStat{ CPU: int32(i), - Family: fmt.Sprintf("%d", l.Family), + Family: strconv.FormatUint(uint64(l.Family), 10), VendorID: l.Manufacturer, ModelName: l.Name, Cores: int32(l.NumberOfLogicalProcessors), @@ -200,13 +206,70 @@ type systemInfo struct { wProcessorRevision uint16 } -func CountsWithContext(ctx context.Context, logical bool) (int, error) { +type groupAffinity struct { + mask uintptr // https://learn.microsoft.com/it-it/windows-hardware/drivers/kernel/interrupt-affinity-and-priority#about-kaffinity + group uint16 + reserved [3]uint16 +} + +// https://learn.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-processor_relationship +type processorRelationship struct { + flags byte + efficientClass byte + reserved [20]byte + groupCount uint16 + groupMask [1]groupAffinity +} + +// https://learn.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-system_logical_processor_information_ex +type systemLogicalProcessorInformationEx struct { + Relationship uint32 + Size uint32 + Processor processorRelationship +} + +func getPhysicalCoreCount() (int, error) { + var length uint32 + const relationAll = 0xffff + const relationProcessorCore = 0x0 + + // First call to determine the required buffer size + _, _, err := procGetLogicalProcessorInformationEx.Call(uintptr(relationAll), 0, uintptr(unsafe.Pointer(&length))) + if err != nil && !errors.Is(err, windows.ERROR_INSUFFICIENT_BUFFER) { + return 0, fmt.Errorf("failed to get buffer size: %w", err) + } + + // Allocate the buffer + buffer := make([]byte, length) + + // Second call to retrieve the processor information + _, _, err = procGetLogicalProcessorInformationEx.Call(uintptr(relationAll), 
uintptr(unsafe.Pointer(&buffer[0])), uintptr(unsafe.Pointer(&length))) + if err != nil && !errors.Is(err, windows.NTE_OP_OK) { + return 0, fmt.Errorf("failed to get logical processor information: %w", err) + } + + // Iterate through the buffer to count physical cores + offset := uintptr(0) + ncpus := 0 + for offset < uintptr(length) { + info := (*systemLogicalProcessorInformationEx)(unsafe.Pointer(uintptr(unsafe.Pointer(&buffer[0])) + offset)) + if info.Relationship == relationProcessorCore { + ncpus++ + } + offset += uintptr(info.Size) + } + + return ncpus, nil +} + +func CountsWithContext(_ context.Context, logical bool) (int, error) { if logical { - // https://github.com/giampaolo/psutil/blob/d01a9eaa35a8aadf6c519839e987a49d8be2d891/psutil/_psutil_windows.c#L97 + // Get logical processor count https://github.com/giampaolo/psutil/blob/d01a9eaa35a8aadf6c519839e987a49d8be2d891/psutil/_psutil_windows.c#L97 ret := windows.GetActiveProcessorCount(windows.ALL_PROCESSOR_GROUPS) if ret != 0 { return int(ret), nil } + var systemInfo systemInfo _, _, err := procGetNativeSystemInfo.Call(uintptr(unsafe.Pointer(&systemInfo))) if systemInfo.dwNumberOfProcessors == 0 { @@ -214,16 +277,7 @@ func CountsWithContext(ctx context.Context, logical bool) (int, error) { } return int(systemInfo.dwNumberOfProcessors), nil } - // physical cores https://github.com/giampaolo/psutil/blob/d01a9eaa35a8aadf6c519839e987a49d8be2d891/psutil/_psutil_windows.c#L499 - // for the time being, try with unreliable and slow WMI call… - var dst []win32_Processor - q := wmi.CreateQuery(&dst, "") - if err := common.WMIQueryWithContext(ctx, q, &dst); err != nil { - return 0, err - } - var count uint32 - for _, d := range dst { - count += d.NumberOfCores - } - return int(count), nil + + // Get physical core count https://github.com/giampaolo/psutil/blob/d01a9eaa35a8aadf6c519839e987a49d8be2d891/psutil/_psutil_windows.c#L499 + return getPhysicalCoreCount() } diff --git a/vendor/github.com/shirou/gopsutil/v3/internal/common/binary.go b/vendor/github.com/shirou/gopsutil/v4/internal/common/binary.go similarity index 98% rename from vendor/github.com/shirou/gopsutil/v3/internal/common/binary.go rename to vendor/github.com/shirou/gopsutil/v4/internal/common/binary.go index 5e8d43db8..11a4fd410 100644 --- a/vendor/github.com/shirou/gopsutil/v3/internal/common/binary.go +++ b/vendor/github.com/shirou/gopsutil/v4/internal/common/binary.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause package common // Copyright 2009 The Go Authors. All rights reserved. @@ -136,7 +137,7 @@ func (bigEndian) GoString() string { return "binary.BigEndian" } // blank (_) field names is skipped; i.e., blank field names // may be used for padding. // When reading into a struct, all non-blank fields must be exported. -func Read(r io.Reader, order ByteOrder, data interface{}) error { +func Read(r io.Reader, order ByteOrder, data any) error { // Fast path for basic types and slices. if n := intDataSize(data); n != 0 { var b [8]byte @@ -228,7 +229,7 @@ func Read(r io.Reader, order ByteOrder, data interface{}) error { // and read from successive fields of the data. // When writing structs, zero values are written for fields // with blank (_) field names. -func Write(w io.Writer, order ByteOrder, data interface{}) error { +func Write(w io.Writer, order ByteOrder, data any) error { // Fast path for basic types and slices. 
if n := intDataSize(data); n != 0 { var b [8]byte @@ -338,7 +339,7 @@ func Write(w io.Writer, order ByteOrder, data interface{}) error { // Size returns how many bytes Write would generate to encode the value v, which // must be a fixed-size value or a slice of fixed-size values, or a pointer to such data. // If v is neither of these, Size returns -1. -func Size(v interface{}) int { +func Size(v any) int { return dataSize(reflect.Indirect(reflect.ValueOf(v))) } @@ -606,7 +607,7 @@ func (e *encoder) skip(v reflect.Value) { // intDataSize returns the size of the data required to represent the data when encoded. // It returns zero if the type cannot be implemented by the fast path in Read or Write. -func intDataSize(data interface{}) int { +func intDataSize(data any) int { switch data := data.(type) { case int8, *int8, *uint8: return 1 diff --git a/vendor/github.com/shirou/gopsutil/v3/internal/common/common.go b/vendor/github.com/shirou/gopsutil/v4/internal/common/common.go similarity index 90% rename from vendor/github.com/shirou/gopsutil/v3/internal/common/common.go rename to vendor/github.com/shirou/gopsutil/v4/internal/common/common.go index 99ed6a58e..d48b41e51 100644 --- a/vendor/github.com/shirou/gopsutil/v3/internal/common/common.go +++ b/vendor/github.com/shirou/gopsutil/v4/internal/common/common.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause package common // @@ -14,6 +15,7 @@ import ( "errors" "fmt" "io" + "math" "net/url" "os" "os/exec" @@ -25,12 +27,13 @@ import ( "strings" "time" - "github.com/shirou/gopsutil/v3/common" + "github.com/shirou/gopsutil/v4/common" ) var ( - Timeout = 3 * time.Second - ErrTimeout = errors.New("command timed out") + Timeout = 3 * time.Second + ErrNotImplementedError = errors.New("not implemented yet") + ErrTimeout = errors.New("command timed out") ) type Invoker interface { @@ -91,12 +94,10 @@ func (i FakeInvoke) Command(name string, arg ...string) ([]byte, error) { return []byte{}, fmt.Errorf("could not find testdata: %s", fpath) } -func (i FakeInvoke) CommandWithContext(ctx context.Context, name string, arg ...string) ([]byte, error) { +func (i FakeInvoke) CommandWithContext(_ context.Context, name string, arg ...string) ([]byte, error) { return i.Command(name, arg...) } -var ErrNotImplementedError = errors.New("not implemented yet") - // ReadFile reads contents from a file func ReadFile(filename string) (string, error) { content, err := os.ReadFile(filename) @@ -114,7 +115,7 @@ func ReadLines(filename string) ([]string, error) { } // ReadLine reads a file and returns the first occurrence of a line that is prefixed with prefix. -func ReadLine(filename string, prefix string) (string, error) { +func ReadLine(filename, prefix string) (string, error) { f, err := os.Open(filename) if err != nil { return "", err @@ -152,15 +153,15 @@ func ReadLinesOffsetN(filename string, offset uint, n int) ([]string, error) { var ret []string r := bufio.NewReader(f) - for i := 0; i < n+int(offset) || n < 0; i++ { + for i := uint(0); i < uint(n)+offset || n < 0; i++ { line, err := r.ReadString('\n') if err != nil { - if err == io.EOF && len(line) > 0 { + if err == io.EOF && line != "" { ret = append(ret, strings.Trim(line, "\n")) } break } - if i < int(offset) { + if i < offset { continue } ret = append(ret, strings.Trim(line, "\n")) @@ -309,7 +310,7 @@ func IntContains(target []int, src int) bool { // get struct attributes. // This method is used only for debugging platform dependent code. 
-func attributes(m interface{}) map[string]reflect.Type { +func attributes(m any) map[string]reflect.Type { typ := reflect.TypeOf(m) if typ.Kind() == reflect.Ptr { typ = typ.Elem() @@ -343,12 +344,12 @@ func PathExistsWithContents(filename string) bool { if err != nil { return false } - return info.Size() > 4 // at least 4 bytes + return info.Size() > 4 && !info.IsDir() // at least 4 bytes } // GetEnvWithContext retrieves the environment variable key. If it does not exist it returns the default. // The context may optionally contain a map superseding os.EnvKey. -func GetEnvWithContext(ctx context.Context, key string, dfault string, combineWith ...string) string { +func GetEnvWithContext(ctx context.Context, key, dfault string, combineWith ...string) string { var value string if env, ok := ctx.Value(common.EnvKey).(common.EnvMap); ok { value = env[common.EnvKeyType(key)] @@ -364,7 +365,7 @@ func GetEnvWithContext(ctx context.Context, key string, dfault string, combineWi } // GetEnv retrieves the environment variable key. If it does not exist it returns the default. -func GetEnv(key string, dfault string, combineWith ...string) string { +func GetEnv(key, dfault string, combineWith ...string) string { value := os.Getenv(key) if value == "" { value = dfault @@ -462,3 +463,11 @@ func getSysctrlEnv(env []string) []string { } return env } + +// Round places rounds the number 'val' to 'n' decimal places +func Round(val float64, n int) float64 { + // Calculate the power of 10 to the n + pow10 := math.Pow(10, float64(n)) + // Multiply the value by pow10, round it, then divide it by pow10 + return math.Round(val*pow10) / pow10 +} diff --git a/vendor/github.com/shirou/gopsutil/v4/internal/common/common_darwin.go b/vendor/github.com/shirou/gopsutil/v4/internal/common/common_darwin.go new file mode 100644 index 000000000..c9d610540 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/internal/common/common_darwin.go @@ -0,0 +1,400 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build darwin + +package common + +import ( + "context" + "errors" + "fmt" + "os" + "os/exec" + "strings" + "unsafe" + + "github.com/ebitengine/purego" + "golang.org/x/sys/unix" +) + +func DoSysctrlWithContext(ctx context.Context, mib string) ([]string, error) { + cmd := exec.CommandContext(ctx, "sysctl", "-n", mib) + cmd.Env = getSysctrlEnv(os.Environ()) + out, err := cmd.Output() + if err != nil { + return []string{}, err + } + v := strings.Replace(string(out), "{ ", "", 1) + v = strings.Replace(string(v), " }", "", 1) + values := strings.Fields(string(v)) + + return values, nil +} + +func CallSyscall(mib []int32) ([]byte, uint64, error) { + miblen := uint64(len(mib)) + + // get required buffer size + length := uint64(0) + _, _, err := unix.Syscall6( + 202, // unix.SYS___SYSCTL https://github.com/golang/sys/blob/76b94024e4b621e672466e8db3d7f084e7ddcad2/unix/zsysnum_darwin_amd64.go#L146 + uintptr(unsafe.Pointer(&mib[0])), + uintptr(miblen), + 0, + uintptr(unsafe.Pointer(&length)), + 0, + 0) + if err != 0 { + var b []byte + return b, length, err + } + if length == 0 { + var b []byte + return b, length, err + } + // get proc info itself + buf := make([]byte, length) + _, _, err = unix.Syscall6( + 202, // unix.SYS___SYSCTL https://github.com/golang/sys/blob/76b94024e4b621e672466e8db3d7f084e7ddcad2/unix/zsysnum_darwin_amd64.go#L146 + uintptr(unsafe.Pointer(&mib[0])), + uintptr(miblen), + uintptr(unsafe.Pointer(&buf[0])), + uintptr(unsafe.Pointer(&length)), + 0, + 0) + if err != 0 { + return buf, length, err + } + + return buf, 
length, nil +} + +// Library represents a dynamic library loaded by purego. +type Library struct { + addr uintptr + path string + close func() +} + +// library paths +const ( + IOKit = "/System/Library/Frameworks/IOKit.framework/IOKit" + CoreFoundation = "/System/Library/Frameworks/CoreFoundation.framework/CoreFoundation" + System = "/usr/lib/libSystem.B.dylib" +) + +func NewLibrary(path string) (*Library, error) { + lib, err := purego.Dlopen(path, purego.RTLD_LAZY|purego.RTLD_GLOBAL) + if err != nil { + return nil, err + } + + closeFunc := func() { + purego.Dlclose(lib) + } + + return &Library{ + addr: lib, + path: path, + close: closeFunc, + }, nil +} + +func (lib *Library) Dlsym(symbol string) (uintptr, error) { + return purego.Dlsym(lib.addr, symbol) +} + +func GetFunc[T any](lib *Library, symbol string) T { + var fptr T + purego.RegisterLibFunc(&fptr, lib.addr, symbol) + return fptr +} + +func (lib *Library) Close() { + lib.close() +} + +// status codes +const ( + KERN_SUCCESS = 0 +) + +// IOKit functions and symbols. +type ( + IOServiceGetMatchingServiceFunc func(mainPort uint32, matching uintptr) uint32 + IOServiceGetMatchingServicesFunc func(mainPort uint32, matching uintptr, existing *uint32) int + IOServiceMatchingFunc func(name string) unsafe.Pointer + IOServiceOpenFunc func(service, owningTask, connType uint32, connect *uint32) int + IOServiceCloseFunc func(connect uint32) int + IOIteratorNextFunc func(iterator uint32) uint32 + IORegistryEntryGetNameFunc func(entry uint32, name CStr) int + IORegistryEntryGetParentEntryFunc func(entry uint32, plane string, parent *uint32) int + IORegistryEntryCreateCFPropertyFunc func(entry uint32, key, allocator uintptr, options uint32) unsafe.Pointer + IORegistryEntryCreateCFPropertiesFunc func(entry uint32, properties unsafe.Pointer, allocator uintptr, options uint32) int + IOObjectConformsToFunc func(object uint32, className string) bool + IOObjectReleaseFunc func(object uint32) int + IOConnectCallStructMethodFunc func(connection, selector uint32, inputStruct, inputStructCnt, outputStruct uintptr, outputStructCnt *uintptr) int + + IOHIDEventSystemClientCreateFunc func(allocator uintptr) unsafe.Pointer + IOHIDEventSystemClientSetMatchingFunc func(client, match uintptr) int + IOHIDServiceClientCopyEventFunc func(service uintptr, eventType int64, + options int32, timeout int64) unsafe.Pointer + IOHIDServiceClientCopyPropertyFunc func(service, property uintptr) unsafe.Pointer + IOHIDEventGetFloatValueFunc func(event uintptr, field int32) float64 + IOHIDEventSystemClientCopyServicesFunc func(client uintptr) unsafe.Pointer +) + +const ( + IOServiceGetMatchingServiceSym = "IOServiceGetMatchingService" + IOServiceGetMatchingServicesSym = "IOServiceGetMatchingServices" + IOServiceMatchingSym = "IOServiceMatching" + IOServiceOpenSym = "IOServiceOpen" + IOServiceCloseSym = "IOServiceClose" + IOIteratorNextSym = "IOIteratorNext" + IORegistryEntryGetNameSym = "IORegistryEntryGetName" + IORegistryEntryGetParentEntrySym = "IORegistryEntryGetParentEntry" + IORegistryEntryCreateCFPropertySym = "IORegistryEntryCreateCFProperty" + IORegistryEntryCreateCFPropertiesSym = "IORegistryEntryCreateCFProperties" + IOObjectConformsToSym = "IOObjectConformsTo" + IOObjectReleaseSym = "IOObjectRelease" + IOConnectCallStructMethodSym = "IOConnectCallStructMethod" + + IOHIDEventSystemClientCreateSym = "IOHIDEventSystemClientCreate" + IOHIDEventSystemClientSetMatchingSym = "IOHIDEventSystemClientSetMatching" + IOHIDServiceClientCopyEventSym = "IOHIDServiceClientCopyEvent" + 
IOHIDServiceClientCopyPropertySym = "IOHIDServiceClientCopyProperty" + IOHIDEventGetFloatValueSym = "IOHIDEventGetFloatValue" + IOHIDEventSystemClientCopyServicesSym = "IOHIDEventSystemClientCopyServices" +) + +const ( + KIOMainPortDefault = 0 + + KIOHIDEventTypeTemperature = 15 + + KNilOptions = 0 +) + +const ( + KIOMediaWholeKey = "Media" + KIOServicePlane = "IOService" +) + +// CoreFoundation functions and symbols. +type ( + CFGetTypeIDFunc func(cf uintptr) int32 + CFNumberCreateFunc func(allocator uintptr, theType int32, valuePtr uintptr) unsafe.Pointer + CFNumberGetValueFunc func(num uintptr, theType int32, valuePtr uintptr) bool + CFDictionaryCreateFunc func(allocator uintptr, keys, values *unsafe.Pointer, numValues int32, + keyCallBacks, valueCallBacks uintptr) unsafe.Pointer + CFDictionaryAddValueFunc func(theDict, key, value uintptr) + CFDictionaryGetValueFunc func(theDict, key uintptr) unsafe.Pointer + CFArrayGetCountFunc func(theArray uintptr) int32 + CFArrayGetValueAtIndexFunc func(theArray uintptr, index int32) unsafe.Pointer + CFStringCreateMutableFunc func(alloc uintptr, maxLength int32) unsafe.Pointer + CFStringGetLengthFunc func(theString uintptr) int32 + CFStringGetCStringFunc func(theString uintptr, buffer CStr, bufferSize int32, encoding uint32) + CFStringCreateWithCStringFunc func(alloc uintptr, cStr string, encoding uint32) unsafe.Pointer + CFDataGetLengthFunc func(theData uintptr) int32 + CFDataGetBytePtrFunc func(theData uintptr) unsafe.Pointer + CFReleaseFunc func(cf uintptr) +) + +const ( + CFGetTypeIDSym = "CFGetTypeID" + CFNumberCreateSym = "CFNumberCreate" + CFNumberGetValueSym = "CFNumberGetValue" + CFDictionaryCreateSym = "CFDictionaryCreate" + CFDictionaryAddValueSym = "CFDictionaryAddValue" + CFDictionaryGetValueSym = "CFDictionaryGetValue" + CFArrayGetCountSym = "CFArrayGetCount" + CFArrayGetValueAtIndexSym = "CFArrayGetValueAtIndex" + CFStringCreateMutableSym = "CFStringCreateMutable" + CFStringGetLengthSym = "CFStringGetLength" + CFStringGetCStringSym = "CFStringGetCString" + CFStringCreateWithCStringSym = "CFStringCreateWithCString" + CFDataGetLengthSym = "CFDataGetLength" + CFDataGetBytePtrSym = "CFDataGetBytePtr" + CFReleaseSym = "CFRelease" +) + +const ( + KCFStringEncodingUTF8 = 0x08000100 + KCFNumberSInt64Type = 4 + KCFNumberIntType = 9 + KCFAllocatorDefault = 0 +) + +// Kernel functions and symbols. +type MachTimeBaseInfo struct { + Numer uint32 + Denom uint32 +} + +type ( + HostProcessorInfoFunc func(host uint32, flavor int32, outProcessorCount *uint32, outProcessorInfo uintptr, + outProcessorInfoCnt *uint32) int + HostStatisticsFunc func(host uint32, flavor int32, hostInfoOut uintptr, hostInfoOutCnt *uint32) int + MachHostSelfFunc func() uint32 + MachTaskSelfFunc func() uint32 + MachTimeBaseInfoFunc func(info uintptr) int + VMDeallocateFunc func(targetTask uint32, vmAddress, vmSize uintptr) int +) + +const ( + HostProcessorInfoSym = "host_processor_info" + HostStatisticsSym = "host_statistics" + MachHostSelfSym = "mach_host_self" + MachTaskSelfSym = "mach_task_self" + MachTimeBaseInfoSym = "mach_timebase_info" + VMDeallocateSym = "vm_deallocate" +) + +const ( + CTL_KERN = 1 + KERN_ARGMAX = 8 + KERN_PROCARGS2 = 49 + + HOST_VM_INFO = 2 + HOST_CPU_LOAD_INFO = 3 + + HOST_VM_INFO_COUNT = 0xf +) + +// System functions and symbols. 
+type ( + ProcPidPathFunc func(pid int32, buffer uintptr, bufferSize uint32) int32 + ProcPidInfoFunc func(pid, flavor int32, arg uint64, buffer uintptr, bufferSize int32) int32 +) + +const ( + SysctlSym = "sysctl" + ProcPidPathSym = "proc_pidpath" + ProcPidInfoSym = "proc_pidinfo" +) + +const ( + MAXPATHLEN = 1024 + PROC_PIDPATHINFO_MAXSIZE = 4 * MAXPATHLEN + PROC_PIDTASKINFO = 4 + PROC_PIDVNODEPATHINFO = 9 +) + +// SMC represents a SMC instance. +type SMC struct { + lib *Library + conn uint32 + callStruct IOConnectCallStructMethodFunc +} + +const ioServiceSMC = "AppleSMC" + +const ( + KSMCUserClientOpen = 0 + KSMCUserClientClose = 1 + KSMCHandleYPCEvent = 2 + KSMCReadKey = 5 + KSMCWriteKey = 6 + KSMCGetKeyCount = 7 + KSMCGetKeyFromIndex = 8 + KSMCGetKeyInfo = 9 +) + +const ( + KSMCSuccess = 0 + KSMCError = 1 + KSMCKeyNotFound = 132 +) + +func NewSMC(ioKit *Library) (*SMC, error) { + if ioKit.path != IOKit { + return nil, errors.New("library is not IOKit") + } + + ioServiceGetMatchingService := GetFunc[IOServiceGetMatchingServiceFunc](ioKit, IOServiceGetMatchingServiceSym) + ioServiceMatching := GetFunc[IOServiceMatchingFunc](ioKit, IOServiceMatchingSym) + ioServiceOpen := GetFunc[IOServiceOpenFunc](ioKit, IOServiceOpenSym) + ioObjectRelease := GetFunc[IOObjectReleaseFunc](ioKit, IOObjectReleaseSym) + machTaskSelf := GetFunc[MachTaskSelfFunc](ioKit, MachTaskSelfSym) + + ioConnectCallStructMethod := GetFunc[IOConnectCallStructMethodFunc](ioKit, IOConnectCallStructMethodSym) + + service := ioServiceGetMatchingService(0, uintptr(ioServiceMatching(ioServiceSMC))) + if service == 0 { + return nil, fmt.Errorf("ERROR: %s NOT FOUND", ioServiceSMC) + } + + var conn uint32 + if result := ioServiceOpen(service, machTaskSelf(), 0, &conn); result != 0 { + return nil, errors.New("ERROR: IOServiceOpen failed") + } + + ioObjectRelease(service) + return &SMC{ + lib: ioKit, + conn: conn, + callStruct: ioConnectCallStructMethod, + }, nil +} + +func (s *SMC) CallStruct(selector uint32, inputStruct, inputStructCnt, outputStruct uintptr, outputStructCnt *uintptr) int { + return s.callStruct(s.conn, selector, inputStruct, inputStructCnt, outputStruct, outputStructCnt) +} + +func (s *SMC) Close() error { + ioServiceClose := GetFunc[IOServiceCloseFunc](s.lib, IOServiceCloseSym) + + if result := ioServiceClose(s.conn); result != 0 { + return errors.New("ERROR: IOServiceClose failed") + } + return nil +} + +type CStr []byte + +func NewCStr(length int32) CStr { + return make(CStr, length) +} + +func (s CStr) Length() int32 { + // Include null terminator to make CFStringGetCString properly functions + return int32(len(s)) + 1 +} + +func (s CStr) Ptr() *byte { + if len(s) < 1 { + return nil + } + + return &s[0] +} + +func (s CStr) Addr() uintptr { + return uintptr(unsafe.Pointer(s.Ptr())) +} + +func (s CStr) GoString() string { + if s == nil { + return "" + } + + var length int + for _, char := range s { + if char == '\x00' { + break + } + length++ + } + return string(s[:length]) +} + +// https://github.com/ebitengine/purego/blob/main/internal/strings/strings.go#L26 +func GoString(cStr *byte) string { + if cStr == nil { + return "" + } + var length int + for *(*byte)(unsafe.Add(unsafe.Pointer(cStr), uintptr(length))) != '\x00' { + length++ + } + return string(unsafe.Slice(cStr, length)) +} diff --git a/vendor/github.com/shirou/gopsutil/v3/internal/common/common_freebsd.go b/vendor/github.com/shirou/gopsutil/v4/internal/common/common_freebsd.go similarity index 97% rename from 
vendor/github.com/shirou/gopsutil/v3/internal/common/common_freebsd.go rename to vendor/github.com/shirou/gopsutil/v4/internal/common/common_freebsd.go index f590e2e67..53cdceeb6 100644 --- a/vendor/github.com/shirou/gopsutil/v3/internal/common/common_freebsd.go +++ b/vendor/github.com/shirou/gopsutil/v4/internal/common/common_freebsd.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build freebsd || openbsd -// +build freebsd openbsd package common diff --git a/vendor/github.com/shirou/gopsutil/v3/internal/common/common_linux.go b/vendor/github.com/shirou/gopsutil/v4/internal/common/common_linux.go similarity index 83% rename from vendor/github.com/shirou/gopsutil/v3/internal/common/common_linux.go rename to vendor/github.com/shirou/gopsutil/v4/internal/common/common_linux.go index a644687ba..ffaae423b 100644 --- a/vendor/github.com/shirou/gopsutil/v3/internal/common/common_linux.go +++ b/vendor/github.com/shirou/gopsutil/v4/internal/common/common_linux.go @@ -1,21 +1,25 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build linux -// +build linux package common import ( "context" - "fmt" + "errors" "os" "os/exec" "path/filepath" "strconv" "strings" "sync" + "sync/atomic" "syscall" "time" ) +// cachedBootTime must be accessed via atomic.Load/StoreUint64 +var cachedBootTime uint64 + func DoSysctrl(mib string) ([]string, error) { cmd := exec.Command("sysctl", "-n", mib) cmd.Env = getSysctrlEnv(os.Environ()) @@ -56,7 +60,14 @@ func NumProcsWithContext(ctx context.Context) (uint64, error) { return cnt, nil } -func BootTimeWithContext(ctx context.Context) (uint64, error) { +func BootTimeWithContext(ctx context.Context, enableCache bool) (uint64, error) { + if enableCache { + t := atomic.LoadUint64(&cachedBootTime) + if t != 0 { + return t, nil + } + } + system, role, err := VirtualizationWithContext(ctx) if err != nil { return 0, err @@ -72,7 +83,15 @@ func BootTimeWithContext(ctx context.Context) (uint64, error) { } if useStatFile { - return readBootTimeStat(ctx) + t, err := readBootTimeStat(ctx) + if err != nil { + return 0, err + } + if enableCache { + atomic.StoreUint64(&cachedBootTime, t) + } + + return t, nil } filename := HostProcWithContext(ctx, "uptime") @@ -80,16 +99,22 @@ func BootTimeWithContext(ctx context.Context) (uint64, error) { if err != nil { return handleBootTimeFileReadErr(err) } + currentTime := float64(time.Now().UnixNano()) / float64(time.Second) + if len(lines) != 1 { - return 0, fmt.Errorf("wrong uptime format") + return 0, errors.New("wrong uptime format") } f := strings.Fields(lines[0]) b, err := strconv.ParseFloat(f[0], 64) if err != nil { return 0, err } - currentTime := float64(time.Now().UnixNano()) / float64(time.Second) t := currentTime - b + + if enableCache { + atomic.StoreUint64(&cachedBootTime, uint64(t)) + } + return uint64(t), nil } @@ -117,7 +142,7 @@ func readBootTimeStat(ctx context.Context) (uint64, error) { if strings.HasPrefix(line, "btime") { f := strings.Fields(line) if len(f) != 2 { - return 0, fmt.Errorf("wrong btime format") + return 0, errors.New("wrong btime format") } b, err := strconv.ParseInt(f[1], 10, 64) if err != nil { @@ -126,7 +151,7 @@ func readBootTimeStat(ctx context.Context) (uint64, error) { t := uint64(b) return t, nil } - return 0, fmt.Errorf("could not find btime") + return 0, errors.New("could not find btime") } func Virtualization() (string, string, error) { @@ -171,19 +196,20 @@ func VirtualizationWithContext(ctx context.Context) (string, string, error) { if PathExists(filename) { contents, err := 
ReadLines(filename) if err == nil { - if StringsContains(contents, "kvm") { + switch { + case StringsContains(contents, "kvm"): system = "kvm" role = "host" - } else if StringsContains(contents, "hv_util") { + case StringsContains(contents, "hv_util"): system = "hyperv" role = "guest" - } else if StringsContains(contents, "vboxdrv") { + case StringsContains(contents, "vboxdrv"): system = "vbox" role = "host" - } else if StringsContains(contents, "vboxguest") { + case StringsContains(contents, "vboxguest"): system = "vbox" role = "guest" - } else if StringsContains(contents, "vmware") { + case StringsContains(contents, "vmware"): system = "vmware" role = "guest" } @@ -248,16 +274,17 @@ func VirtualizationWithContext(ctx context.Context) (string, string, error) { if PathExists(filepath.Join(filename, "self", "cgroup")) { contents, err := ReadLines(filepath.Join(filename, "self", "cgroup")) if err == nil { - if StringsContains(contents, "lxc") { + switch { + case StringsContains(contents, "lxc"): system = "lxc" role = "guest" - } else if StringsContains(contents, "docker") { + case StringsContains(contents, "docker"): system = "docker" role = "guest" - } else if StringsContains(contents, "machine-rkt") { + case StringsContains(contents, "machine-rkt"): system = "rkt" role = "guest" - } else if PathExists("/usr/bin/lxc-version") { + case PathExists("/usr/bin/lxc-version"): system = "lxc" role = "host" } @@ -290,11 +317,11 @@ func VirtualizationWithContext(ctx context.Context) (string, string, error) { return system, role, nil } -func GetOSRelease() (platform string, version string, err error) { +func GetOSRelease() (platform, version string, err error) { return GetOSReleaseWithContext(context.Background()) } -func GetOSReleaseWithContext(ctx context.Context) (platform string, version string, err error) { +func GetOSReleaseWithContext(ctx context.Context) (platform, version string, err error) { contents, err := ReadLines(HostEtcWithContext(ctx, "os-release")) if err != nil { return "", "", nil // return empty @@ -307,7 +334,7 @@ func GetOSReleaseWithContext(ctx context.Context) (platform string, version stri switch field[0] { case "ID": // use ID for lowercase platform = trimQuotes(field[1]) - case "VERSION": + case "VERSION_ID": version = trimQuotes(field[1]) } } diff --git a/vendor/github.com/shirou/gopsutil/v3/internal/common/common_netbsd.go b/vendor/github.com/shirou/gopsutil/v4/internal/common/common_netbsd.go similarity index 96% rename from vendor/github.com/shirou/gopsutil/v3/internal/common/common_netbsd.go rename to vendor/github.com/shirou/gopsutil/v4/internal/common/common_netbsd.go index efbc710a5..206532126 100644 --- a/vendor/github.com/shirou/gopsutil/v3/internal/common/common_netbsd.go +++ b/vendor/github.com/shirou/gopsutil/v4/internal/common/common_netbsd.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build netbsd -// +build netbsd package common diff --git a/vendor/github.com/shirou/gopsutil/v3/internal/common/common_openbsd.go b/vendor/github.com/shirou/gopsutil/v4/internal/common/common_openbsd.go similarity index 96% rename from vendor/github.com/shirou/gopsutil/v3/internal/common/common_openbsd.go rename to vendor/github.com/shirou/gopsutil/v4/internal/common/common_openbsd.go index 58d76f334..00fa19a2f 100644 --- a/vendor/github.com/shirou/gopsutil/v3/internal/common/common_openbsd.go +++ b/vendor/github.com/shirou/gopsutil/v4/internal/common/common_openbsd.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build openbsd -// +build 
openbsd package common diff --git a/vendor/github.com/shirou/gopsutil/v4/internal/common/common_testing.go b/vendor/github.com/shirou/gopsutil/v4/internal/common/common_testing.go new file mode 100644 index 000000000..55f36f1f3 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/internal/common/common_testing.go @@ -0,0 +1,14 @@ +// SPDX-License-Identifier: BSD-3-Clause +package common + +import ( + "errors" + "testing" +) + +func SkipIfNotImplementedErr(tb testing.TB, err error) { + tb.Helper() + if errors.Is(err, ErrNotImplementedError) { + tb.Skip("not implemented") + } +} diff --git a/vendor/github.com/shirou/gopsutil/v3/internal/common/common_unix.go b/vendor/github.com/shirou/gopsutil/v4/internal/common/common_unix.go similarity index 61% rename from vendor/github.com/shirou/gopsutil/v3/internal/common/common_unix.go rename to vendor/github.com/shirou/gopsutil/v4/internal/common/common_unix.go index 4af7e5c2a..2ccb37608 100644 --- a/vendor/github.com/shirou/gopsutil/v3/internal/common/common_unix.go +++ b/vendor/github.com/shirou/gopsutil/v4/internal/common/common_unix.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build linux || freebsd || darwin || openbsd -// +build linux freebsd darwin openbsd package common @@ -33,30 +33,10 @@ func CallLsofWithContext(ctx context.Context, invoke Invoker, pid int32, args .. var ret []string for _, l := range lines[1:] { - if len(l) == 0 { + if l == "" { continue } ret = append(ret, l) } return ret, nil } - -func CallPgrepWithContext(ctx context.Context, invoke Invoker, pid int32) ([]int32, error) { - out, err := invoke.CommandWithContext(ctx, "pgrep", "-P", strconv.Itoa(int(pid))) - if err != nil { - return []int32{}, err - } - lines := strings.Split(string(out), "\n") - ret := make([]int32, 0, len(lines)) - for _, l := range lines { - if len(l) == 0 { - continue - } - i, err := strconv.ParseInt(l, 10, 32) - if err != nil { - continue - } - ret = append(ret, int32(i)) - } - return ret, nil -} diff --git a/vendor/github.com/shirou/gopsutil/v3/internal/common/common_windows.go b/vendor/github.com/shirou/gopsutil/v4/internal/common/common_windows.go similarity index 93% rename from vendor/github.com/shirou/gopsutil/v3/internal/common/common_windows.go rename to vendor/github.com/shirou/gopsutil/v4/internal/common/common_windows.go index 301b2315b..f3ec5a986 100644 --- a/vendor/github.com/shirou/gopsutil/v3/internal/common/common_windows.go +++ b/vendor/github.com/shirou/gopsutil/v4/internal/common/common_windows.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build windows -// +build windows package common @@ -17,19 +17,19 @@ import ( ) // for double values -type PDH_FMT_COUNTERVALUE_DOUBLE struct { +type PDH_FMT_COUNTERVALUE_DOUBLE struct { //nolint:revive //FIXME CStatus uint32 DoubleValue float64 } // for 64 bit integer values -type PDH_FMT_COUNTERVALUE_LARGE struct { +type PDH_FMT_COUNTERVALUE_LARGE struct { //nolint:revive //FIXME CStatus uint32 LargeValue int64 } // for long values -type PDH_FMT_COUNTERVALUE_LONG struct { +type PDH_FMT_COUNTERVALUE_LONG struct { //nolint:revive //FIXME CStatus uint32 LongValue int32 padding [4]byte @@ -197,7 +197,7 @@ func ProcessorQueueLengthCounter() (*Win32PerformanceCounter, error) { } // WMIQueryWithContext - wraps wmi.Query with a timed-out context to avoid hanging -func WMIQueryWithContext(ctx context.Context, query string, dst interface{}, connectServerArgs ...interface{}) error { +func WMIQueryWithContext(ctx context.Context, query string, dst any, 
connectServerArgs ...any) error { if _, ok := ctx.Deadline(); !ok { ctxTimeout, cancel := context.WithTimeout(ctx, Timeout) defer cancel() @@ -233,7 +233,7 @@ func ConvertDOSPath(p string) string { ret, _, _ := procQueryDosDeviceW.Call(uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(szDeviceName))), uintptr(unsafe.Pointer(&szTarget[0])), uintptr(len(szTarget))) - if ret != 0 && windows.UTF16ToString(szTarget[:]) == rawDrive { + if ret != 0 && windows.UTF16ToString(szTarget) == rawDrive { return filepath.Join(szDeviceName, p[len(rawDrive):]) } } @@ -273,19 +273,19 @@ type SystemExtendedHandleInformation struct { // CallWithExpandingBuffer https://github.com/hillu/go-ntdll func CallWithExpandingBuffer(fn func() NtStatus, buf *[]byte, resultLength *uint32) NtStatus { for { - if st := fn(); st == STATUS_BUFFER_OVERFLOW || st == STATUS_BUFFER_TOO_SMALL || st == STATUS_INFO_LENGTH_MISMATCH { + st := fn() + if st == STATUS_BUFFER_OVERFLOW || st == STATUS_BUFFER_TOO_SMALL || st == STATUS_INFO_LENGTH_MISMATCH { if int(*resultLength) <= cap(*buf) { (*reflect.SliceHeader)(unsafe.Pointer(buf)).Len = int(*resultLength) } else { *buf = make([]byte, int(*resultLength)) } continue - } else { - if !st.IsError() { - *buf = (*buf)[:int(*resultLength)] - } - return st } + if !st.IsError() { + *buf = (*buf)[:int(*resultLength)] + } + return st } } diff --git a/vendor/github.com/shirou/gopsutil/v3/internal/common/endian.go b/vendor/github.com/shirou/gopsutil/v4/internal/common/endian.go similarity index 88% rename from vendor/github.com/shirou/gopsutil/v3/internal/common/endian.go rename to vendor/github.com/shirou/gopsutil/v4/internal/common/endian.go index 147cfdc4b..113ff2e9f 100644 --- a/vendor/github.com/shirou/gopsutil/v3/internal/common/endian.go +++ b/vendor/github.com/shirou/gopsutil/v4/internal/common/endian.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause package common import "unsafe" diff --git a/vendor/github.com/shirou/gopsutil/v4/internal/common/readlink_linux.go b/vendor/github.com/shirou/gopsutil/v4/internal/common/readlink_linux.go new file mode 100644 index 000000000..ea2d4677b --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/internal/common/readlink_linux.go @@ -0,0 +1,53 @@ +package common + +import ( + "errors" + "os" + "sync" + "syscall" +) + +var bufferPool = sync.Pool{ + New: func() any { + b := make([]byte, syscall.PathMax) + return &b + }, +} + +// The following three functions are copied from stdlib. + +// ignoringEINTR2 is ignoringEINTR, but returning an additional value. +func ignoringEINTR2[T any](fn func() (T, error)) (T, error) { + for { + v, err := fn() + if !errors.Is(err, syscall.EINTR) { + return v, err + } + } +} + +// Many functions in package syscall return a count of -1 instead of 0. +// Using fixCount(call()) instead of call() corrects the count. +func fixCount(n int, err error) (int, error) { + if n < 0 { + n = 0 + } + return n, err +} + +// Readlink behaves like os.Readlink but caches the buffer passed to syscall.Readlink. 
+func Readlink(name string) (string, error) { + b := bufferPool.Get().(*[]byte) + + n, err := ignoringEINTR2(func() (int, error) { + return fixCount(syscall.Readlink(name, *b)) + }) + if err != nil { + bufferPool.Put(b) + return "", &os.PathError{Op: "readlink", Path: name, Err: err} + } + + result := string((*b)[:n]) + bufferPool.Put(b) + return result, nil +} diff --git a/vendor/github.com/shirou/gopsutil/v3/internal/common/sleep.go b/vendor/github.com/shirou/gopsutil/v4/internal/common/sleep.go similarity index 77% rename from vendor/github.com/shirou/gopsutil/v3/internal/common/sleep.go rename to vendor/github.com/shirou/gopsutil/v4/internal/common/sleep.go index 9bed2419e..504f13ffd 100644 --- a/vendor/github.com/shirou/gopsutil/v3/internal/common/sleep.go +++ b/vendor/github.com/shirou/gopsutil/v4/internal/common/sleep.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause package common import ( @@ -6,7 +7,7 @@ import ( ) // Sleep awaits for provided interval. -// Can be interrupted by context cancelation. +// Can be interrupted by context cancellation. func Sleep(ctx context.Context, interval time.Duration) error { timer := time.NewTimer(interval) select { diff --git a/vendor/github.com/shirou/gopsutil/v3/internal/common/warnings.go b/vendor/github.com/shirou/gopsutil/v4/internal/common/warnings.go similarity index 92% rename from vendor/github.com/shirou/gopsutil/v3/internal/common/warnings.go rename to vendor/github.com/shirou/gopsutil/v4/internal/common/warnings.go index a4aaadaf5..888cc57fa 100644 --- a/vendor/github.com/shirou/gopsutil/v3/internal/common/warnings.go +++ b/vendor/github.com/shirou/gopsutil/v4/internal/common/warnings.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause package common import "fmt" diff --git a/vendor/github.com/shirou/gopsutil/v4/mem/ex_linux.go b/vendor/github.com/shirou/gopsutil/v4/mem/ex_linux.go new file mode 100644 index 000000000..0a12fe2fe --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/mem/ex_linux.go @@ -0,0 +1,40 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build linux + +package mem + +import ( + "context" + "encoding/json" +) + +type ExVirtualMemory struct { + ActiveFile uint64 `json:"activefile"` + InactiveFile uint64 `json:"inactivefile"` + ActiveAnon uint64 `json:"activeanon"` + InactiveAnon uint64 `json:"inactiveanon"` + Unevictable uint64 `json:"unevictable"` +} + +func (v ExVirtualMemory) String() string { + s, _ := json.Marshal(v) + return string(s) +} + +type ExLinux struct{} + +func NewExLinux() *ExLinux { + return &ExLinux{} +} + +func (ex *ExLinux) VirtualMemory() (*ExVirtualMemory, error) { + return ex.VirtualMemoryWithContext(context.Background()) +} + +func (ex *ExLinux) VirtualMemoryWithContext(ctx context.Context) (*ExVirtualMemory, error) { + _, vmEx, err := fillFromMeminfoWithContext(ctx) + if err != nil { + return nil, err + } + return vmEx, nil +} diff --git a/vendor/github.com/shirou/gopsutil/v4/mem/ex_windows.go b/vendor/github.com/shirou/gopsutil/v4/mem/ex_windows.go new file mode 100644 index 000000000..c1a9ed12d --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/mem/ex_windows.go @@ -0,0 +1,62 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build windows + +package mem + +import ( + "unsafe" +) + +// ExVirtualMemory represents Windows specific information +// https://learn.microsoft.com/en-us/windows/win32/api/sysinfoapi/ns-sysinfoapi-memorystatusex +// https://learn.microsoft.com/en-us/windows/win32/api/psapi/ns-psapi-performance_information +type ExVirtualMemory struct 
{ + CommitLimit uint64 `json:"commitLimit"` + CommitTotal uint64 `json:"commitTotal"` + VirtualTotal uint64 `json:"virtualTotal"` + VirtualAvail uint64 `json:"virtualAvail"` + PhysTotal uint64 `json:"physTotal"` + PhysAvail uint64 `json:"physAvail"` + PageFileTotal uint64 `json:"pageFileTotal"` + PageFileAvail uint64 `json:"pageFileAvail"` +} + +type ExWindows struct{} + +func NewExWindows() *ExWindows { + return &ExWindows{} +} + +func (e *ExWindows) VirtualMemory() (*ExVirtualMemory, error) { + var memInfo memoryStatusEx + memInfo.cbSize = uint32(unsafe.Sizeof(memInfo)) + // mem == 0 indicates an error according to the GlobalMemoryStatusEx documentation. + // In that case, return err, which is constructed from GetLastError(), + // see https://pkg.go.dev/golang.org/x/sys/windows#LazyProc.Call + mem, _, err := procGlobalMemoryStatusEx.Call(uintptr(unsafe.Pointer(&memInfo))) + if mem == 0 { + return nil, err + } + + var perfInfo performanceInformation + perfInfo.cb = uint32(unsafe.Sizeof(perfInfo)) + // Analogous to above: perf == 0 is an error according to the GetPerformanceInfo documentation, + // use err in that case + perf, _, err := procGetPerformanceInfo.Call(uintptr(unsafe.Pointer(&perfInfo)), uintptr(perfInfo.cb)) + if perf == 0 { + return nil, err + } + + ret := &ExVirtualMemory{ + CommitLimit: perfInfo.commitLimit * perfInfo.pageSize, + CommitTotal: perfInfo.commitTotal * perfInfo.pageSize, + VirtualTotal: memInfo.ullTotalVirtual, + VirtualAvail: memInfo.ullAvailVirtual, + PhysTotal: memInfo.ullTotalPhys, + PhysAvail: memInfo.ullAvailPhys, + PageFileTotal: memInfo.ullTotalPageFile, + PageFileAvail: memInfo.ullAvailPageFile, + } + + return ret, nil +} diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem.go similarity index 97% rename from vendor/github.com/shirou/gopsutil/v3/mem/mem.go rename to vendor/github.com/shirou/gopsutil/v4/mem/mem.go index edaf268bb..0da71a988 100644 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem.go +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem.go @@ -1,9 +1,10 @@ +// SPDX-License-Identifier: BSD-3-Clause package mem import ( "encoding/json" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" ) var invoke common.Invoker = common.Invoke{} diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_aix.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_aix.go similarity index 58% rename from vendor/github.com/shirou/gopsutil/v3/mem/mem_aix.go rename to vendor/github.com/shirou/gopsutil/v4/mem/mem_aix.go index 22a6a4e92..ac2c39dd3 100644 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem_aix.go +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem_aix.go @@ -1,10 +1,12 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build aix -// +build aix package mem import ( "context" + + "github.com/shirou/gopsutil/v4/internal/common" ) func VirtualMemory() (*VirtualMemoryStat, error) { @@ -14,3 +16,7 @@ func VirtualMemory() (*VirtualMemoryStat, error) { func SwapMemory() (*SwapMemoryStat, error) { return SwapMemoryWithContext(context.Background()) } + +func SwapDevices() ([]*SwapDevice, error) { + return nil, common.ErrNotImplementedError +} diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_aix_cgo.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_aix_cgo.go similarity index 97% rename from vendor/github.com/shirou/gopsutil/v3/mem/mem_aix_cgo.go rename to vendor/github.com/shirou/gopsutil/v4/mem/mem_aix_cgo.go index 67e11dff8..2d03dd0c3 100644 ---
a/vendor/github.com/shirou/gopsutil/v3/mem/mem_aix_cgo.go +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem_aix_cgo.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build aix && cgo -// +build aix,cgo package mem diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_aix_nocgo.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_aix_nocgo.go similarity index 83% rename from vendor/github.com/shirou/gopsutil/v3/mem/mem_aix_nocgo.go rename to vendor/github.com/shirou/gopsutil/v4/mem/mem_aix_nocgo.go index cc6a76d2f..bc3c0ed3b 100644 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem_aix_nocgo.go +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem_aix_nocgo.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build aix && !cgo -// +build aix,!cgo package mem @@ -8,11 +8,11 @@ import ( "strconv" "strings" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" ) func VirtualMemoryWithContext(ctx context.Context) (*VirtualMemoryStat, error) { - vmem, swap, err := callSVMon(ctx) + vmem, swap, err := callSVMon(ctx, true) if err != nil { return nil, err } @@ -25,7 +25,7 @@ func VirtualMemoryWithContext(ctx context.Context) (*VirtualMemoryStat, error) { } func SwapMemoryWithContext(ctx context.Context) (*SwapMemoryStat, error) { - _, swap, err := callSVMon(ctx) + _, swap, err := callSVMon(ctx, false) if err != nil { return nil, err } @@ -35,7 +35,7 @@ func SwapMemoryWithContext(ctx context.Context) (*SwapMemoryStat, error) { return swap, nil } -func callSVMon(ctx context.Context) (*VirtualMemoryStat, *SwapMemoryStat, error) { +func callSVMon(ctx context.Context, virt bool) (*VirtualMemoryStat, *SwapMemoryStat, error) { out, err := invoke.CommandWithContext(ctx, "svmon", "-G") if err != nil { return nil, nil, err @@ -45,7 +45,7 @@ func callSVMon(ctx context.Context) (*VirtualMemoryStat, *SwapMemoryStat, error) vmem := &VirtualMemoryStat{} swap := &SwapMemoryStat{} for _, line := range strings.Split(string(out), "\n") { - if strings.HasPrefix(line, "memory") { + if virt && strings.HasPrefix(line, "memory") { p := strings.Fields(line) if len(p) > 2 { if t, err := strconv.ParseUint(p[1], 10, 64); err == nil { diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_bsd.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_bsd.go similarity index 98% rename from vendor/github.com/shirou/gopsutil/v3/mem/mem_bsd.go rename to vendor/github.com/shirou/gopsutil/v4/mem/mem_bsd.go index ef867d742..4f3e57c03 100644 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem_bsd.go +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem_bsd.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build freebsd || openbsd || netbsd -// +build freebsd openbsd netbsd package mem diff --git a/vendor/github.com/shirou/gopsutil/v4/mem/mem_darwin.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_darwin.go new file mode 100644 index 000000000..7d96a3bb0 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem_darwin.go @@ -0,0 +1,130 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build darwin + +package mem + +import ( + "context" + "fmt" + "unsafe" + + "golang.org/x/sys/unix" + + "github.com/shirou/gopsutil/v4/internal/common" +) + +func getHwMemsize() (uint64, error) { + total, err := unix.SysctlUint64("hw.memsize") + if err != nil { + return 0, err + } + return total, nil +} + +// xsw_usage in sys/sysctl.h +type swapUsage struct { + Total uint64 + Avail uint64 + Used uint64 + Pagesize int32 + Encrypted bool +} + +// SwapMemory returns 
swapinfo. +func SwapMemory() (*SwapMemoryStat, error) { + return SwapMemoryWithContext(context.Background()) +} + +func SwapMemoryWithContext(_ context.Context) (*SwapMemoryStat, error) { + // https://github.com/yanllearnn/go-osstat/blob/ae8a279d26f52ec946a03698c7f50a26cfb427e3/memory/memory_darwin.go + var ret *SwapMemoryStat + + value, err := unix.SysctlRaw("vm.swapusage") + if err != nil { + return ret, err + } + if len(value) != 32 { + return ret, fmt.Errorf("unexpected output of sysctl vm.swapusage: %v (len: %d)", value, len(value)) + } + swap := (*swapUsage)(unsafe.Pointer(&value[0])) + + u := float64(0) + if swap.Total != 0 { + u = ((float64(swap.Total) - float64(swap.Avail)) / float64(swap.Total)) * 100.0 + } + + ret = &SwapMemoryStat{ + Total: swap.Total, + Used: swap.Used, + Free: swap.Avail, + UsedPercent: u, + } + + return ret, nil +} + +func SwapDevices() ([]*SwapDevice, error) { + return SwapDevicesWithContext(context.Background()) +} + +func SwapDevicesWithContext(_ context.Context) ([]*SwapDevice, error) { + return nil, common.ErrNotImplementedError +} + +type vmStatisticsData struct { + freeCount uint32 + activeCount uint32 + inactiveCount uint32 + wireCount uint32 + _ [44]byte // Not used here +} + +// VirtualMemory returns VirtualMemoryStat. +func VirtualMemory() (*VirtualMemoryStat, error) { + return VirtualMemoryWithContext(context.Background()) +} + +func VirtualMemoryWithContext(_ context.Context) (*VirtualMemoryStat, error) { + machLib, err := common.NewLibrary(common.System) + if err != nil { + return nil, err + } + defer machLib.Close() + + hostStatistics := common.GetFunc[common.HostStatisticsFunc](machLib, common.HostStatisticsSym) + machHostSelf := common.GetFunc[common.MachHostSelfFunc](machLib, common.MachHostSelfSym) + + count := uint32(common.HOST_VM_INFO_COUNT) + var vmstat vmStatisticsData + + status := hostStatistics(machHostSelf(), common.HOST_VM_INFO, + uintptr(unsafe.Pointer(&vmstat)), &count) + + if status != common.KERN_SUCCESS { + return nil, fmt.Errorf("host_statistics error=%d", status) + } + + pageSizeAddr, _ := machLib.Dlsym("vm_kernel_page_size") + pageSize := **(**uint64)(unsafe.Pointer(&pageSizeAddr)) + total, err := getHwMemsize() + if err != nil { + return nil, err + } + totalCount := uint32(total / pageSize) + + availableCount := vmstat.inactiveCount + vmstat.freeCount + usedPercent := 100 * float64(totalCount-availableCount) / float64(totalCount) + + usedCount := totalCount - availableCount + + return &VirtualMemoryStat{ + Total: total, + Available: pageSize * uint64(availableCount), + Used: pageSize * uint64(usedCount), + UsedPercent: usedPercent, + Free: pageSize * uint64(vmstat.freeCount), + Active: pageSize * uint64(vmstat.activeCount), + Inactive: pageSize * uint64(vmstat.inactiveCount), + Wired: pageSize * uint64(vmstat.wireCount), + }, nil +} diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_fallback.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_fallback.go similarity index 62% rename from vendor/github.com/shirou/gopsutil/v3/mem/mem_fallback.go rename to vendor/github.com/shirou/gopsutil/v4/mem/mem_fallback.go index 697fd8709..74283a2b5 100644 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem_fallback.go +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem_fallback.go @@ -1,19 +1,19 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build !darwin && !linux && !freebsd && !openbsd && !solaris && !windows && !plan9 && !aix && !netbsd -// +build !darwin,!linux,!freebsd,!openbsd,!solaris,!windows,!plan9,!aix,!netbsd package
mem import ( "context" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" ) func VirtualMemory() (*VirtualMemoryStat, error) { return VirtualMemoryWithContext(context.Background()) } -func VirtualMemoryWithContext(ctx context.Context) (*VirtualMemoryStat, error) { +func VirtualMemoryWithContext(_ context.Context) (*VirtualMemoryStat, error) { return nil, common.ErrNotImplementedError } @@ -21,7 +21,7 @@ func SwapMemory() (*SwapMemoryStat, error) { return SwapMemoryWithContext(context.Background()) } -func SwapMemoryWithContext(ctx context.Context) (*SwapMemoryStat, error) { +func SwapMemoryWithContext(_ context.Context) (*SwapMemoryStat, error) { return nil, common.ErrNotImplementedError } @@ -29,6 +29,6 @@ func SwapDevices() ([]*SwapDevice, error) { return SwapDevicesWithContext(context.Background()) } -func SwapDevicesWithContext(ctx context.Context) ([]*SwapDevice, error) { +func SwapDevicesWithContext(_ context.Context) ([]*SwapDevice, error) { return nil, common.ErrNotImplementedError } diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_freebsd.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_freebsd.go similarity index 91% rename from vendor/github.com/shirou/gopsutil/v3/mem/mem_freebsd.go rename to vendor/github.com/shirou/gopsutil/v4/mem/mem_freebsd.go index 9a56785b3..dbe6d9199 100644 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem_freebsd.go +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem_freebsd.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build freebsd -// +build freebsd package mem @@ -8,15 +8,16 @@ import ( "errors" "unsafe" - "github.com/shirou/gopsutil/v3/internal/common" "golang.org/x/sys/unix" + + "github.com/shirou/gopsutil/v4/internal/common" ) func VirtualMemory() (*VirtualMemoryStat, error) { return VirtualMemoryWithContext(context.Background()) } -func VirtualMemoryWithContext(ctx context.Context) (*VirtualMemoryStat, error) { +func VirtualMemoryWithContext(_ context.Context) (*VirtualMemoryStat, error) { pageSize, err := common.SysctlUint("vm.stats.vm.v_page_size") if err != nil { return nil, err @@ -85,7 +86,6 @@ func SwapMemory() (*SwapMemoryStat, error) { } // Constants from vm/vm_param.h -// nolint: golint const ( XSWDEV_VERSION11 = 1 XSWDEV_VERSION = 2 @@ -110,7 +110,7 @@ type xswdev11 struct { Used int32 // Used is the number of blocks used } -func SwapMemoryWithContext(ctx context.Context) (*SwapMemoryStat, error) { +func SwapMemoryWithContext(_ context.Context) (*SwapMemoryStat, error) { // FreeBSD can have multiple swap devices so we total them up i, err := common.SysctlUint("vm.nswapdev") if err != nil { @@ -139,7 +139,8 @@ func SwapMemoryWithContext(ctx context.Context) (*SwapMemoryStat, error) { // first, try to parse with version 2 xsw := (*xswdev)(unsafe.Pointer(&buf[0])) - if xsw.Version == XSWDEV_VERSION11 { + switch { + case xsw.Version == XSWDEV_VERSION11: // this is version 1, so try to parse again xsw := (*xswdev11)(unsafe.Pointer(&buf[0])) if xsw.Version != XSWDEV_VERSION11 { @@ -147,9 +148,9 @@ func SwapMemoryWithContext(ctx context.Context) (*SwapMemoryStat, error) { } s.Total += uint64(xsw.NBlks) s.Used += uint64(xsw.Used) - } else if xsw.Version != XSWDEV_VERSION { + case xsw.Version != XSWDEV_VERSION: return nil, errors.New("xswdev version mismatch") - } else { + default: s.Total += uint64(xsw.NBlks) s.Used += uint64(xsw.Used) } diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_linux.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_linux.go 
similarity index 93% rename from vendor/github.com/shirou/gopsutil/v3/mem/mem_linux.go rename to vendor/github.com/shirou/gopsutil/v4/mem/mem_linux.go index 214a91e47..3e6e4e3e4 100644 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem_linux.go +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem_linux.go @@ -1,12 +1,11 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build linux -// +build linux package mem import ( "bufio" "context" - "encoding/json" "fmt" "io" "math" @@ -16,22 +15,9 @@ import ( "golang.org/x/sys/unix" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" ) -type VirtualMemoryExStat struct { - ActiveFile uint64 `json:"activefile"` - InactiveFile uint64 `json:"inactivefile"` - ActiveAnon uint64 `json:"activeanon"` - InactiveAnon uint64 `json:"inactiveanon"` - Unevictable uint64 `json:"unevictable"` -} - -func (v VirtualMemoryExStat) String() string { - s, _ := json.Marshal(v) - return string(s) -} - func VirtualMemory() (*VirtualMemoryStat, error) { return VirtualMemoryWithContext(context.Background()) } @@ -44,19 +30,7 @@ func VirtualMemoryWithContext(ctx context.Context) (*VirtualMemoryStat, error) { return vm, nil } -func VirtualMemoryEx() (*VirtualMemoryExStat, error) { - return VirtualMemoryExWithContext(context.Background()) -} - -func VirtualMemoryExWithContext(ctx context.Context) (*VirtualMemoryExStat, error) { - _, vmEx, err := fillFromMeminfoWithContext(ctx) - if err != nil { - return nil, err - } - return vmEx, nil -} - -func fillFromMeminfoWithContext(ctx context.Context) (*VirtualMemoryStat, *VirtualMemoryExStat, error) { +func fillFromMeminfoWithContext(ctx context.Context) (*VirtualMemoryStat, *ExVirtualMemory, error) { filename := common.HostProcWithContext(ctx, "meminfo") lines, _ := common.ReadLines(filename) @@ -67,7 +41,7 @@ func fillFromMeminfoWithContext(ctx context.Context) (*VirtualMemoryStat, *Virtu sReclaimable := false // "Sreclaimable:" not available: 2.6.19 / Nov 2006 ret := &VirtualMemoryStat{} - retEx := &VirtualMemoryExStat{} + retEx := &ExVirtualMemory{} for _, line := range lines { fields := strings.Split(line, ":") @@ -76,7 +50,7 @@ func fillFromMeminfoWithContext(ctx context.Context) (*VirtualMemoryStat, *Virtu } key := strings.TrimSpace(fields[0]) value := strings.TrimSpace(fields[1]) - value = strings.Replace(value, " kB", "", -1) + value = strings.ReplaceAll(value, " kB", "") switch key { case "MemTotal": @@ -409,7 +383,7 @@ func SwapMemoryWithContext(ctx context.Context) (*SwapMemoryStat, error) { // calculateAvailVmem is a fallback under kernel 3.14 where /proc/meminfo does not provide // "MemAvailable:" column. 
It reimplements an algorithm from the link below // https://github.com/giampaolo/psutil/pull/890 -func calculateAvailVmem(ctx context.Context, ret *VirtualMemoryStat, retEx *VirtualMemoryExStat) uint64 { +func calculateAvailVmem(ctx context.Context, ret *VirtualMemoryStat, retEx *ExVirtualMemory) uint64 { var watermarkLow uint64 fn := common.HostProcWithContext(ctx, "zoneinfo") diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_netbsd.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_netbsd.go similarity index 90% rename from vendor/github.com/shirou/gopsutil/v3/mem/mem_netbsd.go rename to vendor/github.com/shirou/gopsutil/v4/mem/mem_netbsd.go index d1f54ecaf..8ef539ca3 100644 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem_netbsd.go +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem_netbsd.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build netbsd -// +build netbsd package mem @@ -15,7 +15,7 @@ func GetPageSize() (uint64, error) { return GetPageSizeWithContext(context.Background()) } -func GetPageSizeWithContext(ctx context.Context) (uint64, error) { +func GetPageSizeWithContext(_ context.Context) (uint64, error) { uvmexp, err := unix.SysctlUvmexp("vm.uvmexp2") if err != nil { return 0, err @@ -27,7 +27,7 @@ func VirtualMemory() (*VirtualMemoryStat, error) { return VirtualMemoryWithContext(context.Background()) } -func VirtualMemoryWithContext(ctx context.Context) (*VirtualMemoryStat, error) { +func VirtualMemoryWithContext(_ context.Context) (*VirtualMemoryStat, error) { uvmexp, err := unix.SysctlUvmexp("vm.uvmexp2") if err != nil { return nil, err diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_openbsd.go similarity index 90% rename from vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd.go rename to vendor/github.com/shirou/gopsutil/v4/mem/mem_openbsd.go index e37d5abe0..680cad12b 100644 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd.go +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem_openbsd.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build openbsd -// +build openbsd package mem @@ -10,15 +10,16 @@ import ( "errors" "fmt" - "github.com/shirou/gopsutil/v3/internal/common" "golang.org/x/sys/unix" + + "github.com/shirou/gopsutil/v4/internal/common" ) func GetPageSize() (uint64, error) { return GetPageSizeWithContext(context.Background()) } -func GetPageSizeWithContext(ctx context.Context) (uint64, error) { +func GetPageSizeWithContext(_ context.Context) (uint64, error) { uvmexp, err := unix.SysctlUvmexp("vm.uvmexp") if err != nil { return 0, err @@ -30,7 +31,7 @@ func VirtualMemory() (*VirtualMemoryStat, error) { return VirtualMemoryWithContext(context.Background()) } -func VirtualMemoryWithContext(ctx context.Context) (*VirtualMemoryStat, error) { +func VirtualMemoryWithContext(_ context.Context) (*VirtualMemoryStat, error) { uvmexp, err := unix.SysctlUvmexp("vm.uvmexp") if err != nil { return nil, err diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd_386.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_openbsd_386.go similarity index 93% rename from vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd_386.go rename to vendor/github.com/shirou/gopsutil/v4/mem/mem_openbsd_386.go index de2b26ca4..552e93f4a 100644 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd_386.go +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem_openbsd_386.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build openbsd && 386 -// +build 
openbsd,386 // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs mem/types_openbsd.go diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd_amd64.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_openbsd_amd64.go similarity index 92% rename from vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd_amd64.go rename to vendor/github.com/shirou/gopsutil/v4/mem/mem_openbsd_amd64.go index d187abf01..73e5b72aa 100644 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd_amd64.go +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem_openbsd_amd64.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause // Created by cgo -godefs - DO NOT EDIT // cgo -godefs types_openbsd.go diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd_arm.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_openbsd_arm.go similarity index 93% rename from vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd_arm.go rename to vendor/github.com/shirou/gopsutil/v4/mem/mem_openbsd_arm.go index 2488f1851..57b5861de 100644 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd_arm.go +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem_openbsd_arm.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build openbsd && arm -// +build openbsd,arm // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs mem/types_openbsd.go diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd_arm64.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_openbsd_arm64.go similarity index 93% rename from vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd_arm64.go rename to vendor/github.com/shirou/gopsutil/v4/mem/mem_openbsd_arm64.go index 3661b16fb..f39a6456b 100644 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd_arm64.go +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem_openbsd_arm64.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build openbsd && arm64 -// +build openbsd,arm64 // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs mem/types_openbsd.go diff --git a/vendor/github.com/shirou/gopsutil/v4/mem/mem_openbsd_riscv64.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_openbsd_riscv64.go new file mode 100644 index 000000000..f9f838f54 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem_openbsd_riscv64.go @@ -0,0 +1,38 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build openbsd && riscv64 + +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs mem/types_openbsd.go + +package mem + +const ( + CTLVfs = 10 + VfsGeneric = 0 + VfsBcacheStat = 3 +) + +const ( + sizeOfBcachestats = 0x90 +) + +type Bcachestats struct { + Numbufs int64 + Numbufpages int64 + Numdirtypages int64 + Numcleanpages int64 + Pendingwrites int64 + Pendingreads int64 + Numwrites int64 + Numreads int64 + Cachehits int64 + Busymapped int64 + Dmapages int64 + Highpages int64 + Delwribufs int64 + Kvaslots int64 + Avail int64 + Highflips int64 + Highflops int64 + Dmaflips int64 +} diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_plan9.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_plan9.go similarity index 91% rename from vendor/github.com/shirou/gopsutil/v3/mem/mem_plan9.go rename to vendor/github.com/shirou/gopsutil/v4/mem/mem_plan9.go index b5259f844..0df0745c7 100644 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem_plan9.go +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem_plan9.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build plan9 -// +build plan9 package mem @@ -8,7 +8,8 @@ import ( "os" stats "github.com/lufia/plan9stats" - "github.com/shirou/gopsutil/v3/internal/common" + + "github.com/shirou/gopsutil/v4/internal/common" ) func SwapMemory() (*SwapMemoryStat, error) { @@ -63,6 +64,6 @@ func SwapDevices() ([]*SwapDevice, error) { return SwapDevicesWithContext(context.Background()) } -func SwapDevicesWithContext(ctx context.Context) ([]*SwapDevice, error) { +func SwapDevicesWithContext(_ context.Context) ([]*SwapDevice, error) { return nil, common.ErrNotImplementedError } diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_solaris.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_solaris.go similarity index 90% rename from vendor/github.com/shirou/gopsutil/v3/mem/mem_solaris.go rename to vendor/github.com/shirou/gopsutil/v4/mem/mem_solaris.go index c911267e1..1a391dc4b 100644 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem_solaris.go +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem_solaris.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build solaris -// +build solaris package mem @@ -11,8 +11,9 @@ import ( "strconv" "strings" - "github.com/shirou/gopsutil/v3/internal/common" "github.com/tklauser/go-sysconf" + + "github.com/shirou/gopsutil/v4/internal/common" ) // VirtualMemory for Solaris is a minimal implementation which only returns @@ -24,17 +25,17 @@ func VirtualMemory() (*VirtualMemoryStat, error) { func VirtualMemoryWithContext(ctx context.Context) (*VirtualMemoryStat, error) { result := &VirtualMemoryStat{} - zoneName, err := zoneName() + zoneName, err := zoneName(ctx) if err != nil { return nil, err } if zoneName == "global" { - cap, err := globalZoneMemoryCapacity() + capacity, err := globalZoneMemoryCapacity(ctx) if err != nil { return nil, err } - result.Total = cap + result.Total = capacity freemem, err := globalZoneFreeMemory(ctx) if err != nil { return nil, err @@ -43,11 +44,11 @@ func VirtualMemoryWithContext(ctx context.Context) (*VirtualMemoryStat, error) { result.Free = freemem result.Used = result.Total - result.Free } else { - cap, err := nonGlobalZoneMemoryCapacity() + capacity, err := nonGlobalZoneMemoryCapacity(ctx) if err != nil { return nil, err } - result.Total = cap + result.Total = capacity } return result, nil @@ -57,12 +58,11 @@ func SwapMemory() (*SwapMemoryStat, error) { return SwapMemoryWithContext(context.Background()) } -func SwapMemoryWithContext(ctx context.Context) (*SwapMemoryStat, error) { +func SwapMemoryWithContext(_ context.Context) 
(*SwapMemoryStat, error) { return nil, common.ErrNotImplementedError } -func zoneName() (string, error) { - ctx := context.Background() +func zoneName(ctx context.Context) (string, error) { out, err := invoke.CommandWithContext(ctx, "zonename") if err != nil { return "", err @@ -73,8 +73,7 @@ func zoneName() (string, error) { var globalZoneMemoryCapacityMatch = regexp.MustCompile(`[Mm]emory size: (\d+) Megabytes`) -func globalZoneMemoryCapacity() (uint64, error) { - ctx := context.Background() +func globalZoneMemoryCapacity(ctx context.Context) (uint64, error) { out, err := invoke.CommandWithContext(ctx, "prtconf") if err != nil { return 0, err @@ -114,8 +113,7 @@ func globalZoneFreeMemory(ctx context.Context) (uint64, error) { var kstatMatch = regexp.MustCompile(`(\S+)\s+(\S*)`) -func nonGlobalZoneMemoryCapacity() (uint64, error) { - ctx := context.Background() +func nonGlobalZoneMemoryCapacity(ctx context.Context) (uint64, error) { out, err := invoke.CommandWithContext(ctx, "kstat", "-p", "-c", "zone_memory_cap", "memory_cap:*:*:physcap") if err != nil { return 0, err diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_windows.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_windows.go similarity index 67% rename from vendor/github.com/shirou/gopsutil/v3/mem/mem_windows.go rename to vendor/github.com/shirou/gopsutil/v4/mem/mem_windows.go index 8c7fb1a13..f7421f647 100644 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem_windows.go +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem_windows.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build windows -// +build windows package mem @@ -9,8 +9,9 @@ import ( "syscall" "unsafe" - "github.com/shirou/gopsutil/v3/internal/common" "golang.org/x/sys/windows" + + "github.com/shirou/gopsutil/v4/internal/common" ) var ( @@ -36,12 +37,14 @@ func VirtualMemory() (*VirtualMemoryStat, error) { return VirtualMemoryWithContext(context.Background()) } -func VirtualMemoryWithContext(ctx context.Context) (*VirtualMemoryStat, error) { +func VirtualMemoryWithContext(_ context.Context) (*VirtualMemoryStat, error) { var memInfo memoryStatusEx memInfo.cbSize = uint32(unsafe.Sizeof(memInfo)) - mem, _, _ := procGlobalMemoryStatusEx.Call(uintptr(unsafe.Pointer(&memInfo))) + // GlobalMemoryStatusEx returns 0 for error, in which case we check err, + // see https://pkg.go.dev/golang.org/x/sys/windows#LazyProc.Call + mem, _, err := procGlobalMemoryStatusEx.Call(uintptr(unsafe.Pointer(&memInfo))) if mem == 0 { - return nil, windows.GetLastError() + return nil, err } ret := &VirtualMemoryStat{ @@ -76,27 +79,45 @@ func SwapMemory() (*SwapMemoryStat, error) { return SwapMemoryWithContext(context.Background()) } -func SwapMemoryWithContext(ctx context.Context) (*SwapMemoryStat, error) { +func SwapMemoryWithContext(_ context.Context) (*SwapMemoryStat, error) { + // Use the performance counter to get the swap usage percentage + counter, err := common.NewWin32PerformanceCounter("swap_percentage", `\Paging File(_Total)\% Usage`) + if err != nil { + return nil, err + } + defer common.PdhCloseQuery.Call(uintptr(counter.Query)) + + usedPercent, err := counter.GetValue() + if err != nil { + return nil, err + } + + // Get total memory from performance information var perfInfo performanceInformation perfInfo.cb = uint32(unsafe.Sizeof(perfInfo)) - mem, _, _ := procGetPerformanceInfo.Call(uintptr(unsafe.Pointer(&perfInfo)), uintptr(perfInfo.cb)) + // GetPerformanceInfo returns 0 for error, in which case we check err, + // see 
https://pkg.go.dev/golang.org/x/sys/windows#LazyProc.Call + mem, _, err := procGetPerformanceInfo.Call(uintptr(unsafe.Pointer(&perfInfo)), uintptr(perfInfo.cb)) if mem == 0 { - return nil, windows.GetLastError() + return nil, err } - tot := perfInfo.commitLimit * perfInfo.pageSize - used := perfInfo.commitTotal * perfInfo.pageSize - free := tot - used - var usedPercent float64 - if tot == 0 { - usedPercent = 0 + totalPhys := perfInfo.physicalTotal * perfInfo.pageSize + totalSys := perfInfo.commitLimit * perfInfo.pageSize + total := totalSys - totalPhys + + var used uint64 + if total > 0 { + used = uint64(0.01 * usedPercent * float64(total)) } else { - usedPercent = float64(used) / float64(tot) * 100 + usedPercent = 0.0 + used = 0 } + ret := &SwapMemoryStat{ - Total: tot, + Total: total, Used: used, - Free: free, - UsedPercent: usedPercent, + Free: total - used, + UsedPercent: common.Round(usedPercent, 1), } return ret, nil @@ -134,7 +155,7 @@ func SwapDevices() ([]*SwapDevice, error) { return SwapDevicesWithContext(context.Background()) } -func SwapDevicesWithContext(ctx context.Context) ([]*SwapDevice, error) { +func SwapDevicesWithContext(_ context.Context) ([]*SwapDevice, error) { pageSizeOnce.Do(func() { var sysInfo systemInfo procGetNativeSystemInfo.Call(uintptr(unsafe.Pointer(&sysInfo))) @@ -144,9 +165,11 @@ func SwapDevicesWithContext(ctx context.Context) ([]*SwapDevice, error) { // the following system call invokes the supplied callback function once for each page file before returning // see https://docs.microsoft.com/en-us/windows/win32/api/psapi/nf-psapi-enumpagefilesw var swapDevices []*SwapDevice - result, _, _ := procEnumPageFilesW.Call(windows.NewCallback(pEnumPageFileCallbackW), uintptr(unsafe.Pointer(&swapDevices))) + // EnumPageFilesW returns 0 for error, in which case we check err, + // see https://pkg.go.dev/golang.org/x/sys/windows#LazyProc.Call + result, _, err := procEnumPageFilesW.Call(windows.NewCallback(pEnumPageFileCallbackW), uintptr(unsafe.Pointer(&swapDevices))) if result == 0 { - return nil, windows.GetLastError() + return nil, err } return swapDevices, nil diff --git a/vendor/github.com/shirou/gopsutil/v3/net/net.go b/vendor/github.com/shirou/gopsutil/v4/net/net.go similarity index 67% rename from vendor/github.com/shirou/gopsutil/v3/net/net.go rename to vendor/github.com/shirou/gopsutil/v4/net/net.go index 0f3a62f39..1d1f9f08b 100644 --- a/vendor/github.com/shirou/gopsutil/v3/net/net.go +++ b/vendor/github.com/shirou/gopsutil/v4/net/net.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause package net import ( @@ -5,7 +6,7 @@ import ( "encoding/json" "net" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" ) var invoke common.Invoker = common.Invoke{} @@ -93,7 +94,7 @@ type ConntrackStat struct { SearchRestart uint32 `json:"searchRestart"` // Conntrack table lookups restarted due to hashtable resizes } -func NewConntrackStat(e uint32, s uint32, f uint32, n uint32, inv uint32, ign uint32, del uint32, dlst uint32, ins uint32, insfail uint32, drop uint32, edrop uint32, ie uint32, en uint32, ec uint32, ed uint32, sr uint32) *ConntrackStat { +func NewConntrackStat(e, s, f, n, inv, ign, del, dlst, ins, insfail, drop, edrop, ie, en, ec, ed, sr uint32) *ConntrackStat { return &ConntrackStat{ Entries: e, Searched: s, @@ -206,7 +207,7 @@ func Interfaces() (InterfaceStatList, error) { return InterfacesWithContext(context.Background()) } -func InterfacesWithContext(ctx context.Context) (InterfaceStatList, error) { 
+func InterfacesWithContext(_ context.Context) (InterfaceStatList, error) { is, err := net.Interfaces() if err != nil { return nil, err } @@ -254,7 +255,7 @@ func InterfacesWithContext(ctx context.Context) (InterfaceStatList, error) { return ret, nil } -func getIOCountersAll(n []IOCountersStat) ([]IOCountersStat, error) { +func getIOCountersAll(n []IOCountersStat) []IOCountersStat { r := IOCountersStat{ Name: "all", } @@ -269,5 +270,87 @@ func getIOCountersAll(n []IOCountersStat) ([]IOCountersStat, error) { r.Dropout += nic.Dropout } - return []IOCountersStat{r}, nil + return []IOCountersStat{r} +} + +// IOCounters returns network I/O statistics for every network +// interface installed on the system. If pernic argument is false, +// return only sum of all information (which name is 'all'). If true, +// every network interface installed on the system is returned +// separately. +func IOCounters(pernic bool) ([]IOCountersStat, error) { + return IOCountersWithContext(context.Background(), pernic) +} + +func IOCountersByFile(pernic bool, filename string) ([]IOCountersStat, error) { + return IOCountersByFileWithContext(context.Background(), pernic, filename) +} + +// ProtoCounters returns network statistics for the entire system. +// If protocols is empty then all protocols are returned, otherwise +// just the protocols in the list are returned. +// Available protocols: +// [ip,icmp,icmpmsg,tcp,udp,udplite] +// Not Implemented for FreeBSD, Windows, OpenBSD, Darwin +func ProtoCounters(protocols []string) ([]ProtoCountersStat, error) { + return ProtoCountersWithContext(context.Background(), protocols) +} + +// FilterCounters returns iptables conntrack statistics +// the currently in use conntrack count and the max. +// If the file does not exist or is invalid it will return nil. +func FilterCounters() ([]FilterStat, error) { + return FilterCountersWithContext(context.Background()) +} + +// ConntrackStats returns more detailed info about the conntrack table +func ConntrackStats(percpu bool) ([]ConntrackStat, error) { + return ConntrackStatsWithContext(context.Background(), percpu) +} + +// Return a list of network connections opened. +func Connections(kind string) ([]ConnectionStat, error) { + return ConnectionsWithContext(context.Background(), kind) +} + +// Return a list of network connections opened returning at most `maxConn` +// connections for each running process. +func ConnectionsMax(kind string, maxConn int) ([]ConnectionStat, error) { + return ConnectionsMaxWithContext(context.Background(), kind, maxConn) +} + +// Return a list of network connections opened, omitting `Uids`. +// WithoutUids functions are reliant on implementation details. They may be altered to be an alias for Connections or be +// removed from the API in the future. +func ConnectionsWithoutUids(kind string) ([]ConnectionStat, error) { + return ConnectionsWithoutUidsWithContext(context.Background(), kind) +} + +// Return a list of network connections opened by a process. +func ConnectionsPid(kind string, pid int32) ([]ConnectionStat, error) { + return ConnectionsPidWithContext(context.Background(), kind, pid) +} + +// Return a list of network connections opened, omitting `Uids`. +// WithoutUids functions are reliant on implementation details. They may be altered to be an alias for Connections or be +// removed from the API in the future.
+func ConnectionsPidWithoutUids(kind string, pid int32) ([]ConnectionStat, error) { + return ConnectionsPidWithoutUidsWithContext(context.Background(), kind, pid) +} + +func ConnectionsPidMaxWithoutUids(kind string, pid int32, maxConn int) ([]ConnectionStat, error) { + return ConnectionsPidMaxWithoutUidsWithContext(context.Background(), kind, pid, maxConn) +} + +// Return up to `maxConn` network connections opened by a process. +func ConnectionsPidMax(kind string, pid int32, maxConn int) ([]ConnectionStat, error) { + return ConnectionsPidMaxWithContext(context.Background(), kind, pid, maxConn) +} + +// Pids returns all pids. +// Note: this is a copy of process_linux.Pids() +// FIXME: importing the process package would create an import cycle; +// moving this into common would break other platforms. Needs consideration. +func Pids() ([]int32, error) { + return PidsWithContext(context.Background()) } diff --git a/vendor/github.com/shirou/gopsutil/v3/net/net_aix.go b/vendor/github.com/shirou/gopsutil/v4/net/net_aix.go similarity index 67% rename from vendor/github.com/shirou/gopsutil/v3/net/net_aix.go rename to vendor/github.com/shirou/gopsutil/v4/net/net_aix.go index 81feaa8d7..d5a93f41f 100644 --- a/vendor/github.com/shirou/gopsutil/v3/net/net_aix.go +++ b/vendor/github.com/shirou/gopsutil/v4/net/net_aix.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build aix -// +build aix package net @@ -11,43 +11,27 @@ import ( "strings" "syscall" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" ) -func IOCounters(pernic bool) ([]IOCountersStat, error) { - return IOCountersWithContext(context.Background(), pernic) -} - -// IOCountersByFile exists just for compatibility with Linux. -func IOCountersByFile(pernic bool, filename string) ([]IOCountersStat, error) { - return IOCountersByFileWithContext(context.Background(), pernic, filename) -} - -func IOCountersByFileWithContext(ctx context.Context, pernic bool, filename string) ([]IOCountersStat, error) { - return IOCounters(pernic) -} - -func FilterCounters() ([]FilterStat, error) { - return FilterCountersWithContext(context.Background()) -} - -func FilterCountersWithContext(ctx context.Context) ([]FilterStat, error) { +// Deprecated: use process.PidsWithContext instead +func PidsWithContext(_ context.Context) ([]int32, error) { return nil, common.ErrNotImplementedError } -func ConntrackStats(percpu bool) ([]ConntrackStat, error) { - return ConntrackStatsWithContext(context.Background(), percpu) +func IOCountersByFileWithContext(ctx context.Context, pernic bool, _ string) ([]IOCountersStat, error) { + return IOCountersWithContext(ctx, pernic) } -func ConntrackStatsWithContext(ctx context.Context, percpu bool) ([]ConntrackStat, error) { +func FilterCountersWithContext(_ context.Context) ([]FilterStat, error) { return nil, common.ErrNotImplementedError } -func ProtoCounters(protocols []string) ([]ProtoCountersStat, error) { - return ProtoCountersWithContext(context.Background(), protocols) +func ConntrackStatsWithContext(_ context.Context, _ bool) ([]ConntrackStat, error) { + return nil, common.ErrNotImplementedError } -func ProtoCountersWithContext(ctx context.Context, protocols []string) ([]ProtoCountersStat, error) { +func ProtoCountersWithContext(_ context.Context, _ []string) ([]ProtoCountersStat, error) { return nil, common.ErrNotImplementedError } @@ -99,7 +83,7 @@ var portMatch = regexp.MustCompile(`(.*)\.(\d+)$`) // This function only works for netstat returning addresses with a "."
// before the port (0.0.0.0.22 instead of 0.0.0.0:22). -func parseNetstatAddr(local string, remote string, family uint32) (laddr Addr, raddr Addr, err error) { +func parseNetstatAddr(local, remote string, family uint32) (laddr, raddr Addr, err error) { parse := func(l string) (Addr, error) { matches := portMatch.FindStringSubmatch(l) if matches == nil { @@ -117,7 +101,7 @@ func parseNetstatAddr(local string, remote string, family uint32) (laddr Addr, r return Addr{}, fmt.Errorf("unknown family, %d", family) } } - lport, err := strconv.Atoi(port) + lport, err := strconv.ParseInt(port, 10, 32) if err != nil { return Addr{}, err } @@ -199,7 +183,7 @@ func hasCorrectInetProto(kind, proto string) bool { return false } -func parseNetstatA(output string, kind string) ([]ConnectionStat, error) { +func parseNetstatA(output, kind string) ([]ConnectionStat, error) { var ret []ConnectionStat lines := strings.Split(string(output), "\n") @@ -209,7 +193,8 @@ func parseNetstatA(output string, kind string) ([]ConnectionStat, error) { continue } - if strings.HasPrefix(fields[0], "f1") { + switch { + case strings.HasPrefix(fields[0], "f1"): // Unix lines if len(fields) < 2 { // every unix connections have two lines @@ -218,12 +203,12 @@ func parseNetstatA(output string, kind string) ([]ConnectionStat, error) { c, err := parseNetstatUnixLine(fields) if err != nil { - return nil, fmt.Errorf("failed to parse Unix Address (%s): %s", line, err) + return nil, fmt.Errorf("failed to parse Unix Address (%s): %w", line, err) } ret = append(ret, c) - } else if strings.HasPrefix(fields[0], "tcp") || strings.HasPrefix(fields[0], "udp") { + case strings.HasPrefix(fields[0], "tcp") || strings.HasPrefix(fields[0], "udp"): // Inet lines if !hasCorrectInetProto(kind, fields[0]) { continue @@ -237,11 +222,11 @@ func parseNetstatA(output string, kind string) ([]ConnectionStat, error) { c, err := parseNetstatNetLine(line) if err != nil { - return nil, fmt.Errorf("failed to parse Inet Address (%s): %s", line, err) + return nil, fmt.Errorf("failed to parse Inet Address (%s): %w", line, err) } ret = append(ret, c) - } else { + default: // Header lines continue } @@ -250,10 +235,6 @@ func parseNetstatA(output string, kind string) ([]ConnectionStat, error) { return ret, nil } -func Connections(kind string) ([]ConnectionStat, error) { - return ConnectionsWithContext(context.Background(), kind) -} - func ConnectionsWithContext(ctx context.Context, kind string) ([]ConnectionStat, error) { args := []string{"-na"} switch strings.ToLower(kind) { @@ -286,45 +267,34 @@ func ConnectionsWithContext(ctx context.Context, kind string) ([]ConnectionStat, return ret, nil } -func ConnectionsMax(kind string, max int) ([]ConnectionStat, error) { - return ConnectionsMaxWithContext(context.Background(), kind, max) -} - -func ConnectionsMaxWithContext(ctx context.Context, kind string, max int) ([]ConnectionStat, error) { - return []ConnectionStat{}, common.ErrNotImplementedError -} - -// Return a list of network connections opened, omitting `Uids`. -// WithoutUids functions are reliant on implementation details. They may be altered to be an alias for Connections or be -// removed from the API in the future. 
-func ConnectionsWithoutUids(kind string) ([]ConnectionStat, error) { - return ConnectionsWithoutUidsWithContext(context.Background(), kind) +func ConnectionsMaxWithContext(ctx context.Context, kind string, maxConn int) ([]ConnectionStat, error) { + return ConnectionsPidMaxWithContext(ctx, kind, 0, maxConn) } func ConnectionsWithoutUidsWithContext(ctx context.Context, kind string) ([]ConnectionStat, error) { return ConnectionsMaxWithoutUidsWithContext(ctx, kind, 0) } -func ConnectionsMaxWithoutUidsWithContext(ctx context.Context, kind string, max int) ([]ConnectionStat, error) { - return ConnectionsPidMaxWithoutUidsWithContext(ctx, kind, 0, max) +func ConnectionsMaxWithoutUidsWithContext(ctx context.Context, kind string, maxConn int) ([]ConnectionStat, error) { + return ConnectionsPidMaxWithoutUidsWithContext(ctx, kind, 0, maxConn) } -func ConnectionsPidWithoutUids(kind string, pid int32) ([]ConnectionStat, error) { - return ConnectionsPidWithoutUidsWithContext(context.Background(), kind, pid) +func ConnectionsPidWithContext(ctx context.Context, kind string, pid int32) ([]ConnectionStat, error) { + return ConnectionsPidMaxWithContext(ctx, kind, pid, 0) } func ConnectionsPidWithoutUidsWithContext(ctx context.Context, kind string, pid int32) ([]ConnectionStat, error) { return ConnectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, 0) } -func ConnectionsPidMaxWithoutUids(kind string, pid int32, max int) ([]ConnectionStat, error) { - return ConnectionsPidMaxWithoutUidsWithContext(context.Background(), kind, pid, max) +func ConnectionsPidMaxWithContext(ctx context.Context, kind string, pid int32, maxConn int) ([]ConnectionStat, error) { + return connectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, maxConn, false) } -func ConnectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, pid int32, max int) ([]ConnectionStat, error) { - return connectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, max) +func ConnectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, pid int32, maxConn int) ([]ConnectionStat, error) { + return connectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, maxConn, true) } -func connectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, pid int32, max int) ([]ConnectionStat, error) { +func connectionsPidMaxWithoutUidsWithContext(_ context.Context, _ string, _ int32, _ int, _ bool) ([]ConnectionStat, error) { return []ConnectionStat{}, common.ErrNotImplementedError } diff --git a/vendor/github.com/shirou/gopsutil/v3/net/net_aix_cgo.go b/vendor/github.com/shirou/gopsutil/v4/net/net_aix_cgo.go similarity index 88% rename from vendor/github.com/shirou/gopsutil/v3/net/net_aix_cgo.go rename to vendor/github.com/shirou/gopsutil/v4/net/net_aix_cgo.go index 8c34f881c..f7da4ce13 100644 --- a/vendor/github.com/shirou/gopsutil/v3/net/net_aix_cgo.go +++ b/vendor/github.com/shirou/gopsutil/v4/net/net_aix_cgo.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build aix && cgo -// +build aix,cgo package net @@ -29,8 +29,8 @@ func IOCountersWithContext(ctx context.Context, pernic bool) ([]IOCountersStat, } iocounters = append(iocounters, n) } - if pernic == false { - return getIOCountersAll(iocounters) + if !pernic { + return getIOCountersAll(iocounters), nil } return iocounters, nil } diff --git a/vendor/github.com/shirou/gopsutil/v3/net/net_aix_nocgo.go b/vendor/github.com/shirou/gopsutil/v4/net/net_aix_nocgo.go similarity index 89% rename from vendor/github.com/shirou/gopsutil/v3/net/net_aix_nocgo.go rename to 
vendor/github.com/shirou/gopsutil/v4/net/net_aix_nocgo.go index e3fce9021..834534d34 100644 --- a/vendor/github.com/shirou/gopsutil/v3/net/net_aix_nocgo.go +++ b/vendor/github.com/shirou/gopsutil/v4/net/net_aix_nocgo.go @@ -1,15 +1,15 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build aix && !cgo -// +build aix,!cgo package net import ( "context" - "fmt" + "errors" "strconv" "strings" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" ) func parseNetstatI(output string) ([]IOCountersStat, error) { @@ -19,7 +19,7 @@ func parseNetstatI(output string) ([]IOCountersStat, error) { // Check first line is header if len(lines) > 0 && strings.Fields(lines[0])[0] != "Name" { - return nil, fmt.Errorf("not a 'netstat -i' output") + return nil, errors.New("not a 'netstat -i' output") } for _, line := range lines[1:] { @@ -88,8 +88,8 @@ func IOCountersWithContext(ctx context.Context, pernic bool) ([]IOCountersStat, if err != nil { return nil, err } - if pernic == false { - return getIOCountersAll(iocounters) + if !pernic { + return getIOCountersAll(iocounters), nil } return iocounters, nil } diff --git a/vendor/github.com/shirou/gopsutil/v3/net/net_darwin.go b/vendor/github.com/shirou/gopsutil/v4/net/net_darwin.go similarity index 77% rename from vendor/github.com/shirou/gopsutil/v3/net/net_darwin.go rename to vendor/github.com/shirou/gopsutil/v4/net/net_darwin.go index 8a7b63744..c47e0c37f 100644 --- a/vendor/github.com/shirou/gopsutil/v3/net/net_darwin.go +++ b/vendor/github.com/shirou/gopsutil/v4/net/net_darwin.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build darwin -// +build darwin package net @@ -12,11 +12,11 @@ import ( "strconv" "strings" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" ) var ( - errNetstatHeader = errors.New("Can't parse header of netstat output") + errNetstatHeader = errors.New("can't parse header of netstat output") netstatLinkRegexp = regexp.MustCompile(`^<Link#(\d+)>$`) ) @@ -29,15 +29,14 @@ func parseNetstatLine(line string) (stat *IOCountersStat, linkID *uint, err erro ) if columns[0] == "Name" { - err = errNetstatHeader - return + return nil, nil, errNetstatHeader } // try to extract the numeric value from <Link#123> if subMatch := netstatLinkRegexp.FindStringSubmatch(columns[2]); len(subMatch) == 2 { numericValue, err = strconv.ParseUint(subMatch[1], 10, 64) if err != nil { - return + return nil, nil, err } linkIDUint := uint(numericValue) linkID = &linkIDUint @@ -50,8 +49,7 @@ func parseNetstatLine(line string) (stat *IOCountersStat, linkID *uint, err erro base = 0 } if numberColumns < 11 || numberColumns > 13 { - err = fmt.Errorf("Line %q do have an invalid number of columns %d", line, numberColumns) - return + return nil, nil, fmt.Errorf("line %q has an invalid number of columns %d", line, numberColumns) } parsed := make([]uint64, 0, 7) @@ -74,7 +72,7 @@ func parseNetstatLine(line string) (stat *IOCountersStat, linkID *uint, err erro } if numericValue, err = strconv.ParseUint(target, 10, 64); err != nil { - return + return nil, nil, err } parsed = append(parsed, numericValue) } @@ -91,7 +89,7 @@ func parseNetstatLine(line string) (stat *IOCountersStat, linkID *uint, err erro if len(parsed) == 7 { stat.Dropout = parsed[6] } - return + return stat, linkID, nil } type netstatInterface struct { @@ -143,8 +141,8 @@ func newMapInterfaceNameUsage(ifaces []netstatInterface) mapInterfaceNameUsage { return output } -func (min mapInterfaceNameUsage) isTruncated() bool {
- for _, usage := range min { +func (mapi mapInterfaceNameUsage) isTruncated() bool { + for _, usage := range mapi { if usage > 1 { return true } @@ -152,9 +150,9 @@ func (min mapInterfaceNameUsage) isTruncated() bool { return false } -func (min mapInterfaceNameUsage) notTruncated() []string { +func (mapi mapInterfaceNameUsage) notTruncated() []string { output := make([]string, 0) - for ifaceName, usage := range min { + for ifaceName, usage := range mapi { if usage == 1 { output = append(output, ifaceName) } @@ -162,15 +160,16 @@ func (min mapInterfaceNameUsage) notTruncated() []string { return output } +// Deprecated: use process.PidsWithContext instead +func PidsWithContext(_ context.Context) ([]int32, error) { + return nil, common.ErrNotImplementedError +} + // example of `netstat -ibdnW` output on yosemite // Name Mtu Network Address Ipkts Ierrs Ibytes Opkts Oerrs Obytes Coll Drop // lo0 16384 869107 0 169411755 869107 0 169411755 0 0 // lo0 16384 ::1/128 ::1 869107 - 169411755 869107 - 169411755 - - // lo0 16384 127 127.0.0.1 869107 - 169411755 869107 - 169411755 - - -func IOCounters(pernic bool) ([]IOCountersStat, error) { - return IOCountersWithContext(context.Background(), pernic) -} - func IOCountersWithContext(ctx context.Context, pernic bool) ([]IOCountersStat, error) { var ( ret []IOCountersStat @@ -247,45 +246,24 @@ func IOCountersWithContext(ctx context.Context, pernic bool) ([]IOCountersStat, } } - if pernic == false { - return getIOCountersAll(ret) + if !pernic { + return getIOCountersAll(ret), nil } return ret, nil } -// IOCountersByFile exists just for compatibility with Linux. -func IOCountersByFile(pernic bool, filename string) ([]IOCountersStat, error) { - return IOCountersByFileWithContext(context.Background(), pernic, filename) -} - -func IOCountersByFileWithContext(ctx context.Context, pernic bool, filename string) ([]IOCountersStat, error) { +func IOCountersByFileWithContext(ctx context.Context, pernic bool, _ string) ([]IOCountersStat, error) { return IOCountersWithContext(ctx, pernic) } -func FilterCounters() ([]FilterStat, error) { - return FilterCountersWithContext(context.Background()) -} - -func FilterCountersWithContext(ctx context.Context) ([]FilterStat, error) { +func FilterCountersWithContext(_ context.Context) ([]FilterStat, error) { return nil, common.ErrNotImplementedError } -func ConntrackStats(percpu bool) ([]ConntrackStat, error) { - return ConntrackStatsWithContext(context.Background(), percpu) -} - -func ConntrackStatsWithContext(ctx context.Context, percpu bool) ([]ConntrackStat, error) { +func ConntrackStatsWithContext(_ context.Context, _ bool) ([]ConntrackStat, error) { return nil, common.ErrNotImplementedError } -// ProtoCounters returns network statistics for the entire system -// If protocols is empty then all protocols are returned, otherwise -// just the protocols in the list are returned. 
-// Not Implemented for Darwin -func ProtoCounters(protocols []string) ([]ProtoCountersStat, error) { - return ProtoCountersWithContext(context.Background(), protocols) -} - -func ProtoCountersWithContext(ctx context.Context, protocols []string) ([]ProtoCountersStat, error) { +func ProtoCountersWithContext(_ context.Context, _ []string) ([]ProtoCountersStat, error) { return nil, common.ErrNotImplementedError } diff --git a/vendor/github.com/shirou/gopsutil/v4/net/net_fallback.go b/vendor/github.com/shirou/gopsutil/v4/net/net_fallback.go new file mode 100644 index 000000000..29c2a148e --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/net/net_fallback.go @@ -0,0 +1,71 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build !aix && !darwin && !linux && !freebsd && !openbsd && !windows && !solaris + +package net + +import ( + "context" + + "github.com/shirou/gopsutil/v4/internal/common" +) + +func IOCountersWithContext(_ context.Context, _ bool) ([]IOCountersStat, error) { + return []IOCountersStat{}, common.ErrNotImplementedError +} + +func IOCountersByFileWithContext(ctx context.Context, pernic bool, _ string) ([]IOCountersStat, error) { + return IOCountersWithContext(ctx, pernic) +} + +func FilterCountersWithContext(_ context.Context) ([]FilterStat, error) { + return nil, common.ErrNotImplementedError +} + +func ConntrackStatsWithContext(_ context.Context, _ bool) ([]ConntrackStat, error) { + return nil, common.ErrNotImplementedError +} + +func ProtoCountersWithContext(_ context.Context, _ []string) ([]ProtoCountersStat, error) { + return nil, common.ErrNotImplementedError +} + +// Deprecated: use process.PidsWithContext instead +func PidsWithContext(_ context.Context) ([]int32, error) { + return nil, common.ErrNotImplementedError +} + +func ConnectionsWithContext(_ context.Context, _ string) ([]ConnectionStat, error) { + return []ConnectionStat{}, common.ErrNotImplementedError +} + +func ConnectionsMaxWithContext(ctx context.Context, kind string, maxConn int) ([]ConnectionStat, error) { + return ConnectionsPidMaxWithContext(ctx, kind, 0, maxConn) +} + +func ConnectionsWithoutUidsWithContext(ctx context.Context, kind string) ([]ConnectionStat, error) { + return ConnectionsMaxWithoutUidsWithContext(ctx, kind, 0) +} + +func ConnectionsMaxWithoutUidsWithContext(ctx context.Context, kind string, maxConn int) ([]ConnectionStat, error) { + return ConnectionsPidMaxWithoutUidsWithContext(ctx, kind, 0, maxConn) +} + +func ConnectionsPidWithoutUidsWithContext(ctx context.Context, kind string, pid int32) ([]ConnectionStat, error) { + return ConnectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, 0) +} + +func ConnectionsPidWithContext(ctx context.Context, kind string, pid int32) ([]ConnectionStat, error) { + return ConnectionsPidMaxWithContext(ctx, kind, pid, 0) +} + +func ConnectionsPidMaxWithContext(ctx context.Context, kind string, pid int32, maxConn int) ([]ConnectionStat, error) { + return connectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, maxConn, false) +} + +func ConnectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, pid int32, maxConn int) ([]ConnectionStat, error) { + return connectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, maxConn, true) +} + +func connectionsPidMaxWithoutUidsWithContext(_ context.Context, _ string, _ int32, _ int, _ bool) ([]ConnectionStat, error) { + return []ConnectionStat{}, common.ErrNotImplementedError +} diff --git a/vendor/github.com/shirou/gopsutil/v3/net/net_freebsd.go b/vendor/github.com/shirou/gopsutil/v4/net/net_freebsd.go 
similarity index 57% rename from vendor/github.com/shirou/gopsutil/v3/net/net_freebsd.go rename to vendor/github.com/shirou/gopsutil/v4/net/net_freebsd.go index bf8baf094..a72aa00a6 100644 --- a/vendor/github.com/shirou/gopsutil/v3/net/net_freebsd.go +++ b/vendor/github.com/shirou/gopsutil/v4/net/net_freebsd.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build freebsd -// +build freebsd package net @@ -8,11 +8,12 @@ import ( "strconv" "strings" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" ) -func IOCounters(pernic bool) ([]IOCountersStat, error) { - return IOCountersWithContext(context.Background(), pernic) +// Deprecated: use process.PidsWithContext instead +func PidsWithContext(_ context.Context) ([]int32, error) { + return nil, common.ErrNotImplementedError } func IOCountersWithContext(ctx context.Context, pernic bool) ([]IOCountersStat, error) { @@ -83,46 +84,25 @@ func IOCountersWithContext(ctx context.Context, pernic bool) ([]IOCountersStat, ret = append(ret, n) } - if pernic == false { - return getIOCountersAll(ret) + if !pernic { + return getIOCountersAll(ret), nil } return ret, nil } -// IOCountersByFile exists just for compatibility with Linux. -func IOCountersByFile(pernic bool, filename string) ([]IOCountersStat, error) { - return IOCountersByFileWithContext(context.Background(), pernic, filename) -} - -func IOCountersByFileWithContext(ctx context.Context, pernic bool, filename string) ([]IOCountersStat, error) { - return IOCounters(pernic) -} - -func FilterCounters() ([]FilterStat, error) { - return FilterCountersWithContext(context.Background()) +func IOCountersByFileWithContext(ctx context.Context, pernic bool, _ string) ([]IOCountersStat, error) { + return IOCountersWithContext(ctx, pernic) } -func FilterCountersWithContext(ctx context.Context) ([]FilterStat, error) { +func FilterCountersWithContext(_ context.Context) ([]FilterStat, error) { return nil, common.ErrNotImplementedError } -func ConntrackStats(percpu bool) ([]ConntrackStat, error) { - return ConntrackStatsWithContext(context.Background(), percpu) -} - -func ConntrackStatsWithContext(ctx context.Context, percpu bool) ([]ConntrackStat, error) { +func ConntrackStatsWithContext(_ context.Context, _ bool) ([]ConntrackStat, error) { return nil, common.ErrNotImplementedError } -// ProtoCounters returns network statistics for the entire system -// If protocols is empty then all protocols are returned, otherwise -// just the protocols in the list are returned. 
-// Not Implemented for FreeBSD -func ProtoCounters(protocols []string) ([]ProtoCountersStat, error) { - return ProtoCountersWithContext(context.Background(), protocols) -} - -func ProtoCountersWithContext(ctx context.Context, protocols []string) ([]ProtoCountersStat, error) { +func ProtoCountersWithContext(_ context.Context, _ []string) ([]ProtoCountersStat, error) { return nil, common.ErrNotImplementedError } diff --git a/vendor/github.com/shirou/gopsutil/v3/net/net_linux.go b/vendor/github.com/shirou/gopsutil/v4/net/net_linux.go similarity index 74% rename from vendor/github.com/shirou/gopsutil/v3/net/net_linux.go rename to vendor/github.com/shirou/gopsutil/v4/net/net_linux.go index 6e8ce67fb..f01b04b50 100644 --- a/vendor/github.com/shirou/gopsutil/v3/net/net_linux.go +++ b/vendor/github.com/shirou/gopsutil/v4/net/net_linux.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build linux -// +build linux package net @@ -16,7 +16,7 @@ import ( "strings" "syscall" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" ) const ( // Conntrack Column numbers @@ -27,37 +27,24 @@ const ( // Conntrack Column numbers ctINVALID ctIGNORE ctDELETE - ctDELETE_LIST + ctDELETE_LIST //nolint:revive //FIXME ctINSERT - ctINSERT_FAILED + ctINSERT_FAILED //nolint:revive //FIXME ctDROP - ctEARLY_DROP - ctICMP_ERROR - CT_EXPEctNEW - ctEXPECT_CREATE - CT_EXPEctDELETE - ctSEARCH_RESTART + ctEARLY_DROP //nolint:revive //FIXME + ctICMP_ERROR //nolint:revive //FIXME + CT_EXPEctNEW //nolint:revive //FIXME + ctEXPECT_CREATE //nolint:revive //FIXME + CT_EXPEctDELETE //nolint:revive //FIXME + ctSEARCH_RESTART //nolint:revive //FIXME ) -// NetIOCounters returns network I/O statistics for every network -// interface installed on the system. If pernic argument is false, -// return only sum of all information (which name is 'all'). If true, -// every network interface installed on the system is returned -// separately. -func IOCounters(pernic bool) ([]IOCountersStat, error) { - return IOCountersWithContext(context.Background(), pernic) -} - func IOCountersWithContext(ctx context.Context, pernic bool) ([]IOCountersStat, error) { filename := common.HostProcWithContext(ctx, "net/dev") return IOCountersByFileWithContext(ctx, pernic, filename) } -func IOCountersByFile(pernic bool, filename string) ([]IOCountersStat, error) { - return IOCountersByFileWithContext(context.Background(), pernic, filename) -} - -func IOCountersByFileWithContext(ctx context.Context, pernic bool, filename string) ([]IOCountersStat, error) { +func IOCountersByFileWithContext(_ context.Context, pernic bool, filename string) ([]IOCountersStat, error) { lines, err := common.ReadLines(filename) if err != nil { return nil, err @@ -141,7 +128,7 @@ func IOCountersByFileWithContext(ctx context.Context, pernic bool, filename stri } if !pernic { - return getIOCountersAll(ret) + return getIOCountersAll(ret), nil } return ret, nil @@ -156,15 +143,6 @@ var netProtocols = []string{ "udplite", } -// ProtoCounters returns network statistics for the entire system -// If protocols is empty then all protocols are returned, otherwise -// just the protocols in the list are returned. 
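ProtoCountersWithContext keeps its full implementation on Linux, and an empty protocols slice still means all of ip, icmp, icmpmsg, tcp, udp and udplite. A short usage sketch (Linux only; other platforms return a not-implemented error):

package main

import (
	"context"
	"fmt"
	"log"

	gnet "github.com/shirou/gopsutil/v4/net"
)

func main() {
	// Passing nil asks for every supported protocol; pass e.g.
	// []string{"tcp"} to restrict the result.
	stats, err := gnet.ProtoCountersWithContext(context.Background(), nil)
	if err != nil {
		log.Fatal(err)
	}
	for _, s := range stats {
		fmt.Printf("%s: %d counters\n", s.Protocol, len(s.Stats))
	}
}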
-// Available protocols: -// [ip,icmp,icmpmsg,tcp,udp,udplite] -func ProtoCounters(protocols []string) ([]ProtoCountersStat, error) { - return ProtoCountersWithContext(context.Background(), protocols) -} - func ProtoCountersWithContext(ctx context.Context, protocols []string) ([]ProtoCountersStat, error) { if len(protocols) == 0 { protocols = netProtocols @@ -221,13 +199,6 @@ func ProtoCountersWithContext(ctx context.Context, protocols []string) ([]ProtoC return stats, nil } -// NetFilterCounters returns iptables conntrack statistics -// the currently in use conntrack count and the max. -// If the file does not exist or is invalid it will return nil. -func FilterCounters() ([]FilterStat, error) { - return FilterCountersWithContext(context.Background()) -} - func FilterCountersWithContext(ctx context.Context) ([]FilterStat, error) { countfile := common.HostProcWithContext(ctx, "sys/net/netfilter/nf_conntrack_count") maxfile := common.HostProcWithContext(ctx, "sys/net/netfilter/nf_conntrack_max") @@ -238,25 +209,20 @@ func FilterCountersWithContext(ctx context.Context) ([]FilterStat, error) { } stats := make([]FilterStat, 0, 1) - max, err := common.ReadInts(maxfile) + maxConn, err := common.ReadInts(maxfile) if err != nil { return nil, err } payload := FilterStat{ ConnTrackCount: count[0], - ConnTrackMax: max[0], + ConnTrackMax: maxConn[0], } stats = append(stats, payload) return stats, nil } -// ConntrackStats returns more detailed info about the conntrack table -func ConntrackStats(percpu bool) ([]ConntrackStat, error) { - return ConntrackStatsWithContext(context.Background(), percpu) -} - // ConntrackStatsWithContext returns more detailed info about the conntrack table func ConntrackStatsWithContext(ctx context.Context, percpu bool) ([]ConntrackStat, error) { return conntrackStatsFromFile(common.HostProcWithContext(ctx, "net/stat/nf_conntrack"), percpu) @@ -385,47 +351,20 @@ type connTmp struct { path string } -// Return a list of network connections opened. -func Connections(kind string) ([]ConnectionStat, error) { - return ConnectionsWithContext(context.Background(), kind) -} - func ConnectionsWithContext(ctx context.Context, kind string) ([]ConnectionStat, error) { return ConnectionsPidWithContext(ctx, kind, 0) } -// Return a list of network connections opened returning at most `max` -// connections for each running process. -func ConnectionsMax(kind string, max int) ([]ConnectionStat, error) { - return ConnectionsMaxWithContext(context.Background(), kind, max) -} - -func ConnectionsMaxWithContext(ctx context.Context, kind string, max int) ([]ConnectionStat, error) { - return ConnectionsPidMaxWithContext(ctx, kind, 0, max) -} - -// Return a list of network connections opened, omitting `Uids`. -// WithoutUids functions are reliant on implementation details. They may be altered to be an alias for Connections or be -// removed from the API in the future. 
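The max → maxConn renames running through the rest of this file (like the min → mapi receiver rename earlier) keep these identifiers clear of the min and max builtins added in Go 1.21. The hazard being avoided, in isolation:

package main

import "fmt"

// With a parameter named "max" in scope, the Go 1.21 builtin of the same
// name is shadowed and cannot be called inside this function.
func take(items []int, max int) []int {
	if max <= 0 || max > len(items) {
		max = len(items)
	}
	return items[:max]
}

func main() {
	fmt.Println(take([]int{1, 2, 3}, 2)) // [1 2]
}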
-func ConnectionsWithoutUids(kind string) ([]ConnectionStat, error) { - return ConnectionsWithoutUidsWithContext(context.Background(), kind) +func ConnectionsMaxWithContext(ctx context.Context, kind string, maxConn int) ([]ConnectionStat, error) { + return ConnectionsPidMaxWithContext(ctx, kind, 0, maxConn) } func ConnectionsWithoutUidsWithContext(ctx context.Context, kind string) ([]ConnectionStat, error) { return ConnectionsMaxWithoutUidsWithContext(ctx, kind, 0) } -func ConnectionsMaxWithoutUidsWithContext(ctx context.Context, kind string, max int) ([]ConnectionStat, error) { - return ConnectionsPidMaxWithoutUidsWithContext(ctx, kind, 0, max) -} - -// Return a list of network connections opened by a process. -func ConnectionsPid(kind string, pid int32) ([]ConnectionStat, error) { - return ConnectionsPidWithContext(context.Background(), kind, pid) -} - -func ConnectionsPidWithoutUids(kind string, pid int32) ([]ConnectionStat, error) { - return ConnectionsPidWithoutUidsWithContext(context.Background(), kind, pid) +func ConnectionsMaxWithoutUidsWithContext(ctx context.Context, kind string, maxConn int) ([]ConnectionStat, error) { + return ConnectionsPidMaxWithoutUidsWithContext(ctx, kind, 0, maxConn) } func ConnectionsPidWithContext(ctx context.Context, kind string, pid int32) ([]ConnectionStat, error) { @@ -436,24 +375,15 @@ func ConnectionsPidWithoutUidsWithContext(ctx context.Context, kind string, pid return ConnectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, 0) } -// Return up to `max` network connections opened by a process. -func ConnectionsPidMax(kind string, pid int32, max int) ([]ConnectionStat, error) { - return ConnectionsPidMaxWithContext(context.Background(), kind, pid, max) -} - -func ConnectionsPidMaxWithoutUids(kind string, pid int32, max int) ([]ConnectionStat, error) { - return ConnectionsPidMaxWithoutUidsWithContext(context.Background(), kind, pid, max) -} - -func ConnectionsPidMaxWithContext(ctx context.Context, kind string, pid int32, max int) ([]ConnectionStat, error) { - return connectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, max, false) +func ConnectionsPidMaxWithContext(ctx context.Context, kind string, pid int32, maxConn int) ([]ConnectionStat, error) { + return connectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, maxConn, false) } -func ConnectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, pid int32, max int) ([]ConnectionStat, error) { - return connectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, max, true) +func ConnectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, pid int32, maxConn int) ([]ConnectionStat, error) { + return connectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, maxConn, true) } -func connectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, pid int32, max int, skipUids bool) ([]ConnectionStat, error) { +func connectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, pid int32, maxConn int, skipUids bool) ([]ConnectionStat, error) { tmap, ok := netConnectionKindMap[kind] if !ok { return nil, fmt.Errorf("invalid kind, %s", kind) @@ -462,24 +392,20 @@ func connectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, p var err error var inodes map[string][]inodeMap if pid == 0 { - inodes, err = getProcInodesAllWithContext(ctx, root, max) + inodes, err = getProcInodesAllWithContext(ctx, root, maxConn) } else { - inodes, err = getProcInodes(root, pid, max) + inodes, err = getProcInodes(root, pid, maxConn) if len(inodes) == 0 { // no connection for 
the pid return []ConnectionStat{}, nil } } if err != nil { - return nil, fmt.Errorf("cound not get pid(s), %d: %w", pid, err) + return nil, fmt.Errorf("could not get pid(s), %d: %w", pid, err) } return statsFromInodesWithContext(ctx, root, pid, tmap, inodes, skipUids) } -func statsFromInodes(root string, pid int32, tmap []netConnectionKindType, inodes map[string][]inodeMap, skipUids bool) ([]ConnectionStat, error) { - return statsFromInodesWithContext(context.Background(), root, pid, tmap, inodes, skipUids) -} - func statsFromInodesWithContext(ctx context.Context, root string, pid int32, tmap []netConnectionKindType, inodes map[string][]inodeMap, skipUids bool) ([]ConnectionStat, error) { dupCheckMap := make(map[string]struct{}) var ret []ConnectionStat @@ -496,7 +422,7 @@ func statsFromInodesWithContext(ctx context.Context, root string, pid int32, tma } switch t.family { case syscall.AF_INET, syscall.AF_INET6: - ls, err = processInetWithContext(ctx, path, t, inodes, pid) + ls, err = processInet(path, t, inodes, pid) case syscall.AF_UNIX: ls, err = processUnix(path, t, inodes, pid) } @@ -543,7 +469,7 @@ func statsFromInodesWithContext(ctx context.Context, root string, pid int32, tma } // getProcInodes returns fd of the pid. -func getProcInodes(root string, pid int32, max int) (map[string][]inodeMap, error) { +func getProcInodes(root string, pid int32, maxConn int) (map[string][]inodeMap, error) { ret := make(map[string][]inodeMap) dir := fmt.Sprintf("%s/%d/fd", root, pid) @@ -552,7 +478,7 @@ func getProcInodes(root string, pid int32, max int) (map[string][]inodeMap, erro return ret, err } defer f.Close() - dirEntries, err := readDir(f, max) + dirEntries, err := f.ReadDir(maxConn) if err != nil { return ret, err } @@ -573,7 +499,7 @@ func getProcInodes(root string, pid int32, max int) (map[string][]inodeMap, erro if !ok { ret[inode] = make([]inodeMap, 0) } - fd, err := strconv.Atoi(dirEntry.Name()) + fd, err := strconv.ParseInt(dirEntry.Name(), 10, 32) if err != nil { continue } @@ -587,14 +513,6 @@ func getProcInodes(root string, pid int32, max int) (map[string][]inodeMap, erro return ret, nil } -// Pids retunres all pids. -// Note: this is a copy of process_linux.Pids() -// FIXME: Import process occures import cycle. -// move to common made other platform breaking. Need consider. -func Pids() ([]int32, error) { - return PidsWithContext(context.Background()) -} - func PidsWithContext(ctx context.Context) ([]int32, error) { var ret []int32 @@ -622,7 +540,7 @@ func PidsWithContext(ctx context.Context) ([]int32, error) { // Note: the following is based off process_linux structs and methods // we need these to fetch the owner of a process ID -// FIXME: Import process occures import cycle. +// FIXME: Import process occurs import cycle. 
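PidsWithContext above enumerates processes by listing /proc and keeping every directory whose name parses as a number. A standalone sketch of that scan; listPids is a hypothetical helper, not the vendored function:

package main

import (
	"fmt"
	"os"
	"strconv"
)

// listPids treats every all-numeric directory under procRoot as a PID,
// skipping entries such as "sys" or "net" that fail to parse.
func listPids(procRoot string) ([]int32, error) {
	entries, err := os.ReadDir(procRoot)
	if err != nil {
		return nil, err
	}
	var pids []int32
	for _, e := range entries {
		if !e.IsDir() {
			continue
		}
		pid, err := strconv.ParseInt(e.Name(), 10, 32)
		if err != nil {
			continue // not a PID directory
		}
		pids = append(pids, int32(pid))
	}
	return pids, nil
}

func main() {
	pids, err := listPids("/proc")
	if err != nil {
		fmt.Println("no /proc on this system:", err)
		return
	}
	fmt.Println("found", len(pids), "processes")
}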
// see remarks on pids() type process struct { Pid int32 `json:"pid"` @@ -653,8 +571,7 @@ func (p *process) fillFromStatus(ctx context.Context) error { continue } value := tabParts[1] - switch strings.TrimRight(tabParts[0], ":") { - case "Uid": + if strings.TrimRight(tabParts[0], ":") == "Uid" { p.uids = make([]int32, 0, 4) for _, i := range strings.Split(value, "\t") { v, err := strconv.ParseInt(i, 10, 32) @@ -668,11 +585,7 @@ func (p *process) fillFromStatus(ctx context.Context) error { return nil } -func getProcInodesAll(root string, max int) (map[string][]inodeMap, error) { - return getProcInodesAllWithContext(context.Background(), root, max) -} - -func getProcInodesAllWithContext(ctx context.Context, root string, max int) (map[string][]inodeMap, error) { +func getProcInodesAllWithContext(ctx context.Context, root string, maxConn int) (map[string][]inodeMap, error) { pids, err := PidsWithContext(ctx) if err != nil { return nil, err @@ -680,7 +593,7 @@ func getProcInodesAllWithContext(ctx context.Context, root string, max int) (map ret := make(map[string][]inodeMap) for _, pid := range pids { - t, err := getProcInodes(root, pid, max) + t, err := getProcInodes(root, pid, maxConn) if err != nil { // skip if permission error or no longer exists if os.IsPermission(err) || os.IsNotExist(err) || errors.Is(err, io.EOF) { @@ -702,10 +615,6 @@ func getProcInodesAllWithContext(ctx context.Context, root string, max int) (map // "0500000A:0016" -> "10.0.0.5", 22 // "0085002452100113070057A13F025401:0035" -> "2400:8500:1301:1052:a157:7:154:23f", 53 func decodeAddress(family uint32, src string) (Addr, error) { - return decodeAddressWithContext(context.Background(), family, src) -} - -func decodeAddressWithContext(ctx context.Context, family uint32, src string) (Addr, error) { t := strings.Split(src, ":") if len(t) != 2 { return Addr{}, fmt.Errorf("does not contain port, %s", src) @@ -723,12 +632,12 @@ func decodeAddressWithContext(ctx context.Context, family uint32, src string) (A if family == syscall.AF_INET { if common.IsLittleEndian() { - ip = net.IP(ReverseWithContext(ctx, decoded)) + ip = net.IP(Reverse(decoded)) } else { ip = net.IP(decoded) } } else { // IPv6 - ip, err = parseIPv6HexStringWithContext(ctx, decoded) + ip, err = parseIPv6HexString(decoded) if err != nil { return Addr{}, err } @@ -739,12 +648,7 @@ func decodeAddressWithContext(ctx context.Context, family uint32, src string) (A }, nil } -// Reverse reverses array of bytes. func Reverse(s []byte) []byte { - return ReverseWithContext(context.Background(), s) -} - -func ReverseWithContext(ctx context.Context, s []byte) []byte { for i, j := 0, len(s)-1; i < j; i, j = i+1, j-1 { s[i], s[j] = s[j], s[i] } @@ -753,27 +657,19 @@ func ReverseWithContext(ctx context.Context, s []byte) []byte { // parseIPv6HexString parse array of bytes to IPv6 string func parseIPv6HexString(src []byte) (net.IP, error) { - return parseIPv6HexStringWithContext(context.Background(), src) -} - -func parseIPv6HexStringWithContext(ctx context.Context, src []byte) (net.IP, error) { if len(src) != 16 { - return nil, fmt.Errorf("invalid IPv6 string") + return nil, errors.New("invalid IPv6 string") } buf := make([]byte, 0, 16) for i := 0; i < len(src); i += 4 { - r := ReverseWithContext(ctx, src[i:i+4]) + r := Reverse(src[i : i+4]) buf = append(buf, r...) 
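decodeAddress converts the kernel's hex notation from /proc/net/tcp into an address and port; on little-endian machines the four IPv4 bytes arrive reversed, which is what Reverse undoes. A self-contained sketch of the IPv4 case, using the example from the comment above:

package main

import (
	"encoding/hex"
	"fmt"
	"net"
	"strconv"
	"strings"
)

// decodeIPv4 parses entries like "0500000A:0016" (10.0.0.5:22) as found
// in /proc/net/tcp on little-endian systems. A sketch, not the vendored code.
func decodeIPv4(src string) (net.IP, uint16, error) {
	parts := strings.Split(src, ":")
	if len(parts) != 2 {
		return nil, 0, fmt.Errorf("does not contain port, %s", src)
	}
	raw, err := hex.DecodeString(parts[0])
	if err != nil || len(raw) != 4 {
		return nil, 0, fmt.Errorf("bad address, %s", parts[0])
	}
	// The kernel writes the address in host byte order, so reverse it.
	for i, j := 0, len(raw)-1; i < j; i, j = i+1, j-1 {
		raw[i], raw[j] = raw[j], raw[i]
	}
	port, err := strconv.ParseUint(parts[1], 16, 16)
	if err != nil {
		return nil, 0, err
	}
	return net.IP(raw), uint16(port), nil
}

func main() {
	ip, port, err := decodeIPv4("0500000A:0016")
	fmt.Println(ip, port, err) // 10.0.0.5 22 <nil>
}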
} return net.IP(buf), nil } func processInet(file string, kind netConnectionKindType, inodes map[string][]inodeMap, filterPid int32) ([]connTmp, error) { - return processInetWithContext(context.Background(), file, kind, inodes, filterPid) -} - -func processInetWithContext(ctx context.Context, file string, kind netConnectionKindType, inodes map[string][]inodeMap, filterPid int32) ([]connTmp, error) { if strings.HasSuffix(file, "6") && !common.PathExists(file) { // IPv6 not supported, return empty. return []connTmp{}, nil @@ -816,11 +712,11 @@ func processInetWithContext(ctx context.Context, file string, kind netConnection } else { status = "NONE" } - la, err := decodeAddressWithContext(ctx, kind.family, laddr) + la, err := decodeAddress(kind.family, laddr) if err != nil { continue } - ra, err := decodeAddressWithContext(ctx, kind.family, raddr) + ra, err := decodeAddress(kind.family, raddr) if err != nil { continue } @@ -858,7 +754,7 @@ func processUnix(file string, kind netConnectionKindType, inodes map[string][]in if len(tokens) < 6 { continue } - st, err := strconv.Atoi(tokens[4]) + st, err := strconv.ParseInt(tokens[4], 10, 32) if err != nil { return nil, err } @@ -897,7 +793,7 @@ func processUnix(file string, kind netConnectionKindType, inodes map[string][]in return ret, nil } -func updateMap(src map[string][]inodeMap, add map[string][]inodeMap) map[string][]inodeMap { +func updateMap(src, add map[string][]inodeMap) map[string][]inodeMap { for key, value := range add { a, exists := src[key] if !exists { diff --git a/vendor/github.com/shirou/gopsutil/v3/net/net_openbsd.go b/vendor/github.com/shirou/gopsutil/v4/net/net_openbsd.go similarity index 65% rename from vendor/github.com/shirou/gopsutil/v3/net/net_openbsd.go rename to vendor/github.com/shirou/gopsutil/v4/net/net_openbsd.go index cf48f53e7..55087ce37 100644 --- a/vendor/github.com/shirou/gopsutil/v3/net/net_openbsd.go +++ b/vendor/github.com/shirou/gopsutil/v4/net/net_openbsd.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build openbsd -// +build openbsd package net @@ -12,20 +12,21 @@ import ( "strings" "syscall" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" ) var portMatch = regexp.MustCompile(`(.*)\.(\d+)$`) func ParseNetstat(output string, mode string, - iocs map[string]IOCountersStat) error { + iocs map[string]IOCountersStat, +) error { lines := strings.Split(output, "\n") exists := make([]string, 0, len(lines)-1) - columns := 6 - if mode == "ind" { - columns = 10 + columns := 9 + if mode == "inb" { + columns = 6 } for _, line := range lines { values := strings.Fields(line) @@ -48,18 +49,23 @@ func ParseNetstat(output string, mode string, parsed := make([]uint64, 0, 8) var vv []string - if mode == "inb" { + switch mode { + case "inb": vv = []string{ values[base+3], // BytesRecv values[base+4], // BytesSent } - } else { + case "ind": vv = []string{ values[base+3], // Ipkts - values[base+4], // Ierrs + values[base+4], // Idrop values[base+5], // Opkts + values[base+6], // Odrops + } + case "ine": + vv = []string{ + values[base+4], // Ierrs values[base+6], // Oerrs - values[base+8], // Drops } } for _, target := range vv { @@ -80,16 +86,19 @@ func ParseNetstat(output string, mode string, if !present { n = IOCountersStat{Name: values[0]} } - if mode == "inb" { + + switch mode { + case "inb": n.BytesRecv = parsed[0] n.BytesSent = parsed[1] - } else { + case "ind": n.PacketsRecv = parsed[0] - n.Errin = parsed[1] + n.Dropin = parsed[1] n.PacketsSent = 
parsed[2] - n.Errout = parsed[3] - n.Dropin = parsed[4] - n.Dropout = parsed[4] + n.Dropout = parsed[3] + case "ine": + n.Errin = parsed[0] + n.Errout = parsed[1] } iocs[n.Name] = n @@ -97,8 +106,9 @@ func ParseNetstat(output string, mode string, return nil } -func IOCounters(pernic bool) ([]IOCountersStat, error) { - return IOCountersWithContext(context.Background(), pernic) +// Deprecated: use process.PidsWithContext instead +func PidsWithContext(_ context.Context) ([]int32, error) { + return nil, common.ErrNotImplementedError } func IOCountersWithContext(ctx context.Context, pernic bool) ([]IOCountersStat, error) { @@ -114,6 +124,10 @@ func IOCountersWithContext(ctx context.Context, pernic bool) ([]IOCountersStat, if err != nil { return nil, err } + out3, err := invoke.CommandWithContext(ctx, netstat, "-ine") + if err != nil { + return nil, err + } iocs := make(map[string]IOCountersStat) lines := strings.Split(string(out), "\n") @@ -127,52 +141,35 @@ func IOCountersWithContext(ctx context.Context, pernic bool) ([]IOCountersStat, if err != nil { return nil, err } + err = ParseNetstat(string(out3), "ine", iocs) + if err != nil { + return nil, err + } for _, ioc := range iocs { ret = append(ret, ioc) } - if pernic == false { - return getIOCountersAll(ret) + if !pernic { + return getIOCountersAll(ret), nil } return ret, nil } -// IOCountersByFile exists just for compatibility with Linux. -func IOCountersByFile(pernic bool, filename string) ([]IOCountersStat, error) { - return IOCountersByFileWithContext(context.Background(), pernic, filename) -} - -func IOCountersByFileWithContext(ctx context.Context, pernic bool, filename string) ([]IOCountersStat, error) { - return IOCounters(pernic) +func IOCountersByFileWithContext(ctx context.Context, pernic bool, _ string) ([]IOCountersStat, error) { + return IOCountersWithContext(ctx, pernic) } -func FilterCounters() ([]FilterStat, error) { - return FilterCountersWithContext(context.Background()) -} - -func FilterCountersWithContext(ctx context.Context) ([]FilterStat, error) { +func FilterCountersWithContext(_ context.Context) ([]FilterStat, error) { return nil, common.ErrNotImplementedError } -func ConntrackStats(percpu bool) ([]ConntrackStat, error) { - return ConntrackStatsWithContext(context.Background(), percpu) -} - -func ConntrackStatsWithContext(ctx context.Context, percpu bool) ([]ConntrackStat, error) { +func ConntrackStatsWithContext(_ context.Context, _ bool) ([]ConntrackStat, error) { return nil, common.ErrNotImplementedError } -// ProtoCounters returns network statistics for the entire system -// If protocols is empty then all protocols are returned, otherwise -// just the protocols in the list are returned. 
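The reworked OpenBSD path above merges three netstat passes, -inb for bytes, -ind for packets and drops, and -ine for errors, instead of deriving errors and drops from one mis-indexed table. A toy version of the per-mode merge, with hypothetical types:

package main

import "fmt"

type ioCounters struct {
	name              string
	bytesIn, bytesOut uint64
	dropsIn, dropsOut uint64
	errsIn, errsOut   uint64
}

// merge folds one parsed netstat table into the per-interface map,
// mirroring how ParseNetstat fills a shared map once per mode.
func merge(m map[string]ioCounters, name, mode string, vals []uint64) {
	c := m[name]
	c.name = name
	switch mode {
	case "inb":
		c.bytesIn, c.bytesOut = vals[0], vals[1]
	case "ind":
		c.dropsIn, c.dropsOut = vals[0], vals[1]
	case "ine":
		c.errsIn, c.errsOut = vals[0], vals[1]
	}
	m[name] = c
}

func main() {
	m := map[string]ioCounters{}
	merge(m, "em0", "inb", []uint64{1024, 2048})
	merge(m, "em0", "ine", []uint64{1, 0})
	fmt.Printf("%+v\n", m["em0"])
}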
-// Not Implemented for OpenBSD -func ProtoCounters(protocols []string) ([]ProtoCountersStat, error) { - return ProtoCountersWithContext(context.Background(), protocols) -} - -func ProtoCountersWithContext(ctx context.Context, protocols []string) ([]ProtoCountersStat, error) { +func ProtoCountersWithContext(_ context.Context, _ []string) ([]ProtoCountersStat, error) { return nil, common.ErrNotImplementedError } @@ -220,7 +217,7 @@ func parseNetstatLine(line string) (ConnectionStat, error) { return n, nil } -func parseNetstatAddr(local string, remote string, family uint32) (laddr Addr, raddr Addr, err error) { +func parseNetstatAddr(local, remote string, family uint32) (laddr, raddr Addr, err error) { parse := func(l string) (Addr, error) { matches := portMatch.FindStringSubmatch(l) if matches == nil { @@ -238,7 +235,7 @@ func parseNetstatAddr(local string, remote string, family uint32) (laddr Addr, r return Addr{}, fmt.Errorf("unknown family, %d", family) } } - lport, err := strconv.Atoi(port) + lport, err := strconv.ParseInt(port, 10, 32) if err != nil { return Addr{}, err } @@ -256,11 +253,6 @@ func parseNetstatAddr(local string, remote string, family uint32) (laddr Addr, r return laddr, raddr, err } -// Return a list of network connections opened. -func Connections(kind string) ([]ConnectionStat, error) { - return ConnectionsWithContext(context.Background(), kind) -} - func ConnectionsWithContext(ctx context.Context, kind string) ([]ConnectionStat, error) { var ret []ConnectionStat @@ -268,11 +260,7 @@ func ConnectionsWithContext(ctx context.Context, kind string) ([]ConnectionStat, switch strings.ToLower(kind) { default: fallthrough - case "": - fallthrough - case "all": - fallthrough - case "inet": + case "", "all", "inet": // nothing to add case "inet4": args = append(args, "-finet") @@ -304,7 +292,7 @@ func ConnectionsWithContext(ctx context.Context, kind string) ([]ConnectionStat, } lines := strings.Split(string(out), "\n") for _, line := range lines { - if !(strings.HasPrefix(line, "tcp") || strings.HasPrefix(line, "udp")) { + if !strings.HasPrefix(line, "tcp") && !strings.HasPrefix(line, "udp") { continue } n, err := parseNetstatLine(line) @@ -317,3 +305,35 @@ func ConnectionsWithContext(ctx context.Context, kind string) ([]ConnectionStat, return ret, nil } + +func ConnectionsPidWithContext(_ context.Context, _ string, _ int32) ([]ConnectionStat, error) { + return nil, common.ErrNotImplementedError +} + +func ConnectionsMaxWithContext(_ context.Context, _ string, _ int) ([]ConnectionStat, error) { + return nil, common.ErrNotImplementedError +} + +func ConnectionsPidMaxWithContext(_ context.Context, _ string, _ int32, _ int) ([]ConnectionStat, error) { + return nil, common.ErrNotImplementedError +} + +func ConnectionsWithoutUidsWithContext(ctx context.Context, kind string) ([]ConnectionStat, error) { + return ConnectionsMaxWithoutUidsWithContext(ctx, kind, 0) +} + +func ConnectionsMaxWithoutUidsWithContext(ctx context.Context, kind string, maxConn int) ([]ConnectionStat, error) { + return ConnectionsPidMaxWithoutUidsWithContext(ctx, kind, 0, maxConn) +} + +func ConnectionsPidWithoutUidsWithContext(ctx context.Context, kind string, pid int32) ([]ConnectionStat, error) { + return ConnectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, 0) +} + +func ConnectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, pid int32, maxConn int) ([]ConnectionStat, error) { + return connectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, maxConn) +} + +func 
connectionsPidMaxWithoutUidsWithContext(_ context.Context, _ string, _ int32, _ int) ([]ConnectionStat, error) { + return nil, common.ErrNotImplementedError +} diff --git a/vendor/github.com/shirou/gopsutil/v3/net/net_solaris.go b/vendor/github.com/shirou/gopsutil/v4/net/net_solaris.go similarity index 58% rename from vendor/github.com/shirou/gopsutil/v3/net/net_solaris.go rename to vendor/github.com/shirou/gopsutil/v4/net/net_solaris.go index 79d8ac30e..df067806c 100644 --- a/vendor/github.com/shirou/gopsutil/v3/net/net_solaris.go +++ b/vendor/github.com/shirou/gopsutil/v4/net/net_solaris.go @@ -1,28 +1,20 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build solaris -// +build solaris package net import ( "context" + "errors" "fmt" "regexp" "runtime" "strconv" "strings" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" ) -// NetIOCounters returnes network I/O statistics for every network -// interface installed on the system. If pernic argument is false, -// return only sum of all information (which name is 'all'). If true, -// every network interface installed on the system is returned -// separately. -func IOCounters(pernic bool) ([]IOCountersStat, error) { - return IOCountersWithContext(context.Background(), pernic) -} - var kstatSplit = regexp.MustCompile(`[:\s]+`) func IOCountersWithContext(ctx context.Context, pernic bool) ([]IOCountersStat, error) { @@ -38,7 +30,7 @@ func IOCountersWithContext(ctx context.Context, pernic bool) ([]IOCountersStat, lines := strings.Split(strings.TrimSpace(string(kstatSysOut)), "\n") if len(lines) == 0 { - return nil, fmt.Errorf("no interface found") + return nil, errors.New("no interface found") } rbytes64arr := make(map[string]uint64) ipackets64arr := make(map[string]uint64) @@ -113,32 +105,65 @@ func IOCountersWithContext(ctx context.Context, pernic bool) ([]IOCountersStat, } if !pernic { - return getIOCountersAll(ret) + return getIOCountersAll(ret), nil } return ret, nil } -func Connections(kind string) ([]ConnectionStat, error) { - return ConnectionsWithContext(context.Background(), kind) +func IOCountersByFileWithContext(ctx context.Context, pernic bool, _ string) ([]IOCountersStat, error) { + return IOCountersWithContext(ctx, pernic) +} + +func FilterCountersWithContext(_ context.Context) ([]FilterStat, error) { + return nil, common.ErrNotImplementedError +} + +func ConntrackStatsWithContext(_ context.Context, _ bool) ([]ConntrackStat, error) { + return nil, common.ErrNotImplementedError +} + +func ProtoCountersWithContext(_ context.Context, _ []string) ([]ProtoCountersStat, error) { + return nil, common.ErrNotImplementedError +} + +// Deprecated: use process.PidsWithContext instead +func PidsWithContext(_ context.Context) ([]int32, error) { + return nil, common.ErrNotImplementedError } -func ConnectionsWithContext(ctx context.Context, kind string) ([]ConnectionStat, error) { +func ConnectionsWithContext(_ context.Context, _ string) ([]ConnectionStat, error) { return []ConnectionStat{}, common.ErrNotImplementedError } -func FilterCounters() ([]FilterStat, error) { - return FilterCountersWithContext(context.Background()) +func ConnectionsMaxWithContext(ctx context.Context, kind string, maxConn int) ([]ConnectionStat, error) { + return ConnectionsPidMaxWithContext(ctx, kind, 0, maxConn) } -func FilterCountersWithContext(ctx context.Context) ([]FilterStat, error) { - return []FilterStat{}, common.ErrNotImplementedError +func ConnectionsWithoutUidsWithContext(ctx context.Context, kind string) 
([]ConnectionStat, error) { + return ConnectionsMaxWithoutUidsWithContext(ctx, kind, 0) } -func ProtoCounters(protocols []string) ([]ProtoCountersStat, error) { - return ProtoCountersWithContext(context.Background(), protocols) +func ConnectionsMaxWithoutUidsWithContext(ctx context.Context, kind string, maxConn int) ([]ConnectionStat, error) { + return ConnectionsPidMaxWithoutUidsWithContext(ctx, kind, 0, maxConn) } -func ProtoCountersWithContext(ctx context.Context, protocols []string) ([]ProtoCountersStat, error) { - return []ProtoCountersStat{}, common.ErrNotImplementedError +func ConnectionsPidWithoutUidsWithContext(ctx context.Context, kind string, pid int32) ([]ConnectionStat, error) { + return ConnectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, 0) +} + +func ConnectionsPidWithContext(ctx context.Context, kind string, pid int32) ([]ConnectionStat, error) { + return ConnectionsPidMaxWithContext(ctx, kind, pid, 0) +} + +func ConnectionsPidMaxWithContext(ctx context.Context, kind string, pid int32, maxConn int) ([]ConnectionStat, error) { + return connectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, maxConn, false) +} + +func ConnectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, pid int32, maxConn int) ([]ConnectionStat, error) { + return connectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, maxConn, true) +} + +func connectionsPidMaxWithoutUidsWithContext(_ context.Context, _ string, _ int32, _ int, _ bool) ([]ConnectionStat, error) { + return []ConnectionStat{}, common.ErrNotImplementedError } diff --git a/vendor/github.com/shirou/gopsutil/v3/net/net_unix.go b/vendor/github.com/shirou/gopsutil/v4/net/net_unix.go similarity index 61% rename from vendor/github.com/shirou/gopsutil/v3/net/net_unix.go rename to vendor/github.com/shirou/gopsutil/v4/net/net_unix.go index cb846e28a..7c5153d30 100644 --- a/vendor/github.com/shirou/gopsutil/v3/net/net_unix.go +++ b/vendor/github.com/shirou/gopsutil/v4/net/net_unix.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build freebsd || darwin -// +build freebsd darwin package net @@ -11,33 +11,17 @@ import ( "strings" "syscall" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" ) -// Return a list of network connections opened. -func Connections(kind string) ([]ConnectionStat, error) { - return ConnectionsWithContext(context.Background(), kind) -} - func ConnectionsWithContext(ctx context.Context, kind string) ([]ConnectionStat, error) { return ConnectionsPidWithContext(ctx, kind, 0) } -// Return a list of network connections opened returning at most `max` -// connections for each running process. -func ConnectionsMax(kind string, max int) ([]ConnectionStat, error) { - return ConnectionsMaxWithContext(context.Background(), kind, max) -} - -func ConnectionsMaxWithContext(ctx context.Context, kind string, max int) ([]ConnectionStat, error) { +func ConnectionsMaxWithContext(_ context.Context, _ string, _ int) ([]ConnectionStat, error) { return []ConnectionStat{}, common.ErrNotImplementedError } -// Return a list of network connections opened by a process. 
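Several strconv.Atoi calls become strconv.ParseInt(s, 10, 32) throughout this patch; the explicit bit size makes out-of-range input fail at parse time rather than wrap silently when later cast to int32. For example:

package main

import (
	"fmt"
	"strconv"
)

func main() {
	// Atoi parses into the platform's int; on 64-bit hosts this value
	// "fits", and a later int32 conversion would wrap silently.
	v, err := strconv.Atoi("3000000000")
	fmt.Println(int32(v), err) // -1294967296 <nil>

	// ParseInt with bitSize 32 reports the overflow up front.
	_, err = strconv.ParseInt("3000000000", 10, 32)
	fmt.Println(err) // value out of range
}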
-func ConnectionsPid(kind string, pid int32) ([]ConnectionStat, error) { - return ConnectionsPidWithContext(context.Background(), kind, pid) -} - func ConnectionsPidWithContext(ctx context.Context, kind string, pid int32) ([]ConnectionStat, error) { var ret []ConnectionStat @@ -45,11 +29,7 @@ func ConnectionsPidWithContext(ctx context.Context, kind string, pid int32) ([]C switch strings.ToLower(kind) { default: fallthrough - case "": - fallthrough - case "all": - fallthrough - case "inet": + case "", "all", "inet": args = append(args, "tcp", "-i", "udp") case "inet4": args = append(args, "4") @@ -109,11 +89,11 @@ func parseNetLine(line string) (ConnectionStat, error) { f[7] = "unix" } - pid, err := strconv.Atoi(f[1]) + pid, err := strconv.ParseInt(f[1], 10, 32) if err != nil { return ConnectionStat{}, err } - fd, err := strconv.Atoi(strings.Trim(f[3], "u")) + fd, err := strconv.ParseInt(strings.Trim(f[3], "u"), 10, 32) if err != nil { return ConnectionStat{}, fmt.Errorf("unknown fd, %s", f[3]) } @@ -151,13 +131,13 @@ func parseNetLine(line string) (ConnectionStat, error) { return n, nil } -func parseNetAddr(line string) (laddr Addr, raddr Addr, err error) { +func parseNetAddr(line string) (laddr, raddr Addr, err error) { parse := func(l string) (Addr, error) { host, port, err := net.SplitHostPort(l) if err != nil { return Addr{}, fmt.Errorf("wrong addr, %s", l) } - lport, err := strconv.Atoi(port) + lport, err := strconv.ParseInt(port, 10, 32) if err != nil { return Addr{}, err } @@ -179,46 +159,26 @@ func parseNetAddr(line string) (laddr Addr, raddr Addr, err error) { return laddr, raddr, err } -// Return up to `max` network connections opened by a process. -func ConnectionsPidMax(kind string, pid int32, max int) ([]ConnectionStat, error) { - return ConnectionsPidMaxWithContext(context.Background(), kind, pid, max) -} - -func ConnectionsPidMaxWithContext(ctx context.Context, kind string, pid int32, max int) ([]ConnectionStat, error) { +func ConnectionsPidMaxWithContext(_ context.Context, _ string, _ int32, _ int) ([]ConnectionStat, error) { return []ConnectionStat{}, common.ErrNotImplementedError } -// Return a list of network connections opened, omitting `Uids`. -// WithoutUids functions are reliant on implementation details. They may be altered to be an alias for Connections or be -// removed from the API in the future. 
-func ConnectionsWithoutUids(kind string) ([]ConnectionStat, error) { - return ConnectionsWithoutUidsWithContext(context.Background(), kind) -} - func ConnectionsWithoutUidsWithContext(ctx context.Context, kind string) ([]ConnectionStat, error) { return ConnectionsMaxWithoutUidsWithContext(ctx, kind, 0) } -func ConnectionsMaxWithoutUidsWithContext(ctx context.Context, kind string, max int) ([]ConnectionStat, error) { - return ConnectionsPidMaxWithoutUidsWithContext(ctx, kind, 0, max) -} - -func ConnectionsPidWithoutUids(kind string, pid int32) ([]ConnectionStat, error) { - return ConnectionsPidWithoutUidsWithContext(context.Background(), kind, pid) +func ConnectionsMaxWithoutUidsWithContext(ctx context.Context, kind string, maxConn int) ([]ConnectionStat, error) { + return ConnectionsPidMaxWithoutUidsWithContext(ctx, kind, 0, maxConn) } func ConnectionsPidWithoutUidsWithContext(ctx context.Context, kind string, pid int32) ([]ConnectionStat, error) { return ConnectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, 0) } -func ConnectionsPidMaxWithoutUids(kind string, pid int32, max int) ([]ConnectionStat, error) { - return ConnectionsPidMaxWithoutUidsWithContext(context.Background(), kind, pid, max) -} - -func ConnectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, pid int32, max int) ([]ConnectionStat, error) { - return connectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, max) +func ConnectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, pid int32, maxConn int) ([]ConnectionStat, error) { + return connectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, maxConn) } -func connectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, pid int32, max int) ([]ConnectionStat, error) { +func connectionsPidMaxWithoutUidsWithContext(_ context.Context, _ string, _ int32, _ int) ([]ConnectionStat, error) { return []ConnectionStat{}, common.ErrNotImplementedError } diff --git a/vendor/github.com/shirou/gopsutil/v3/net/net_windows.go b/vendor/github.com/shirou/gopsutil/v4/net/net_windows.go similarity index 80% rename from vendor/github.com/shirou/gopsutil/v3/net/net_windows.go rename to vendor/github.com/shirou/gopsutil/v4/net/net_windows.go index 5d384342f..962289699 100644 --- a/vendor/github.com/shirou/gopsutil/v3/net/net_windows.go +++ b/vendor/github.com/shirou/gopsutil/v4/net/net_windows.go @@ -1,18 +1,20 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build windows -// +build windows package net import ( "context" + "errors" "fmt" "net" "os" "syscall" "unsafe" - "github.com/shirou/gopsutil/v3/internal/common" "golang.org/x/sys/windows" + + "github.com/shirou/gopsutil/v4/internal/common" ) var ( @@ -94,7 +96,7 @@ const ( type mibIfRow2 struct { InterfaceLuid uint64 InterfaceIndex uint32 - InterfaceGuid guid + InterfaceGuid guid //nolint:revive //FIXME Alias [maxStringSize + 1]uint16 Description [maxStringSize + 1]uint16 PhysicalAddressLength uint32 @@ -111,7 +113,7 @@ type mibIfRow2 struct { OperStatus uint32 AdminStatus uint32 MediaConnectState uint32 - NetworkGuid guid + NetworkGuid guid //nolint:revive //FIXME ConnectionType uint32 padding1 [pad0for64_4for32]byte TransmitLinkSpeed uint64 @@ -136,11 +138,7 @@ type mibIfRow2 struct { OutQLen uint64 } -func IOCounters(pernic bool) ([]IOCountersStat, error) { - return IOCountersWithContext(context.Background(), pernic) -} - -func IOCountersWithContext(ctx context.Context, pernic bool) ([]IOCountersStat, error) { +func IOCountersWithContext(_ context.Context, pernic bool) ([]IOCountersStat, error) { 
ifs, err := net.Interfaces() if err != nil { return nil, err @@ -195,38 +193,20 @@ func IOCountersWithContext(ctx context.Context, pernic bool) ([]IOCountersStat, } if !pernic { - return getIOCountersAll(counters) + return getIOCountersAll(counters), nil } return counters, nil } -// IOCountersByFile exists just for compatibility with Linux. -func IOCountersByFile(pernic bool, filename string) ([]IOCountersStat, error) { - return IOCountersByFileWithContext(context.Background(), pernic, filename) -} - -func IOCountersByFileWithContext(ctx context.Context, pernic bool, filename string) ([]IOCountersStat, error) { - return IOCounters(pernic) -} - -// Return a list of network connections -// Available kind: -// -// reference to netConnectionKindMap -func Connections(kind string) ([]ConnectionStat, error) { - return ConnectionsWithContext(context.Background(), kind) +func IOCountersByFileWithContext(ctx context.Context, pernic bool, _ string) ([]IOCountersStat, error) { + return IOCountersWithContext(ctx, pernic) } func ConnectionsWithContext(ctx context.Context, kind string) ([]ConnectionStat, error) { return ConnectionsPidWithContext(ctx, kind, 0) } -// ConnectionsPid Return a list of network connections opened by a process -func ConnectionsPid(kind string, pid int32) ([]ConnectionStat, error) { - return ConnectionsPidWithContext(context.Background(), kind, pid) -} - -func ConnectionsPidWithContext(ctx context.Context, kind string, pid int32) ([]ConnectionStat, error) { +func ConnectionsPidWithContext(_ context.Context, kind string, pid int32) ([]ConnectionStat, error) { tmap, ok := netConnectionKindMap[kind] if !ok { return nil, fmt.Errorf("invalid kind, %s", kind) @@ -260,7 +240,7 @@ func getProcInet(kinds []netConnectionKindType, pid int32) ([]ConnectionStat, er func getNetStatWithKind(kindType netConnectionKindType) ([]ConnectionStat, error) { if kindType.filename == "" { - return nil, fmt.Errorf("kind filename must be required") + return nil, errors.New("kind filename must be required") } switch kindType.filename { @@ -277,76 +257,48 @@ func getNetStatWithKind(kindType netConnectionKindType) ([]ConnectionStat, error return nil, fmt.Errorf("invalid kind filename, %s", kindType.filename) } -// Return a list of network connections opened returning at most `max` -// connections for each running process. -func ConnectionsMax(kind string, max int) ([]ConnectionStat, error) { - return ConnectionsMaxWithContext(context.Background(), kind, max) -} - -func ConnectionsMaxWithContext(ctx context.Context, kind string, max int) ([]ConnectionStat, error) { - return []ConnectionStat{}, common.ErrNotImplementedError +// Deprecated: use process.PidsWithContext instead +func PidsWithContext(_ context.Context) ([]int32, error) { + return nil, common.ErrNotImplementedError } -// Return a list of network connections opened, omitting `Uids`. -// WithoutUids functions are reliant on implementation details. They may be altered to be an alias for Connections or be -// removed from the API in the future. 
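getTCPConnections and getUDPConnections (below) size their buffer by retrying while the API reports ERROR_INSUFFICIENT_BUFFER, and the switch to errors.Is also matches wrapped errors. The retry shape, platform-free and with a hypothetical fetch function standing in for the syscall:

package main

import (
	"errors"
	"fmt"
)

var errInsufficientBuffer = errors.New("insufficient buffer")

// fetch stands in for an API call that fills buf and reports the size it
// actually needs, failing until the caller provides enough room.
func fetch(buf []byte, needed *int) error {
	*needed = 16
	if len(buf) < *needed {
		return fmt.Errorf("fetch: %w", errInsufficientBuffer)
	}
	return nil
}

func main() {
	var needed int
	var buf []byte
	for {
		err := fetch(buf, &needed)
		if err == nil {
			break
		}
		// errors.Is sees through wrapping; a plain != comparison would not.
		if !errors.Is(err, errInsufficientBuffer) {
			panic(err)
		}
		buf = make([]byte, needed)
	}
	fmt.Println("buffer size:", len(buf)) // buffer size: 16
}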
-func ConnectionsWithoutUids(kind string) ([]ConnectionStat, error) { - return ConnectionsWithoutUidsWithContext(context.Background(), kind) +func ConnectionsMaxWithContext(ctx context.Context, kind string, maxConn int) ([]ConnectionStat, error) { + return ConnectionsPidMaxWithContext(ctx, kind, 0, maxConn) } func ConnectionsWithoutUidsWithContext(ctx context.Context, kind string) ([]ConnectionStat, error) { return ConnectionsMaxWithoutUidsWithContext(ctx, kind, 0) } -func ConnectionsMaxWithoutUidsWithContext(ctx context.Context, kind string, max int) ([]ConnectionStat, error) { - return ConnectionsPidMaxWithoutUidsWithContext(ctx, kind, 0, max) -} - -func ConnectionsPidWithoutUids(kind string, pid int32) ([]ConnectionStat, error) { - return ConnectionsPidWithoutUidsWithContext(context.Background(), kind, pid) +func ConnectionsMaxWithoutUidsWithContext(ctx context.Context, kind string, maxConn int) ([]ConnectionStat, error) { + return ConnectionsPidMaxWithoutUidsWithContext(ctx, kind, 0, maxConn) } func ConnectionsPidWithoutUidsWithContext(ctx context.Context, kind string, pid int32) ([]ConnectionStat, error) { return ConnectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, 0) } -func ConnectionsPidMaxWithoutUids(kind string, pid int32, max int) ([]ConnectionStat, error) { - return ConnectionsPidMaxWithoutUidsWithContext(context.Background(), kind, pid, max) +func ConnectionsPidMaxWithContext(ctx context.Context, kind string, pid int32, maxConn int) ([]ConnectionStat, error) { + return connectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, maxConn, false) } -func ConnectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, pid int32, max int) ([]ConnectionStat, error) { - return connectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, max) +func ConnectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, pid int32, maxConn int) ([]ConnectionStat, error) { + return connectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, maxConn, true) } -func connectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, pid int32, max int) ([]ConnectionStat, error) { +func connectionsPidMaxWithoutUidsWithContext(_ context.Context, _ string, _ int32, _ int, _ bool) ([]ConnectionStat, error) { return []ConnectionStat{}, common.ErrNotImplementedError } -func FilterCounters() ([]FilterStat, error) { - return FilterCountersWithContext(context.Background()) -} - -func FilterCountersWithContext(ctx context.Context) ([]FilterStat, error) { +func FilterCountersWithContext(_ context.Context) ([]FilterStat, error) { return nil, common.ErrNotImplementedError } -func ConntrackStats(percpu bool) ([]ConntrackStat, error) { - return ConntrackStatsWithContext(context.Background(), percpu) -} - -func ConntrackStatsWithContext(ctx context.Context, percpu bool) ([]ConntrackStat, error) { +func ConntrackStatsWithContext(_ context.Context, _ bool) ([]ConntrackStat, error) { return nil, common.ErrNotImplementedError } -// ProtoCounters returns network statistics for the entire system -// If protocols is empty then all protocols are returned, otherwise -// just the protocols in the list are returned. 
-// Not Implemented for Windows -func ProtoCounters(protocols []string) ([]ProtoCountersStat, error) { - return ProtoCountersWithContext(context.Background(), protocols) -} - -func ProtoCountersWithContext(ctx context.Context, protocols []string) ([]ProtoCountersStat, error) { +func ProtoCountersWithContext(_ context.Context, _ []string) ([]ProtoCountersStat, error) { return nil, common.ErrNotImplementedError } @@ -376,7 +328,7 @@ func getTableUintptr(family uint32, buf []byte) uintptr { return p } -func getTableInfo(filename string, table interface{}) (index, step, length int) { +func getTableInfo(filename string, table any) (index, step, length int) { switch filename { case kindTCP4.filename: index = int(unsafe.Sizeof(table.(pmibTCPTableOwnerPidAll).DwNumEntries)) @@ -410,7 +362,7 @@ func getTCPConnections(family uint32) ([]ConnectionStat, error) { ) if family == 0 { - return nil, fmt.Errorf("faimly must be required") + return nil, errors.New("faimly must be required") } for { @@ -431,7 +383,7 @@ func getTCPConnections(family uint32) ([]ConnectionStat, error) { } } - err := getExtendedTcpTable(p, + err := getExtendedTCPTable(p, &size, true, family, @@ -440,7 +392,7 @@ func getTCPConnections(family uint32) ([]ConnectionStat, error) { if err == nil { break } - if err != windows.ERROR_INSUFFICIENT_BUFFER { + if !errors.Is(err, windows.ERROR_INSUFFICIENT_BUFFER) { return nil, err } buf = make([]byte, size) @@ -491,7 +443,7 @@ func getUDPConnections(family uint32) ([]ConnectionStat, error) { ) if family == 0 { - return nil, fmt.Errorf("faimly must be required") + return nil, errors.New("faimly must be required") } for { @@ -512,7 +464,7 @@ func getUDPConnections(family uint32) ([]ConnectionStat, error) { } } - err := getExtendedUdpTable( + err := getExtendedUDPTable( p, &size, true, @@ -523,7 +475,7 @@ func getUDPConnections(family uint32) ([]ConnectionStat, error) { if err == nil { break } - if err != windows.ERROR_INSUFFICIENT_BUFFER { + if !errors.Is(err, windows.ERROR_INSUFFICIENT_BUFFER) { return nil, err } buf = make([]byte, size) @@ -576,16 +528,16 @@ var tcpStatuses = map[mibTCPState]string{ 12: "DELETE", } -func getExtendedTcpTable(pTcpTable uintptr, pdwSize *uint32, bOrder bool, ulAf uint32, tableClass tcpTableClass, reserved uint32) (errcode error) { - r1, _, _ := syscall.Syscall6(procGetExtendedTCPTable.Addr(), 6, pTcpTable, uintptr(unsafe.Pointer(pdwSize)), getUintptrFromBool(bOrder), uintptr(ulAf), uintptr(tableClass), uintptr(reserved)) +func getExtendedTCPTable(pTCPTable uintptr, pdwSize *uint32, bOrder bool, ulAf uint32, tableClass tcpTableClass, reserved uint32) (errcode error) { + r1, _, _ := syscall.Syscall6(procGetExtendedTCPTable.Addr(), 6, pTCPTable, uintptr(unsafe.Pointer(pdwSize)), getUintptrFromBool(bOrder), uintptr(ulAf), uintptr(tableClass), uintptr(reserved)) if r1 != 0 { errcode = syscall.Errno(r1) } return } -func getExtendedUdpTable(pUdpTable uintptr, pdwSize *uint32, bOrder bool, ulAf uint32, tableClass udpTableClass, reserved uint32) (errcode error) { - r1, _, _ := syscall.Syscall6(procGetExtendedUDPTable.Addr(), 6, pUdpTable, uintptr(unsafe.Pointer(pdwSize)), getUintptrFromBool(bOrder), uintptr(ulAf), uintptr(tableClass), uintptr(reserved)) +func getExtendedUDPTable(pUDPTable uintptr, pdwSize *uint32, bOrder bool, ulAf uint32, tableClass udpTableClass, reserved uint32) (errcode error) { + r1, _, _ := syscall.Syscall6(procGetExtendedUDPTable.Addr(), 6, pUDPTable, uintptr(unsafe.Pointer(pdwSize)), getUintptrFromBool(bOrder), uintptr(ulAf), uintptr(tableClass), 
uintptr(reserved))
 	if r1 != 0 {
 		errcode = syscall.Errno(r1)
 	}
diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process.go b/vendor/github.com/shirou/gopsutil/v4/process/process.go
similarity index 87%
rename from vendor/github.com/shirou/gopsutil/v3/process/process.go
rename to vendor/github.com/shirou/gopsutil/v4/process/process.go
index 1a7fe1b80..0bd4d9e1a 100644
--- a/vendor/github.com/shirou/gopsutil/v3/process/process.go
+++ b/vendor/github.com/shirou/gopsutil/v4/process/process.go
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: BSD-3-Clause
 package process
 
 import (
@@ -9,15 +10,15 @@ import (
 	"sync"
 	"time"
 
-	"github.com/shirou/gopsutil/v3/cpu"
-	"github.com/shirou/gopsutil/v3/internal/common"
-	"github.com/shirou/gopsutil/v3/mem"
-	"github.com/shirou/gopsutil/v3/net"
+	"github.com/shirou/gopsutil/v4/cpu"
+	"github.com/shirou/gopsutil/v4/internal/common"
+	"github.com/shirou/gopsutil/v4/mem"
+	"github.com/shirou/gopsutil/v4/net"
 )
 
 var (
 	invoke                 common.Invoker = common.Invoke{}
-	ErrorNoChildren        = errors.New("process does not have children")
+	ErrorNoChildren        = errors.New("process does not have children") // Deprecated: ErrorNoChildren is never returned by process.Children(), check its returned []*Process slice length instead
 	ErrorProcessNotRunning = errors.New("process does not exist")
 	ErrorNotPermitted      = errors.New("operation not permitted")
 )
@@ -29,9 +30,9 @@ type Process struct {
 	parent         int32
 	parentMutex    sync.RWMutex // for windows ppid cache
 	numCtxSwitches *NumCtxSwitchesStat
-	uids           []int32
-	gids           []int32
-	groups         []int32
+	uids           []uint32
+	gids           []uint32
+	groups         []uint32
 	numThreads     int32
 	memInfo        *MemoryInfoStat
 	sigInfo        *SignalInfoStat
@@ -102,10 +103,18 @@ type RlimitStat struct {
 }
 
 type IOCountersStat struct {
-	ReadCount  uint64 `json:"readCount"`
+	// ReadCount is a number of read I/O operations such as syscalls.
+	ReadCount uint64 `json:"readCount"`
+	// WriteCount is a number of write I/O operations such as syscalls.
 	WriteCount uint64 `json:"writeCount"`
-	ReadBytes  uint64 `json:"readBytes"`
+	// ReadBytes is a number of all I/O read in bytes. This includes disk I/O on Linux and Windows.
+	ReadBytes uint64 `json:"readBytes"`
+	// WriteBytes is a number of all I/O write in bytes. This includes disk I/O on Linux and Windows.
 	WriteBytes uint64 `json:"writeBytes"`
+	// DiskReadBytes is a number of disk I/O read in bytes. Currently only Linux has this value.
+	DiskReadBytes uint64 `json:"diskReadBytes"`
+	// DiskWriteBytes is a number of disk I/O write in bytes. Currently only Linux has this value.
+	DiskWriteBytes uint64 `json:"diskWriteBytes"`
 }
 
 type NumCtxSwitchesStat struct {
@@ -171,6 +180,13 @@ func (p NumCtxSwitchesStat) String() string {
 	return string(s)
 }
 
+var enableBootTimeCache bool
+
+// EnableBootTimeCache changes the cache behavior of BootTime. If true, the BootTime value is cached. Default is false.
+func EnableBootTimeCache(enable bool) {
+	enableBootTimeCache = enable
+}
+
 // Pids returns a slice of process ID list which are running now.
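EnableBootTimeCache is new package-level state: opting in caches the host boot time after the first read, which speeds up repeated process lookups but can go stale if the system clock is stepped. A usage sketch against the public v4 API:

package main

import (
	"fmt"
	"log"
	"os"

	"github.com/shirou/gopsutil/v4/process"
)

func main() {
	// Cache the boot time after the first read; later create-time and
	// CPU calculations reuse it instead of re-reading it.
	process.EnableBootTimeCache(true)

	p, err := process.NewProcess(int32(os.Getpid()))
	if err != nil {
		log.Fatal(err)
	}
	created, err := p.CreateTime() // milliseconds since the epoch
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("created at (ms):", created)
}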
func Pids() ([]int32, error) { return PidsWithContext(context.Background()) @@ -253,13 +269,11 @@ func (p *Process) PercentWithContext(ctx context.Context, interval time.Duration if err != nil { return 0, err } - } else { - if p.lastCPUTimes == nil { - // invoked first time - p.lastCPUTimes = cpuTimes - p.lastCPUTime = now - return 0, nil - } + } else if p.lastCPUTimes == nil { + // invoked first time + p.lastCPUTimes = cpuTimes + p.lastCPUTime = now + return 0, nil } numcpu := runtime.NumCPU() @@ -309,9 +323,13 @@ func calculatePercent(t1, t2 *cpu.TimesStat, delta float64, numcpu int) float64 if delta == 0 { return 0 } - delta_proc := t2.Total() - t1.Total() - overall_percent := ((delta_proc / delta) * 100) * float64(numcpu) - return overall_percent + // https://github.com/giampaolo/psutil/blob/c034e6692cf736b5e87d14418a8153bb03f6cf42/psutil/__init__.py#L1064 + deltaProc := (t2.User - t1.User) + (t2.System - t1.System) + if deltaProc <= 0 { + return 0 + } + overallPercent := ((deltaProc / delta) * 100) * float64(numcpu) + return overallPercent } // MemoryPercent returns how many percent of the total RAM this process uses @@ -341,7 +359,7 @@ func (p *Process) CPUPercent() (float64, error) { } func (p *Process) CPUPercentWithContext(ctx context.Context) (float64, error) { - crt_time, err := p.createTimeWithContext(ctx) + createTime, err := p.createTimeWithContext(ctx) if err != nil { return 0, err } @@ -351,7 +369,7 @@ func (p *Process) CPUPercentWithContext(ctx context.Context) (float64, error) { return 0, err } - created := time.Unix(0, crt_time*int64(time.Millisecond)) + created := time.Unix(0, createTime*int64(time.Millisecond)) totalTime := time.Since(created).Seconds() if totalTime <= 0 { return 0, nil @@ -361,7 +379,7 @@ func (p *Process) CPUPercentWithContext(ctx context.Context) (float64, error) { } // Groups returns all group IDs(include supplementary groups) of the process as a slice of the int -func (p *Process) Groups() ([]int32, error) { +func (p *Process) Groups() ([]uint32, error) { return p.GroupsWithContext(context.Background()) } @@ -388,6 +406,11 @@ func (p *Process) Cmdline() (string, error) { // CmdlineSlice returns the command line arguments of the process as a slice with each // element being an argument. +// +// On Windows, this assumes the command line is encoded according to the convention accepted by +// [golang.org/x/sys/windows.CmdlineToArgv] (the most common convention). If this is not suitable, +// you should instead use [Process.Cmdline] and parse the command line according to your specific +// requirements. func (p *Process) CmdlineSlice() ([]string, error) { return p.CmdlineSliceWithContext(context.Background()) } @@ -426,12 +449,12 @@ func (p *Process) Foreground() (bool, error) { } // Uids returns user ids of the process as a slice of the int -func (p *Process) Uids() ([]int32, error) { +func (p *Process) Uids() ([]uint32, error) { return p.UidsWithContext(context.Background()) } // Gids returns group ids of the process as a slice of the int -func (p *Process) Gids() ([]int32, error) { +func (p *Process) Gids() ([]uint32, error) { return p.GidsWithContext(context.Background()) } @@ -531,8 +554,8 @@ func (p *Process) Connections() ([]net.ConnectionStat, error) { } // ConnectionsMax returns a slice of net.ConnectionStat used by the process at most `max`. 
-func (p *Process) ConnectionsMax(max int) ([]net.ConnectionStat, error) { - return p.ConnectionsMaxWithContext(context.Background(), max) +func (p *Process) ConnectionsMax(maxConn int) ([]net.ConnectionStat, error) { + return p.ConnectionsMaxWithContext(context.Background(), maxConn) } // MemoryMaps get memory maps from /proc/(pid)/smaps diff --git a/vendor/github.com/shirou/gopsutil/v4/process/process_bsd.go b/vendor/github.com/shirou/gopsutil/v4/process/process_bsd.go new file mode 100644 index 000000000..1a58c3eca --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_bsd.go @@ -0,0 +1,76 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build darwin || freebsd || openbsd + +package process + +import ( + "bytes" + "context" + "encoding/binary" + + "github.com/shirou/gopsutil/v4/cpu" + "github.com/shirou/gopsutil/v4/internal/common" +) + +type MemoryInfoExStat struct{} + +type MemoryMapsStat struct{} + +func (p *Process) TgidWithContext(_ context.Context) (int32, error) { + return 0, common.ErrNotImplementedError +} + +func (p *Process) IOniceWithContext(_ context.Context) (int32, error) { + return 0, common.ErrNotImplementedError +} + +func (p *Process) RlimitWithContext(_ context.Context) ([]RlimitStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) RlimitUsageWithContext(_ context.Context, _ bool) ([]RlimitStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) NumCtxSwitchesWithContext(_ context.Context) (*NumCtxSwitchesStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) NumFDsWithContext(_ context.Context) (int32, error) { + return 0, common.ErrNotImplementedError +} + +func (p *Process) CPUAffinityWithContext(_ context.Context) ([]int32, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) MemoryInfoExWithContext(_ context.Context) (*MemoryInfoExStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) PageFaultsWithContext(_ context.Context) (*PageFaultsStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) OpenFilesWithContext(_ context.Context) ([]OpenFilesStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) MemoryMapsWithContext(_ context.Context, _ bool) (*[]MemoryMapsStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) ThreadsWithContext(_ context.Context) (map[int32]*cpu.TimesStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) EnvironWithContext(_ context.Context) ([]string, error) { + return nil, common.ErrNotImplementedError +} + +func parseKinfoProc(buf []byte) (KinfoProc, error) { + var k KinfoProc + br := bytes.NewReader(buf) + err := common.Read(br, binary.LittleEndian, &k) + return k, err +} diff --git a/vendor/github.com/shirou/gopsutil/v4/process/process_darwin.go b/vendor/github.com/shirou/gopsutil/v4/process/process_darwin.go new file mode 100644 index 000000000..91f393203 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_darwin.go @@ -0,0 +1,483 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build darwin + +package process + +import ( + "bytes" + "context" + "encoding/binary" + "errors" + "fmt" + "path/filepath" + "runtime" + "sort" + "strconv" + "strings" + "unsafe" + + "golang.org/x/sys/unix" + + "github.com/shirou/gopsutil/v4/cpu" + "github.com/shirou/gopsutil/v4/internal/common" + "github.com/shirou/gopsutil/v4/net" +) + +// copied from sys/sysctl.h +const ( + 
CTLKern = 1 // "high kernel": proc, limits + KernProc = 14 // struct: process entries + KernProcPID = 1 // by process id + KernProcProc = 8 // only return procs + KernProcAll = 0 // everything + KernProcPathname = 12 // path to executable +) + +type _Ctype_struct___0 struct { //nolint:revive //FIXME + Pad uint64 +} + +func pidsWithContext(_ context.Context) ([]int32, error) { + var ret []int32 + + kprocs, err := unix.SysctlKinfoProcSlice("kern.proc.all") + if err != nil { + return ret, err + } + + for i := range kprocs { + proc := &kprocs[i] + ret = append(ret, int32(proc.Proc.P_pid)) + } + + return ret, nil +} + +func (p *Process) PpidWithContext(_ context.Context) (int32, error) { + k, err := p.getKProc() + if err != nil { + return 0, err + } + + return k.Eproc.Ppid, nil +} + +func (p *Process) NameWithContext(ctx context.Context) (string, error) { + k, err := p.getKProc() + if err != nil { + return "", err + } + + name := common.ByteToString(k.Proc.P_comm[:]) + + if len(name) >= 15 { + cmdName, err := p.cmdNameWithContext(ctx) + if err != nil { + return "", err + } + if cmdName != "" { + extendedName := filepath.Base(cmdName) + if strings.HasPrefix(extendedName, p.name) { + name = extendedName + } + } + } + + return name, nil +} + +func (p *Process) createTimeWithContext(_ context.Context) (int64, error) { + k, err := p.getKProc() + if err != nil { + return 0, err + } + + return k.Proc.P_starttime.Sec*1000 + int64(k.Proc.P_starttime.Usec)/1000, nil +} + +func (p *Process) StatusWithContext(ctx context.Context) ([]string, error) { + r, err := callPsWithContext(ctx, "state", p.Pid, false, false) + if err != nil { + return []string{""}, err + } + status := convertStatusChar(r[0][0][0:1]) + return []string{status}, err +} + +func (p *Process) ForegroundWithContext(ctx context.Context) (bool, error) { + // see https://github.com/shirou/gopsutil/issues/596#issuecomment-432707831 for implementation details + pid := p.Pid + out, err := invoke.CommandWithContext(ctx, "ps", "-o", "stat=", "-p", strconv.Itoa(int(pid))) + if err != nil { + return false, err + } + return strings.IndexByte(string(out), '+') != -1, nil +} + +func (p *Process) UidsWithContext(_ context.Context) ([]uint32, error) { + k, err := p.getKProc() + if err != nil { + return nil, err + } + + // See: http://unix.superglobalmegacorp.com/Net2/newsrc/sys/ucred.h.html + userEffectiveUID := uint32(k.Eproc.Ucred.Uid) + + return []uint32{userEffectiveUID}, nil +} + +func (p *Process) GidsWithContext(_ context.Context) ([]uint32, error) { + k, err := p.getKProc() + if err != nil { + return nil, err + } + + gids := make([]uint32, 0, 3) + gids = append(gids, uint32(k.Eproc.Pcred.P_rgid), uint32(k.Eproc.Pcred.P_rgid), uint32(k.Eproc.Pcred.P_svgid)) + + return gids, nil +} + +func (p *Process) GroupsWithContext(_ context.Context) ([]uint32, error) { + return nil, common.ErrNotImplementedError + // k, err := p.getKProc() + // if err != nil { + // return nil, err + // } + + // groups := make([]int32, k.Eproc.Ucred.Ngroups) + // for i := int16(0); i < k.Eproc.Ucred.Ngroups; i++ { + // groups[i] = int32(k.Eproc.Ucred.Groups[i]) + // } + + // return groups, nil +} + +func (p *Process) TerminalWithContext(_ context.Context) (string, error) { + return "", common.ErrNotImplementedError + /* + k, err := p.getKProc() + if err != nil { + return "", err + } + + ttyNr := uint64(k.Eproc.Tdev) + termmap, err := getTerminalMap() + if err != nil { + return "", err + } + + return termmap[ttyNr], nil + */ +} + +func (p *Process) NiceWithContext(_ 
context.Context) (int32, error) {
+	k, err := p.getKProc()
+	if err != nil {
+		return 0, err
+	}
+	return int32(k.Proc.P_nice), nil
+}
+
+func (p *Process) IOCountersWithContext(_ context.Context) (*IOCountersStat, error) {
+	return nil, common.ErrNotImplementedError
+}
+
+func (p *Process) ChildrenWithContext(ctx context.Context) ([]*Process, error) {
+	procs, err := ProcessesWithContext(ctx)
+	if err != nil {
+		return nil, nil
+	}
+	ret := make([]*Process, 0, len(procs))
+	for _, proc := range procs {
+		ppid, err := proc.PpidWithContext(ctx)
+		if err != nil {
+			continue
+		}
+		if ppid == p.Pid {
+			ret = append(ret, proc)
+		}
+	}
+	sort.Slice(ret, func(i, j int) bool { return ret[i].Pid < ret[j].Pid })
+	return ret, nil
+}
+
+func (p *Process) ConnectionsWithContext(ctx context.Context) ([]net.ConnectionStat, error) {
+	return net.ConnectionsPidWithContext(ctx, "all", p.Pid)
+}
+
+func (p *Process) ConnectionsMaxWithContext(ctx context.Context, maxConn int) ([]net.ConnectionStat, error) {
+	return net.ConnectionsPidMaxWithContext(ctx, "all", p.Pid, maxConn)
+}
+
+func ProcessesWithContext(ctx context.Context) ([]*Process, error) {
+	out := []*Process{}
+
+	pids, err := PidsWithContext(ctx)
+	if err != nil {
+		return out, err
+	}
+
+	for _, pid := range pids {
+		p, err := NewProcessWithContext(ctx, pid)
+		if err != nil {
+			continue
+		}
+		out = append(out, p)
+	}
+
+	return out, nil
+}
+
+// Returns a proc as defined here:
+// http://unix.superglobalmegacorp.com/Net2/newsrc/sys/kinfo_proc.h.html
+func (p *Process) getKProc() (*unix.KinfoProc, error) {
+	return unix.SysctlKinfoProc("kern.proc.pid", int(p.Pid))
+}
+
+// callPsWithContext runs the ps command.
+// The returned value omits the header line, and each remaining line is split on
+// spaces; the caller is responsible for further parsing (a wrong arg is not detected).
+// If the passed pid is 0, information about all processes is returned.
+func callPsWithContext(ctx context.Context, arg string, pid int32, threadOption, nameOption bool) ([][]string, error) {
+	var cmd []string
+	switch {
+	case pid == 0: // will get from all processes.
+		cmd = []string{"-ax", "-o", arg}
+	case threadOption:
+		cmd = []string{"-x", "-o", arg, "-M", "-p", strconv.Itoa(int(pid))}
+	default:
+		cmd = []string{"-x", "-o", arg, "-p", strconv.Itoa(int(pid))}
+	}
+	if nameOption {
+		cmd = append(cmd, "-c")
+	}
+	out, err := invoke.CommandWithContext(ctx, "ps", cmd...)
+ if err != nil { + return [][]string{}, err + } + lines := strings.Split(string(out), "\n") + + var ret [][]string + for _, l := range lines[1:] { + var lr []string + if nameOption { + lr = append(lr, l) + } else { + for _, r := range strings.Split(l, " ") { + if r == "" { + continue + } + lr = append(lr, strings.TrimSpace(r)) + } + } + if len(lr) != 0 { + ret = append(ret, lr) + } + } + + return ret, nil +} + +var ( + procPidPath common.ProcPidPathFunc + procPidInfo common.ProcPidInfoFunc + machTimeBaseInfo common.MachTimeBaseInfoFunc +) + +func registerFuncs() (*common.Library, error) { + lib, err := common.NewLibrary(common.System) + if err != nil { + return nil, err + } + + procPidPath = common.GetFunc[common.ProcPidPathFunc](lib, common.ProcPidPathSym) + procPidInfo = common.GetFunc[common.ProcPidInfoFunc](lib, common.ProcPidInfoSym) + machTimeBaseInfo = common.GetFunc[common.MachTimeBaseInfoFunc](lib, common.MachTimeBaseInfoSym) + + return lib, nil +} + +func getTimeScaleToNanoSeconds() float64 { + var timeBaseInfo common.MachTimeBaseInfo + + machTimeBaseInfo(uintptr(unsafe.Pointer(&timeBaseInfo))) + + return float64(timeBaseInfo.Numer) / float64(timeBaseInfo.Denom) +} + +func (p *Process) ExeWithContext(_ context.Context) (string, error) { + lib, err := registerFuncs() + if err != nil { + return "", err + } + defer lib.Close() + + buf := common.NewCStr(common.PROC_PIDPATHINFO_MAXSIZE) + ret := procPidPath(p.Pid, buf.Addr(), common.PROC_PIDPATHINFO_MAXSIZE) + + if ret <= 0 { + return "", fmt.Errorf("unknown error: proc_pidpath returned %d", ret) + } + + return buf.GoString(), nil +} + +// sys/proc_info.h +type vnodePathInfo struct { + _ [152]byte + vipPath [common.MAXPATHLEN]byte + _ [1176]byte +} + +// CwdWithContext retrieves the Current Working Directory for the given process. +// It uses the proc_pidinfo from libproc and will only work for processes the +// EUID can access. Otherwise "operation not permitted" will be returned as the +// error. +// Note: This might also work for other *BSD OSs. +func (p *Process) CwdWithContext(_ context.Context) (string, error) { + lib, err := registerFuncs() + if err != nil { + return "", err + } + defer lib.Close() + + // Lock OS thread to ensure the errno does not change + runtime.LockOSThread() + defer runtime.UnlockOSThread() + + var vpi vnodePathInfo + const vpiSize = int32(unsafe.Sizeof(vpi)) + ret := procPidInfo(p.Pid, common.PROC_PIDVNODEPATHINFO, 0, uintptr(unsafe.Pointer(&vpi)), vpiSize) + errno, _ := lib.Dlsym("errno") + err = *(**unix.Errno)(unsafe.Pointer(&errno)) + if errors.Is(err, unix.EPERM) { + return "", ErrorNotPermitted + } + + if ret <= 0 { + return "", fmt.Errorf("unknown error: proc_pidinfo returned %d", ret) + } + + if ret != vpiSize { + return "", fmt.Errorf("too few bytes; expected %d, got %d", vpiSize, ret) + } + return common.GoString(&vpi.vipPath[0]), nil +} + +func procArgs(pid int32) ([]byte, int, error) { + procargs, _, err := common.CallSyscall([]int32{common.CTL_KERN, common.KERN_PROCARGS2, pid}) + if err != nil { + return nil, 0, err + } + + // The first 4 bytes indicate the number of arguments. + nargs := procargs[:4] + return procargs, int(binary.LittleEndian.Uint32(nargs)), nil +} + +func (p *Process) CmdlineSliceWithContext(_ context.Context) ([]string, error) { + return p.cmdlineSlice() +} + +func (p *Process) cmdlineSlice() ([]string, error) { + pargs, nargs, err := procArgs(p.Pid) + if err != nil { + return nil, err + } + // The first bytes hold the nargs int, skip it. 
+ args := bytes.Split((pargs)[unsafe.Sizeof(int(0)):], []byte{0}) + var argStr string + // The first element is the actual binary/command path. + // command := args[0] + var argSlice []string + // var envSlice []string + // All other, non-zero elements are arguments. The first "nargs" elements + // are the arguments. Everything else in the slice is then the environment + // of the process. + for _, arg := range args[1:] { + argStr = string(arg) + if argStr != "" { + if nargs > 0 { + argSlice = append(argSlice, argStr) + nargs-- + continue + } + break + // envSlice = append(envSlice, argStr) + } + } + return argSlice, err +} + +// cmdNameWithContext returns the command name (including spaces) without any arguments +func (p *Process) cmdNameWithContext(_ context.Context) (string, error) { + r, err := p.cmdlineSlice() + if err != nil { + return "", err + } + + if len(r) == 0 { + return "", nil + } + + return r[0], err +} + +func (p *Process) CmdlineWithContext(ctx context.Context) (string, error) { + r, err := p.CmdlineSliceWithContext(ctx) + if err != nil { + return "", err + } + return strings.Join(r, " "), err +} + +func (p *Process) NumThreadsWithContext(_ context.Context) (int32, error) { + lib, err := registerFuncs() + if err != nil { + return 0, err + } + defer lib.Close() + + var ti ProcTaskInfo + procPidInfo(p.Pid, common.PROC_PIDTASKINFO, 0, uintptr(unsafe.Pointer(&ti)), int32(unsafe.Sizeof(ti))) + + return int32(ti.Threadnum), nil +} + +func (p *Process) TimesWithContext(_ context.Context) (*cpu.TimesStat, error) { + lib, err := registerFuncs() + if err != nil { + return nil, err + } + defer lib.Close() + + var ti ProcTaskInfo + procPidInfo(p.Pid, common.PROC_PIDTASKINFO, 0, uintptr(unsafe.Pointer(&ti)), int32(unsafe.Sizeof(ti))) + + timescaleToNanoSeconds := getTimeScaleToNanoSeconds() + ret := &cpu.TimesStat{ + CPU: "cpu", + User: float64(ti.Total_user) * timescaleToNanoSeconds / 1e9, + System: float64(ti.Total_system) * timescaleToNanoSeconds / 1e9, + } + return ret, nil +} + +func (p *Process) MemoryInfoWithContext(_ context.Context) (*MemoryInfoStat, error) { + lib, err := registerFuncs() + if err != nil { + return nil, err + } + defer lib.Close() + + var ti ProcTaskInfo + procPidInfo(p.Pid, common.PROC_PIDTASKINFO, 0, uintptr(unsafe.Pointer(&ti)), int32(unsafe.Sizeof(ti))) + + ret := &MemoryInfoStat{ + RSS: uint64(ti.Resident_size), + VMS: uint64(ti.Virtual_size), + Swap: uint64(ti.Pageins), + } + return ret, nil +} diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_darwin_amd64.go b/vendor/github.com/shirou/gopsutil/v4/process/process_darwin_amd64.go similarity index 87% rename from vendor/github.com/shirou/gopsutil/v3/process/process_darwin_amd64.go rename to vendor/github.com/shirou/gopsutil/v4/process/process_darwin_amd64.go index b353e5eac..890a5d533 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_darwin_amd64.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_darwin_amd64.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause // Created by cgo -godefs - DO NOT EDIT // cgo -godefs types_darwin.go @@ -211,6 +212,27 @@ type Posix_cred struct { type Label struct{} +type ProcTaskInfo struct { + Virtual_size uint64 + Resident_size uint64 + Total_user uint64 + Total_system uint64 + Threads_user uint64 + Threads_system uint64 + Policy int32 + Faults int32 + Pageins int32 + Cow_faults int32 + Messages_sent int32 + Messages_received int32 + Syscalls_mach int32 + Syscalls_unix int32 + Csw int32 + Threadnum int32 + Numrunning int32 
+ Priority int32 +} + type AuditinfoAddr struct { Auid uint32 Mask AuMask diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_darwin_arm64.go b/vendor/github.com/shirou/gopsutil/v4/process/process_darwin_arm64.go similarity index 85% rename from vendor/github.com/shirou/gopsutil/v3/process/process_darwin_arm64.go rename to vendor/github.com/shirou/gopsutil/v4/process/process_darwin_arm64.go index cbd6bdc79..8075cf227 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_darwin_arm64.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_darwin_arm64.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build darwin && arm64 -// +build darwin,arm64 // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs process/types_darwin.go @@ -190,6 +190,27 @@ type Posix_cred struct{} type Label struct{} +type ProcTaskInfo struct { + Virtual_size uint64 + Resident_size uint64 + Total_user uint64 + Total_system uint64 + Threads_user uint64 + Threads_system uint64 + Policy int32 + Faults int32 + Pageins int32 + Cow_faults int32 + Messages_sent int32 + Messages_received int32 + Syscalls_mach int32 + Syscalls_unix int32 + Csw int32 + Threadnum int32 + Numrunning int32 + Priority int32 +} + type AuditinfoAddr struct { Auid uint32 Mask AuMask diff --git a/vendor/github.com/shirou/gopsutil/v4/process/process_fallback.go b/vendor/github.com/shirou/gopsutil/v4/process/process_fallback.go new file mode 100644 index 000000000..b01429734 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_fallback.go @@ -0,0 +1,203 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build !darwin && !linux && !freebsd && !openbsd && !windows && !solaris && !plan9 + +package process + +import ( + "context" + "syscall" + + "github.com/shirou/gopsutil/v4/cpu" + "github.com/shirou/gopsutil/v4/internal/common" + "github.com/shirou/gopsutil/v4/net" +) + +type Signal = syscall.Signal + +type MemoryMapsStat struct { + Path string `json:"path"` + Rss uint64 `json:"rss"` + Size uint64 `json:"size"` + Pss uint64 `json:"pss"` + SharedClean uint64 `json:"sharedClean"` + SharedDirty uint64 `json:"sharedDirty"` + PrivateClean uint64 `json:"privateClean"` + PrivateDirty uint64 `json:"privateDirty"` + Referenced uint64 `json:"referenced"` + Anonymous uint64 `json:"anonymous"` + Swap uint64 `json:"swap"` +} + +type MemoryInfoExStat struct{} + +func pidsWithContext(_ context.Context) ([]int32, error) { + return nil, common.ErrNotImplementedError +} + +func ProcessesWithContext(_ context.Context) ([]*Process, error) { + return nil, common.ErrNotImplementedError +} + +func PidExistsWithContext(_ context.Context, _ int32) (bool, error) { + return false, common.ErrNotImplementedError +} + +func (p *Process) PpidWithContext(_ context.Context) (int32, error) { + return 0, common.ErrNotImplementedError +} + +func (p *Process) NameWithContext(_ context.Context) (string, error) { + return "", common.ErrNotImplementedError +} + +func (p *Process) TgidWithContext(_ context.Context) (int32, error) { + return 0, common.ErrNotImplementedError +} + +func (p *Process) ExeWithContext(_ context.Context) (string, error) { + return "", common.ErrNotImplementedError +} + +func (p *Process) CmdlineWithContext(_ context.Context) (string, error) { + return "", common.ErrNotImplementedError +} + +func (p *Process) CmdlineSliceWithContext(_ context.Context) ([]string, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) createTimeWithContext(_ context.Context) (int64, error) { + 
return 0, common.ErrNotImplementedError +} + +func (p *Process) CwdWithContext(_ context.Context) (string, error) { + return "", common.ErrNotImplementedError +} + +func (p *Process) StatusWithContext(_ context.Context) ([]string, error) { + return []string{""}, common.ErrNotImplementedError +} + +func (p *Process) ForegroundWithContext(_ context.Context) (bool, error) { + return false, common.ErrNotImplementedError +} + +func (p *Process) UidsWithContext(_ context.Context) ([]uint32, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) GidsWithContext(_ context.Context) ([]uint32, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) GroupsWithContext(_ context.Context) ([]uint32, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) TerminalWithContext(_ context.Context) (string, error) { + return "", common.ErrNotImplementedError +} + +func (p *Process) NiceWithContext(_ context.Context) (int32, error) { + return 0, common.ErrNotImplementedError +} + +func (p *Process) IOniceWithContext(_ context.Context) (int32, error) { + return 0, common.ErrNotImplementedError +} + +func (p *Process) RlimitWithContext(_ context.Context) ([]RlimitStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) RlimitUsageWithContext(_ context.Context, _ bool) ([]RlimitStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) IOCountersWithContext(_ context.Context) (*IOCountersStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) NumCtxSwitchesWithContext(_ context.Context) (*NumCtxSwitchesStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) NumFDsWithContext(_ context.Context) (int32, error) { + return 0, common.ErrNotImplementedError +} + +func (p *Process) NumThreadsWithContext(_ context.Context) (int32, error) { + return 0, common.ErrNotImplementedError +} + +func (p *Process) ThreadsWithContext(_ context.Context) (map[int32]*cpu.TimesStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) TimesWithContext(_ context.Context) (*cpu.TimesStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) CPUAffinityWithContext(_ context.Context) ([]int32, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) MemoryInfoWithContext(_ context.Context) (*MemoryInfoStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) MemoryInfoExWithContext(_ context.Context) (*MemoryInfoExStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) PageFaultsWithContext(_ context.Context) (*PageFaultsStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) ChildrenWithContext(_ context.Context) ([]*Process, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) OpenFilesWithContext(_ context.Context) ([]OpenFilesStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) ConnectionsWithContext(_ context.Context) ([]net.ConnectionStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) ConnectionsMaxWithContext(_ context.Context, _ int) ([]net.ConnectionStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) MemoryMapsWithContext(_ context.Context, _ bool) (*[]MemoryMapsStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) SendSignalWithContext(_ context.Context, _ Signal) error { + 
return common.ErrNotImplementedError +} + +func (p *Process) SuspendWithContext(_ context.Context) error { + return common.ErrNotImplementedError +} + +func (p *Process) ResumeWithContext(_ context.Context) error { + return common.ErrNotImplementedError +} + +func (p *Process) TerminateWithContext(_ context.Context) error { + return common.ErrNotImplementedError +} + +func (p *Process) KillWithContext(_ context.Context) error { + return common.ErrNotImplementedError +} + +func (p *Process) UsernameWithContext(_ context.Context) (string, error) { + return "", common.ErrNotImplementedError +} + +func (p *Process) EnvironWithContext(_ context.Context) ([]string, error) { + return nil, common.ErrNotImplementedError +} diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_freebsd.go b/vendor/github.com/shirou/gopsutil/v4/process/process_freebsd.go similarity index 67% rename from vendor/github.com/shirou/gopsutil/v3/process/process_freebsd.go rename to vendor/github.com/shirou/gopsutil/v4/process/process_freebsd.go index 40b10e14f..6df31421c 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_freebsd.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_freebsd.go @@ -1,19 +1,23 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build freebsd -// +build freebsd package process import ( "bytes" "context" + "encoding/binary" + "errors" "path/filepath" + "sort" "strconv" "strings" - cpu "github.com/shirou/gopsutil/v3/cpu" - "github.com/shirou/gopsutil/v3/internal/common" - net "github.com/shirou/gopsutil/v3/net" "golang.org/x/sys/unix" + + "github.com/shirou/gopsutil/v4/cpu" + "github.com/shirou/gopsutil/v4/internal/common" + "github.com/shirou/gopsutil/v4/net" ) func pidsWithContext(ctx context.Context) ([]int32, error) { @@ -30,7 +34,7 @@ func pidsWithContext(ctx context.Context) ([]int32, error) { return ret, nil } -func (p *Process) PpidWithContext(ctx context.Context) (int32, error) { +func (p *Process) PpidWithContext(_ context.Context) (int32, error) { k, err := p.getKProc() if err != nil { return 0, err @@ -62,11 +66,28 @@ func (p *Process) NameWithContext(ctx context.Context) (string, error) { return name, nil } -func (p *Process) CwdWithContext(ctx context.Context) (string, error) { - return "", common.ErrNotImplementedError +func (p *Process) CwdWithContext(_ context.Context) (string, error) { + mib := []int32{CTLKern, KernProc, KernProcCwd, p.Pid} + buf, length, err := common.CallSyscall(mib) + if err != nil { + return "", err + } + + if length != sizeOfKinfoFile { + return "", errors.New("unexpected size of KinfoFile") + } + + var k kinfoFile + br := bytes.NewReader(buf) + if err := common.Read(br, binary.LittleEndian, &k); err != nil { + return "", err + } + cwd := common.IntToString(k.Path[:]) + + return cwd, nil } -func (p *Process) ExeWithContext(ctx context.Context) (string, error) { +func (p *Process) ExeWithContext(_ context.Context) (string, error) { mib := []int32{CTLKern, KernProc, KernProcPathname, p.Pid} buf, _, err := common.CallSyscall(mib) if err != nil { @@ -76,23 +97,20 @@ func (p *Process) ExeWithContext(ctx context.Context) (string, error) { return strings.Trim(string(buf), "\x00"), nil } -func (p *Process) CmdlineWithContext(ctx context.Context) (string, error) { +func (p *Process) CmdlineWithContext(_ context.Context) (string, error) { mib := []int32{CTLKern, KernProc, KernProcArgs, p.Pid} buf, _, err := common.CallSyscall(mib) if err != nil { return "", err } ret := strings.FieldsFunc(string(buf), func(r rune) bool { - if r == 
'\u0000' { - return true - } - return false + return r == '\u0000' }) return strings.Join(ret, " "), nil } -func (p *Process) CmdlineSliceWithContext(ctx context.Context) ([]string, error) { +func (p *Process) CmdlineSliceWithContext(_ context.Context) ([]string, error) { mib := []int32{CTLKern, KernProc, KernProcArgs, p.Pid} buf, _, err := common.CallSyscall(mib) if err != nil { @@ -113,7 +131,7 @@ func (p *Process) CmdlineSliceWithContext(ctx context.Context) ([]string, error) return strParts, nil } -func (p *Process) createTimeWithContext(ctx context.Context) (int64, error) { +func (p *Process) createTimeWithContext(_ context.Context) (int64, error) { k, err := p.getKProc() if err != nil { return 0, err @@ -121,7 +139,7 @@ func (p *Process) createTimeWithContext(ctx context.Context) (int64, error) { return int64(k.Start.Sec)*1000 + int64(k.Start.Usec)/1000, nil } -func (p *Process) StatusWithContext(ctx context.Context) ([]string, error) { +func (p *Process) StatusWithContext(_ context.Context) ([]string, error) { k, err := p.getKProc() if err != nil { return []string{""}, err @@ -157,46 +175,46 @@ func (p *Process) ForegroundWithContext(ctx context.Context) (bool, error) { return strings.IndexByte(string(out), '+') != -1, nil } -func (p *Process) UidsWithContext(ctx context.Context) ([]int32, error) { +func (p *Process) UidsWithContext(_ context.Context) ([]uint32, error) { k, err := p.getKProc() if err != nil { return nil, err } - uids := make([]int32, 0, 3) + uids := make([]uint32, 0, 3) - uids = append(uids, int32(k.Ruid), int32(k.Uid), int32(k.Svuid)) + uids = append(uids, uint32(k.Ruid), uint32(k.Uid), uint32(k.Svuid)) return uids, nil } -func (p *Process) GidsWithContext(ctx context.Context) ([]int32, error) { +func (p *Process) GidsWithContext(_ context.Context) ([]uint32, error) { k, err := p.getKProc() if err != nil { return nil, err } - gids := make([]int32, 0, 3) - gids = append(gids, int32(k.Rgid), int32(k.Ngroups), int32(k.Svgid)) + gids := make([]uint32, 0, 3) + gids = append(gids, uint32(k.Rgid), uint32(k.Ngroups), uint32(k.Svgid)) return gids, nil } -func (p *Process) GroupsWithContext(ctx context.Context) ([]int32, error) { +func (p *Process) GroupsWithContext(_ context.Context) ([]uint32, error) { k, err := p.getKProc() if err != nil { return nil, err } - groups := make([]int32, k.Ngroups) + groups := make([]uint32, k.Ngroups) for i := int16(0); i < k.Ngroups; i++ { - groups[i] = int32(k.Groups[i]) + groups[i] = uint32(k.Groups[i]) } return groups, nil } -func (p *Process) TerminalWithContext(ctx context.Context) (string, error) { +func (p *Process) TerminalWithContext(_ context.Context) (string, error) { k, err := p.getKProc() if err != nil { return "", err @@ -212,7 +230,7 @@ func (p *Process) TerminalWithContext(ctx context.Context) (string, error) { return termmap[ttyNr], nil } -func (p *Process) NiceWithContext(ctx context.Context) (int32, error) { +func (p *Process) NiceWithContext(_ context.Context) (int32, error) { k, err := p.getKProc() if err != nil { return 0, err @@ -220,7 +238,7 @@ func (p *Process) NiceWithContext(ctx context.Context) (int32, error) { return int32(k.Nice), nil } -func (p *Process) IOCountersWithContext(ctx context.Context) (*IOCountersStat, error) { +func (p *Process) IOCountersWithContext(_ context.Context) (*IOCountersStat, error) { k, err := p.getKProc() if err != nil { return nil, err @@ -231,7 +249,7 @@ func (p *Process) IOCountersWithContext(ctx context.Context) (*IOCountersStat, e }, nil } -func (p *Process) 
NumThreadsWithContext(ctx context.Context) (int32, error) { +func (p *Process) NumThreadsWithContext(_ context.Context) (int32, error) { k, err := p.getKProc() if err != nil { return 0, err @@ -240,7 +258,7 @@ func (p *Process) NumThreadsWithContext(ctx context.Context) (int32, error) { return k.Numthreads, nil } -func (p *Process) TimesWithContext(ctx context.Context) (*cpu.TimesStat, error) { +func (p *Process) TimesWithContext(_ context.Context) (*cpu.TimesStat, error) { k, err := p.getKProc() if err != nil { return nil, err @@ -252,7 +270,7 @@ func (p *Process) TimesWithContext(ctx context.Context) (*cpu.TimesStat, error) }, nil } -func (p *Process) MemoryInfoWithContext(ctx context.Context) (*MemoryInfoStat, error) { +func (p *Process) MemoryInfoWithContext(_ context.Context) (*MemoryInfoStat, error) { k, err := p.getKProc() if err != nil { return nil, err @@ -270,18 +288,21 @@ func (p *Process) MemoryInfoWithContext(ctx context.Context) (*MemoryInfoStat, e } func (p *Process) ChildrenWithContext(ctx context.Context) ([]*Process, error) { - pids, err := common.CallPgrepWithContext(ctx, invoke, p.Pid) + procs, err := ProcessesWithContext(ctx) if err != nil { - return nil, err + return nil, nil } - ret := make([]*Process, 0, len(pids)) - for _, pid := range pids { - np, err := NewProcessWithContext(ctx, pid) + ret := make([]*Process, 0, len(procs)) + for _, proc := range procs { + ppid, err := proc.PpidWithContext(ctx) if err != nil { - return nil, err + continue + } + if ppid == p.Pid { + ret = append(ret, proc) } - ret = append(ret, np) } + sort.Slice(ret, func(i, j int) bool { return ret[i].Pid < ret[j].Pid }) return ret, nil } @@ -289,8 +310,8 @@ func (p *Process) ConnectionsWithContext(ctx context.Context) ([]net.ConnectionS return net.ConnectionsPidWithContext(ctx, "all", p.Pid) } -func (p *Process) ConnectionsMaxWithContext(ctx context.Context, max int) ([]net.ConnectionStat, error) { - return net.ConnectionsPidMaxWithContext(ctx, "all", p.Pid, max) +func (p *Process) ConnectionsMaxWithContext(ctx context.Context, maxConn int) ([]net.ConnectionStat, error) { + return net.ConnectionsPidMaxWithContext(ctx, "all", p.Pid, maxConn) } func ProcessesWithContext(ctx context.Context) ([]*Process, error) { @@ -331,7 +352,7 @@ func (p *Process) getKProc() (*KinfoProc, error) { return nil, err } if length != sizeOfKinfoProc { - return nil, err + return nil, errors.New("unexpected size of KinfoProc") } k, err := parseKinfoProc(buf) diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_freebsd_386.go b/vendor/github.com/shirou/gopsutil/v4/process/process_freebsd_386.go similarity index 83% rename from vendor/github.com/shirou/gopsutil/v3/process/process_freebsd_386.go rename to vendor/github.com/shirou/gopsutil/v4/process/process_freebsd_386.go index 08ab333b4..0193ba25b 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_freebsd_386.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_freebsd_386.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause // Created by cgo -godefs - DO NOT EDIT // cgo -godefs types_freebsd.go @@ -10,6 +11,7 @@ const ( KernProcProc = 8 KernProcPathname = 12 KernProcArgs = 7 + KernProcCwd = 42 ) const ( @@ -23,6 +25,7 @@ const ( const ( sizeOfKinfoVmentry = 0x488 sizeOfKinfoProc = 0x300 + sizeOfKinfoFile = 0x570 // TODO: should be changed by running on the target machine ) const ( @@ -190,3 +193,26 @@ type KinfoVmentry struct { X_kve_ispare [12]int32 Path [1024]int8 } + +// TODO: should be changed by running on the target 
machine +type kinfoFile struct { + Structsize int32 + Type int32 + Fd int32 + Ref_count int32 + Flags int32 + Pad0 int32 + Offset int64 + Anon0 [304]byte + Status uint16 + Pad1 uint16 + X_kf_ispare0 int32 + Cap_rights capRights + X_kf_cap_spare uint64 + Path [1024]int8 // changed from uint8 by hand +} + +// TODO: should be changed by running on the target machine +type capRights struct { + Rights [2]uint64 +} diff --git a/vendor/github.com/shirou/gopsutil/v4/process/process_freebsd_amd64.go b/vendor/github.com/shirou/gopsutil/v4/process/process_freebsd_amd64.go new file mode 100644 index 000000000..67970f64f --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_freebsd_amd64.go @@ -0,0 +1,224 @@ +// SPDX-License-Identifier: BSD-3-Clause +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs types_freebsd.go + +package process + +const ( + CTLKern = 1 + KernProc = 14 + KernProcPID = 1 + KernProcProc = 8 + KernProcPathname = 12 + KernProcArgs = 7 + KernProcCwd = 42 +) + +const ( + sizeofPtr = 0x8 + sizeofShort = 0x2 + sizeofInt = 0x4 + sizeofLong = 0x8 + sizeofLongLong = 0x8 +) + +const ( + sizeOfKinfoVmentry = 0x488 + sizeOfKinfoProc = 0x440 + sizeOfKinfoFile = 0x570 +) + +const ( + SIDL = 1 + SRUN = 2 + SSLEEP = 3 + SSTOP = 4 + SZOMB = 5 + SWAIT = 6 + SLOCK = 7 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int64 + _C_long_long int64 +) + +type Timespec struct { + Sec int64 + Nsec int64 +} + +type Timeval struct { + Sec int64 + Usec int64 +} + +type Rusage struct { + Utime Timeval + Stime Timeval + Maxrss int64 + Ixrss int64 + Idrss int64 + Isrss int64 + Minflt int64 + Majflt int64 + Nswap int64 + Inblock int64 + Oublock int64 + Msgsnd int64 + Msgrcv int64 + Nsignals int64 + Nvcsw int64 + Nivcsw int64 +} + +type Rlimit struct { + Cur int64 + Max int64 +} + +type KinfoProc struct { + Structsize int32 + Layout int32 + Args int64 /* pargs */ + Paddr int64 /* proc */ + Addr int64 /* user */ + Tracep int64 /* vnode */ + Textvp int64 /* vnode */ + Fd int64 /* filedesc */ + Vmspace int64 /* vmspace */ + Wchan int64 + Pid int32 + Ppid int32 + Pgid int32 + Tpgid int32 + Sid int32 + Tsid int32 + Jobc int16 + Spare_short1 int16 + Tdev_freebsd11 uint32 + Siglist [16]byte /* sigset */ + Sigmask [16]byte /* sigset */ + Sigignore [16]byte /* sigset */ + Sigcatch [16]byte /* sigset */ + Uid uint32 + Ruid uint32 + Svuid uint32 + Rgid uint32 + Svgid uint32 + Ngroups int16 + Spare_short2 int16 + Groups [16]uint32 + Size uint64 + Rssize int64 + Swrss int64 + Tsize int64 + Dsize int64 + Ssize int64 + Xstat uint16 + Acflag uint16 + Pctcpu uint32 + Estcpu uint32 + Slptime uint32 + Swtime uint32 + Cow uint32 + Runtime uint64 + Start Timeval + Childtime Timeval + Flag int64 + Kiflag int64 + Traceflag int32 + Stat int8 + Nice int8 + Lock int8 + Rqindex int8 + Oncpu_old uint8 + Lastcpu_old uint8 + Tdname [17]int8 + Wmesg [9]int8 + Login [18]int8 + Lockname [9]int8 + Comm [20]int8 + Emul [17]int8 + Loginclass [18]int8 + Moretdname [4]int8 + Sparestrings [46]int8 + Spareints [2]int32 + Tdev uint64 + Oncpu int32 + Lastcpu int32 + Tracer int32 + Flag2 int32 + Fibnum int32 + Cr_flags uint32 + Jid int32 + Numthreads int32 + Tid int32 + Pri Priority + Rusage Rusage + Rusage_ch Rusage + Pcb int64 /* pcb */ + Kstack int64 + Udata int64 + Tdaddr int64 /* thread */ + Pd int64 /* pwddesc, not accurate */ + Spareptrs [5]int64 + Sparelongs [12]int64 + Sflag int64 + Tdflags int64 +} + +type Priority struct { + Class uint8 + Level uint8 + Native uint8 + User uint8 +} + +type KinfoVmentry 
struct { + Structsize int32 + Type int32 + Start uint64 + End uint64 + Offset uint64 + Vn_fileid uint64 + Vn_fsid_freebsd11 uint32 + Flags int32 + Resident int32 + Private_resident int32 + Protection int32 + Ref_count int32 + Shadow_count int32 + Vn_type int32 + Vn_size uint64 + Vn_rdev_freebsd11 uint32 + Vn_mode uint16 + Status uint16 + Type_spec [8]byte + Vn_rdev uint64 + X_kve_ispare [8]int32 + Path [1024]int8 +} + +type kinfoFile struct { + Structsize int32 + Type int32 + Fd int32 + Ref_count int32 + Flags int32 + Pad0 int32 + Offset int64 + Anon0 [304]byte + Status uint16 + Pad1 uint16 + X_kf_ispare0 int32 + Cap_rights capRights + X_kf_cap_spare uint64 + Path [1024]int8 +} + +type capRights struct { + Rights [2]uint64 +} diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_freebsd_arm.go b/vendor/github.com/shirou/gopsutil/v4/process/process_freebsd_arm.go similarity index 83% rename from vendor/github.com/shirou/gopsutil/v3/process/process_freebsd_arm.go rename to vendor/github.com/shirou/gopsutil/v4/process/process_freebsd_arm.go index 81ae0b9a8..6c4fbf698 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_freebsd_arm.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_freebsd_arm.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause // Created by cgo -godefs - DO NOT EDIT // cgo -godefs types_freebsd.go @@ -10,6 +11,7 @@ const ( KernProcProc = 8 KernProcPathname = 12 KernProcArgs = 7 + KernProcCwd = 42 ) const ( @@ -23,6 +25,7 @@ const ( const ( sizeOfKinfoVmentry = 0x488 sizeOfKinfoProc = 0x440 + sizeOfKinfoFile = 0x570 // TODO: should be changed by running on the target machine ) const ( @@ -190,3 +193,26 @@ type KinfoVmentry struct { X_kve_ispare [12]int32 Path [1024]int8 } + +// TODO: should be changed by running on the target machine +type kinfoFile struct { + Structsize int32 + Type int32 + Fd int32 + Ref_count int32 + Flags int32 + Pad0 int32 + Offset int64 + Anon0 [304]byte + Status uint16 + Pad1 uint16 + X_kf_ispare0 int32 + Cap_rights capRights + X_kf_cap_spare uint64 + Path [1024]int8 // changed from uint8 by hand +} + +// TODO: should be changed by running on the target machine +type capRights struct { + Rights [2]uint64 +} diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_freebsd_arm64.go b/vendor/github.com/shirou/gopsutil/v4/process/process_freebsd_arm64.go similarity index 76% rename from vendor/github.com/shirou/gopsutil/v3/process/process_freebsd_arm64.go rename to vendor/github.com/shirou/gopsutil/v4/process/process_freebsd_arm64.go index effd470a0..dabdc3e30 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_freebsd_arm64.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_freebsd_arm64.go @@ -1,8 +1,8 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build freebsd && arm64 -// +build freebsd,arm64 // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
-// cgo -godefs process/types_freebsd.go +// cgo -godefs types_freebsd.go package process @@ -13,6 +13,7 @@ const ( KernProcProc = 8 KernProcPathname = 12 KernProcArgs = 7 + KernProcCwd = 42 ) const ( @@ -26,6 +27,7 @@ const ( const ( sizeOfKinfoVmentry = 0x488 sizeOfKinfoProc = 0x440 + sizeOfKinfoFile = 0x570 ) const ( @@ -82,14 +84,14 @@ type Rlimit struct { type KinfoProc struct { Structsize int32 Layout int32 - Args *int64 /* pargs */ - Paddr *int64 /* proc */ - Addr *int64 /* user */ - Tracep *int64 /* vnode */ - Textvp *int64 /* vnode */ - Fd *int64 /* filedesc */ - Vmspace *int64 /* vmspace */ - Wchan *byte + Args int64 /* pargs */ + Paddr int64 /* proc */ + Addr int64 /* user */ + Tracep int64 /* vnode */ + Textvp int64 /* vnode */ + Fd int64 /* filedesc */ + Vmspace int64 /* vmspace */ + Wchan int64 Pid int32 Ppid int32 Pgid int32 @@ -140,7 +142,7 @@ type KinfoProc struct { Wmesg [9]uint8 Login [18]uint8 Lockname [9]uint8 - Comm [20]int8 + Comm [20]int8 // changed from uint8 by hand Emul [17]uint8 Loginclass [18]uint8 Moretdname [4]uint8 @@ -159,11 +161,12 @@ type KinfoProc struct { Pri Priority Rusage Rusage Rusage_ch Rusage - Pcb *int64 /* pcb */ - Kstack *byte - Udata *byte - Tdaddr *int64 /* thread */ - Spareptrs [6]*byte + Pcb int64 /* pcb */ + Kstack int64 + Udata int64 + Tdaddr int64 /* thread */ + Pd int64 /* pwddesc, not accurate */ + Spareptrs [5]int64 Sparelongs [12]int64 Sflag int64 Tdflags int64 @@ -195,8 +198,29 @@ type KinfoVmentry struct { Vn_rdev_freebsd11 uint32 Vn_mode uint16 Status uint16 - Vn_fsid uint64 + Type_spec [8]byte Vn_rdev uint64 X_kve_ispare [8]int32 Path [1024]uint8 } + +type kinfoFile struct { + Structsize int32 + Type int32 + Fd int32 + Ref_count int32 + Flags int32 + Pad0 int32 + Offset int64 + Anon0 [304]byte + Status uint16 + Pad1 uint16 + X_kf_ispare0 int32 + Cap_rights capRights + X_kf_cap_spare uint64 + Path [1024]int8 // changed from uint8 by hand +} + +type capRights struct { + Rights [2]uint64 +} diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_linux.go b/vendor/github.com/shirou/gopsutil/v4/process/process_linux.go similarity index 93% rename from vendor/github.com/shirou/gopsutil/v3/process/process_linux.go rename to vendor/github.com/shirou/gopsutil/v4/process/process_linux.go index f7989cd21..f44f6bc6f 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_linux.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_linux.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build linux -// +build linux package process @@ -12,15 +12,16 @@ import ( "math" "os" "path/filepath" + "sort" "strconv" "strings" "github.com/tklauser/go-sysconf" "golang.org/x/sys/unix" - "github.com/shirou/gopsutil/v3/cpu" - "github.com/shirou/gopsutil/v3/internal/common" - "github.com/shirou/gopsutil/v3/net" + "github.com/shirou/gopsutil/v4/cpu" + "github.com/shirou/gopsutil/v4/internal/common" + "github.com/shirou/gopsutil/v4/net" ) var pageSize = uint64(os.Getpagesize()) @@ -148,26 +149,26 @@ func (p *Process) ForegroundWithContext(ctx context.Context) (bool, error) { return pgid == tpgid, nil } -func (p *Process) UidsWithContext(ctx context.Context) ([]int32, error) { +func (p *Process) UidsWithContext(ctx context.Context) ([]uint32, error) { err := p.fillFromStatusWithContext(ctx) if err != nil { - return []int32{}, err + return []uint32{}, err } return p.uids, nil } -func (p *Process) GidsWithContext(ctx context.Context) ([]int32, error) { +func (p *Process) GidsWithContext(ctx context.Context) ([]uint32, 
error) { err := p.fillFromStatusWithContext(ctx) if err != nil { - return []int32{}, err + return []uint32{}, err } return p.gids, nil } -func (p *Process) GroupsWithContext(ctx context.Context) ([]int32, error) { +func (p *Process) GroupsWithContext(ctx context.Context) ([]uint32, error) { err := p.fillFromStatusWithContext(ctx) if err != nil { - return []int32{}, err + return []uint32{}, err } return p.groups, nil } @@ -193,7 +194,7 @@ func (p *Process) NiceWithContext(ctx context.Context) (int32, error) { return nice, nil } -func (p *Process) IOniceWithContext(ctx context.Context) (int32, error) { +func (p *Process) IOniceWithContext(_ context.Context) (int32, error) { return 0, common.ErrNotImplementedError } @@ -309,7 +310,7 @@ func (p *Process) TimesWithContext(ctx context.Context) (*cpu.TimesStat, error) return cpuTimes, nil } -func (p *Process) CPUAffinityWithContext(ctx context.Context) ([]int32, error) { +func (p *Process) CPUAffinityWithContext(_ context.Context) ([]int32, error) { return nil, common.ErrNotImplementedError } @@ -338,43 +339,48 @@ func (p *Process) PageFaultsWithContext(ctx context.Context) (*PageFaultsStat, e } func (p *Process) ChildrenWithContext(ctx context.Context) ([]*Process, error) { - pids, err := common.CallPgrepWithContext(ctx, invoke, p.Pid) + statFiles, err := filepath.Glob(common.HostProcWithContext(ctx, "[0-9]*/stat")) if err != nil { return nil, err } - if len(pids) == 0 { - return nil, ErrorNoChildren - } - ret := make([]*Process, 0, len(pids)) - for _, pid := range pids { - np, err := NewProcessWithContext(ctx, pid) + ret := make([]*Process, 0, len(statFiles)) + for _, statFile := range statFiles { + statContents, err := os.ReadFile(statFile) if err != nil { - return nil, err + continue + } + fields := splitProcStat(statContents) + pid, err := strconv.ParseInt(fields[1], 10, 32) + if err != nil { + continue + } + ppid, err := strconv.ParseInt(fields[4], 10, 32) + if err != nil { + continue + } + if ppid == int64(p.Pid) { + np, err := NewProcessWithContext(ctx, int32(pid)) + if err != nil { + continue + } + ret = append(ret, np) } - ret = append(ret, np) } + sort.Slice(ret, func(i, j int) bool { return ret[i].Pid < ret[j].Pid }) return ret, nil } func (p *Process) OpenFilesWithContext(ctx context.Context) ([]OpenFilesStat, error) { _, ofs, err := p.fillFromfdWithContext(ctx) - if err != nil { - return nil, err - } - ret := make([]OpenFilesStat, len(ofs)) - for i, o := range ofs { - ret[i] = *o - } - - return ret, nil + return ofs, err } func (p *Process) ConnectionsWithContext(ctx context.Context) ([]net.ConnectionStat, error) { return net.ConnectionsPidWithContext(ctx, "all", p.Pid) } -func (p *Process) ConnectionsMaxWithContext(ctx context.Context, max int) ([]net.ConnectionStat, error) { - return net.ConnectionsPidMaxWithContext(ctx, "all", p.Pid, max) +func (p *Process) ConnectionsMaxWithContext(ctx context.Context, maxConn int) ([]net.ConnectionStat, error) { + return net.ConnectionsPidMaxWithContext(ctx, "all", p.Pid, maxConn) } func (p *Process) MemoryMapsWithContext(ctx context.Context, grouped bool) (*[]MemoryMapsStat, error) { @@ -399,7 +405,9 @@ func (p *Process) MemoryMapsWithContext(ctx context.Context, grouped bool) (*[]M // function of parsing a block getBlock := func(firstLine []string, block []string) (MemoryMapsStat, error) { m := MemoryMapsStat{} - m.Path = firstLine[len(firstLine)-1] + if len(firstLine) >= 6 { + m.Path = strings.Join(firstLine[5:], " ") + } for _, line := range block { if strings.Contains(line, "VmFlags") { @@ 
-613,17 +621,17 @@ func (p *Process) fillFromfdListWithContext(ctx context.Context) (string, []stri } // Get num_fds from /proc/(pid)/fd -func (p *Process) fillFromfdWithContext(ctx context.Context) (int32, []*OpenFilesStat, error) { +func (p *Process) fillFromfdWithContext(ctx context.Context) (int32, []OpenFilesStat, error) { statPath, fnames, err := p.fillFromfdListWithContext(ctx) if err != nil { return 0, nil, err } numFDs := int32(len(fnames)) - var openfiles []*OpenFilesStat + openfiles := make([]OpenFilesStat, 0, numFDs) for _, fd := range fnames { fpath := filepath.Join(statPath, fd) - filepath, err := os.Readlink(fpath) + path, err := common.Readlink(fpath) if err != nil { continue } @@ -631,8 +639,8 @@ func (p *Process) fillFromfdWithContext(ctx context.Context) (int32, []*OpenFile if err != nil { return numFDs, openfiles, err } - o := &OpenFilesStat{ - Path: filepath, + o := OpenFilesStat{ + Path: path, Fd: t, } openfiles = append(openfiles, o) @@ -727,8 +735,12 @@ func (p *Process) fillFromIOWithContext(ctx context.Context) (*IOCountersStat, e case "syscw": ret.WriteCount = t case "read_bytes": - ret.ReadBytes = t + ret.DiskReadBytes = t case "write_bytes": + ret.DiskWriteBytes = t + case "rchar": + ret.ReadBytes = t + case "wchar": ret.WriteBytes = t } } @@ -866,32 +878,32 @@ func (p *Process) fillFromStatusWithContext(ctx context.Context) error { } p.tgid = int32(pval) case "Uid": - p.uids = make([]int32, 0, 4) + p.uids = make([]uint32, 0, 4) for _, i := range strings.Split(value, "\t") { v, err := strconv.ParseInt(i, 10, 32) if err != nil { return err } - p.uids = append(p.uids, int32(v)) + p.uids = append(p.uids, uint32(v)) } case "Gid": - p.gids = make([]int32, 0, 4) + p.gids = make([]uint32, 0, 4) for _, i := range strings.Split(value, "\t") { v, err := strconv.ParseInt(i, 10, 32) if err != nil { return err } - p.gids = append(p.gids, int32(v)) + p.gids = append(p.gids, uint32(v)) } case "Groups": groups := strings.Fields(value) - p.groups = make([]int32, 0, len(groups)) + p.groups = make([]uint32, 0, len(groups)) for _, i := range groups { - v, err := strconv.ParseInt(i, 10, 32) + v, err := strconv.ParseUint(i, 10, 32) if err != nil { return err } - p.groups = append(p.groups, int32(v)) + p.groups = append(p.groups, uint32(v)) } case "Threads": v, err := strconv.ParseInt(value, 10, 32) @@ -1071,13 +1083,12 @@ func (p *Process) fillFromTIDStatWithContext(ctx context.Context, tid int32) (ui Iowait: iotime / float64(clockTicks), } - bootTime, _ := common.BootTimeWithContext(ctx) + bootTime, _ := common.BootTimeWithContext(ctx, enableBootTimeCache) t, err := strconv.ParseUint(fields[22], 10, 64) if err != nil { return 0, 0, nil, 0, 0, 0, nil, err } - ctime := (t / uint64(clockTicks)) + uint64(bootTime) - createTime := int64(ctime * 1000) + createTime := int64((t * 1000 / uint64(clockTicks)) + uint64(bootTime*1000)) rtpriority, err := strconv.ParseInt(fields[18], 10, 32) if err != nil { diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_openbsd.go b/vendor/github.com/shirou/gopsutil/v4/process/process_openbsd.go similarity index 73% rename from vendor/github.com/shirou/gopsutil/v3/process/process_openbsd.go rename to vendor/github.com/shirou/gopsutil/v4/process/process_openbsd.go index a58c5eb11..063ff20ca 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_openbsd.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_openbsd.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build openbsd -// +build openbsd package process 
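The gopsutil v3 -> v4 hunks above change the library's caller-facing surface in two ways any importer of these vendored packages has to absorb: Uids/Gids/Groups now return []uint32 instead of []int32, and on Linux the IOCountersStat fields are remapped so that ReadBytes/WriteBytes carry the rchar/wchar totals (all I/O) while the former read_bytes/write_bytes values move to the new DiskReadBytes/DiskWriteBytes fields. Below is a minimal caller-side migration sketch; it is not part of this patch. The v4 import path, the []uint32 return types, and the Disk*Bytes fields are taken from the hunks above, while the main function, the chosen pid, and the printing are purely illustrative.

// migration sketch only -- not part of the vendored patch above
package main

import (
	"fmt"

	"github.com/shirou/gopsutil/v4/process" // import path changes from .../v3/process
)

func main() {
	proc, err := process.NewProcess(int32(1)) // pid 1 chosen arbitrarily for illustration
	if err != nil {
		fmt.Println("lookup failed:", err)
		return
	}

	// Uids now returns []uint32; callers that stored IDs as int32 need a conversion.
	if uids, err := proc.Uids(); err == nil {
		for _, uid := range uids {
			fmt.Println(int64(uid)) // widen rather than casting back to int32
		}
	}

	// On Linux, ReadBytes/WriteBytes now mean "all I/O" (rchar/wchar); the
	// disk-only byte counts live in DiskReadBytes/DiskWriteBytes.
	if ioc, err := proc.IOCounters(); err == nil {
		fmt.Printf("all: %d/%d, disk: %d/%d\n",
			ioc.ReadBytes, ioc.WriteBytes, ioc.DiskReadBytes, ioc.DiskWriteBytes)
	}
}

Note that code which previously treated ReadBytes as disk bytes on Linux will generally report larger numbers after this upgrade, so consumers should switch to the Disk*Bytes fields rather than reinterpret the old ones.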
@@ -7,18 +7,20 @@ import ( "bytes" "context" "encoding/binary" - "fmt" + "errors" "io" "path/filepath" + "sort" "strconv" "strings" "unsafe" - cpu "github.com/shirou/gopsutil/v3/cpu" - "github.com/shirou/gopsutil/v3/internal/common" - mem "github.com/shirou/gopsutil/v3/mem" - net "github.com/shirou/gopsutil/v3/net" "golang.org/x/sys/unix" + + "github.com/shirou/gopsutil/v4/cpu" + "github.com/shirou/gopsutil/v4/internal/common" + "github.com/shirou/gopsutil/v4/mem" + "github.com/shirou/gopsutil/v4/net" ) func pidsWithContext(ctx context.Context) ([]int32, error) { @@ -35,7 +37,7 @@ func pidsWithContext(ctx context.Context) ([]int32, error) { return ret, nil } -func (p *Process) PpidWithContext(ctx context.Context) (int32, error) { +func (p *Process) PpidWithContext(_ context.Context) (int32, error) { k, err := p.getKProc() if err != nil { return 0, err @@ -67,15 +69,20 @@ func (p *Process) NameWithContext(ctx context.Context) (string, error) { return name, nil } -func (p *Process) CwdWithContext(ctx context.Context) (string, error) { - return "", common.ErrNotImplementedError +func (p *Process) CwdWithContext(_ context.Context) (string, error) { + mib := []int32{CTLKern, KernProcCwd, p.Pid} + buf, _, err := common.CallSyscall(mib) + if err != nil { + return "", err + } + return common.ByteToString(buf), nil } -func (p *Process) ExeWithContext(ctx context.Context) (string, error) { +func (p *Process) ExeWithContext(_ context.Context) (string, error) { return "", common.ErrNotImplementedError } -func (p *Process) CmdlineSliceWithContext(ctx context.Context) ([]string, error) { +func (p *Process) CmdlineSliceWithContext(_ context.Context) ([]string, error) { mib := []int32{CTLKern, KernProcArgs, p.Pid, KernProcArgv} buf, _, err := common.CallSyscall(mib) if err != nil { @@ -123,7 +130,7 @@ func readPtr(r io.Reader) (uintptr, error) { } return uintptr(p), nil default: - return 0, fmt.Errorf("unsupported pointer size") + return 0, errors.New("unsupported pointer size") } } @@ -135,11 +142,11 @@ func (p *Process) CmdlineWithContext(ctx context.Context) (string, error) { return strings.Join(argv, " "), nil } -func (p *Process) createTimeWithContext(ctx context.Context) (int64, error) { +func (p *Process) createTimeWithContext(_ context.Context) (int64, error) { return 0, common.ErrNotImplementedError } -func (p *Process) StatusWithContext(ctx context.Context) ([]string, error) { +func (p *Process) StatusWithContext(_ context.Context) ([]string, error) { k, err := p.getKProc() if err != nil { return []string{""}, err @@ -171,46 +178,46 @@ func (p *Process) ForegroundWithContext(ctx context.Context) (bool, error) { return strings.IndexByte(string(out), '+') != -1, nil } -func (p *Process) UidsWithContext(ctx context.Context) ([]int32, error) { +func (p *Process) UidsWithContext(_ context.Context) ([]uint32, error) { k, err := p.getKProc() if err != nil { return nil, err } - uids := make([]int32, 0, 3) + uids := make([]uint32, 0, 3) - uids = append(uids, int32(k.Ruid), int32(k.Uid), int32(k.Svuid)) + uids = append(uids, uint32(k.Ruid), uint32(k.Uid), uint32(k.Svuid)) return uids, nil } -func (p *Process) GidsWithContext(ctx context.Context) ([]int32, error) { +func (p *Process) GidsWithContext(_ context.Context) ([]uint32, error) { k, err := p.getKProc() if err != nil { return nil, err } - gids := make([]int32, 0, 3) - gids = append(gids, int32(k.Rgid), int32(k.Ngroups), int32(k.Svgid)) + gids := make([]uint32, 0, 3) + gids = append(gids, uint32(k.Rgid), uint32(k.Ngroups), uint32(k.Svgid)) return 
gids, nil } -func (p *Process) GroupsWithContext(ctx context.Context) ([]int32, error) { +func (p *Process) GroupsWithContext(_ context.Context) ([]uint32, error) { k, err := p.getKProc() if err != nil { return nil, err } - groups := make([]int32, k.Ngroups) + groups := make([]uint32, k.Ngroups) for i := int16(0); i < k.Ngroups; i++ { - groups[i] = int32(k.Groups[i]) + groups[i] = uint32(k.Groups[i]) } return groups, nil } -func (p *Process) TerminalWithContext(ctx context.Context) (string, error) { +func (p *Process) TerminalWithContext(_ context.Context) (string, error) { k, err := p.getKProc() if err != nil { return "", err @@ -226,7 +233,7 @@ func (p *Process) TerminalWithContext(ctx context.Context) (string, error) { return termmap[ttyNr], nil } -func (p *Process) NiceWithContext(ctx context.Context) (int32, error) { +func (p *Process) NiceWithContext(_ context.Context) (int32, error) { k, err := p.getKProc() if err != nil { return 0, err @@ -234,7 +241,7 @@ func (p *Process) NiceWithContext(ctx context.Context) (int32, error) { return int32(k.Nice), nil } -func (p *Process) IOCountersWithContext(ctx context.Context) (*IOCountersStat, error) { +func (p *Process) IOCountersWithContext(_ context.Context) (*IOCountersStat, error) { k, err := p.getKProc() if err != nil { return nil, err @@ -245,12 +252,12 @@ func (p *Process) IOCountersWithContext(ctx context.Context) (*IOCountersStat, e }, nil } -func (p *Process) NumThreadsWithContext(ctx context.Context) (int32, error) { +func (p *Process) NumThreadsWithContext(_ context.Context) (int32, error) { /* not supported, just return 1 */ return 1, nil } -func (p *Process) TimesWithContext(ctx context.Context) (*cpu.TimesStat, error) { +func (p *Process) TimesWithContext(_ context.Context) (*cpu.TimesStat, error) { k, err := p.getKProc() if err != nil { return nil, err @@ -280,26 +287,29 @@ func (p *Process) MemoryInfoWithContext(ctx context.Context) (*MemoryInfoStat, e } func (p *Process) ChildrenWithContext(ctx context.Context) ([]*Process, error) { - pids, err := common.CallPgrepWithContext(ctx, invoke, p.Pid) + procs, err := ProcessesWithContext(ctx) if err != nil { - return nil, err + return nil, nil } - ret := make([]*Process, 0, len(pids)) - for _, pid := range pids { - np, err := NewProcessWithContext(ctx, pid) + ret := make([]*Process, 0, len(procs)) + for _, proc := range procs { + ppid, err := proc.PpidWithContext(ctx) if err != nil { - return nil, err + continue + } + if ppid == p.Pid { + ret = append(ret, proc) } - ret = append(ret, np) } + sort.Slice(ret, func(i, j int) bool { return ret[i].Pid < ret[j].Pid }) return ret, nil } -func (p *Process) ConnectionsWithContext(ctx context.Context) ([]net.ConnectionStat, error) { +func (p *Process) ConnectionsWithContext(_ context.Context) ([]net.ConnectionStat, error) { return nil, common.ErrNotImplementedError } -func (p *Process) ConnectionsMaxWithContext(ctx context.Context, max int) ([]net.ConnectionStat, error) { +func (p *Process) ConnectionsMaxWithContext(_ context.Context, _ int) ([]net.ConnectionStat, error) { return nil, common.ErrNotImplementedError } @@ -338,7 +348,7 @@ func (p *Process) getKProc() (*KinfoProc, error) { return nil, err } if length != sizeOfKinfoProc { - return nil, err + return nil, errors.New("unexpected size of KinfoProc") } k, err := parseKinfoProc(buf) @@ -348,7 +358,7 @@ func (p *Process) getKProc() (*KinfoProc, error) { return &k, nil } -func callKernProcSyscall(op int32, arg int32) ([]byte, uint64, error) { +func callKernProcSyscall(op, arg int32) 
([]byte, uint64, error) { mib := []int32{CTLKern, KernProc, op, arg, sizeOfKinfoProc, 0} mibptr := unsafe.Pointer(&mib[0]) miblen := uint64(len(mib)) diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_openbsd_386.go b/vendor/github.com/shirou/gopsutil/v4/process/process_openbsd_386.go similarity index 98% rename from vendor/github.com/shirou/gopsutil/v3/process/process_openbsd_386.go rename to vendor/github.com/shirou/gopsutil/v4/process/process_openbsd_386.go index f4ed02491..5b84706a7 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_openbsd_386.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_openbsd_386.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build openbsd && 386 -// +build openbsd,386 // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs process/types_openbsd.go @@ -14,6 +14,7 @@ const ( KernProcProc = 8 KernProcPathname = 12 KernProcArgs = 55 + KernProcCwd = 78 KernProcArgv = 1 KernProcEnv = 3 ) diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_openbsd_amd64.go b/vendor/github.com/shirou/gopsutil/v4/process/process_openbsd_amd64.go similarity index 98% rename from vendor/github.com/shirou/gopsutil/v3/process/process_openbsd_amd64.go rename to vendor/github.com/shirou/gopsutil/v4/process/process_openbsd_amd64.go index 8607422b5..3229bb32c 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_openbsd_amd64.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_openbsd_amd64.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause // Created by cgo -godefs - DO NOT EDIT // cgo -godefs types_openbsd.go @@ -11,6 +12,7 @@ const ( KernProcProc = 8 KernProcPathname = 12 KernProcArgs = 55 + KernProcCwd = 78 KernProcArgv = 1 KernProcEnv = 3 ) diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_openbsd_arm.go b/vendor/github.com/shirou/gopsutil/v4/process/process_openbsd_arm.go similarity index 98% rename from vendor/github.com/shirou/gopsutil/v3/process/process_openbsd_arm.go rename to vendor/github.com/shirou/gopsutil/v4/process/process_openbsd_arm.go index b94429f2e..6f74ce756 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_openbsd_arm.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_openbsd_arm.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build openbsd && arm -// +build openbsd,arm // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs process/types_openbsd.go @@ -14,6 +14,7 @@ const ( KernProcProc = 8 KernProcPathname = 12 KernProcArgs = 55 + KernProcCwd = 78 KernProcArgv = 1 KernProcEnv = 3 ) diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_openbsd_arm64.go b/vendor/github.com/shirou/gopsutil/v4/process/process_openbsd_arm64.go similarity index 98% rename from vendor/github.com/shirou/gopsutil/v3/process/process_openbsd_arm64.go rename to vendor/github.com/shirou/gopsutil/v4/process/process_openbsd_arm64.go index a3291b8ca..910454562 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_openbsd_arm64.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_openbsd_arm64.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build openbsd && arm64 -// +build openbsd,arm64 // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
// cgo -godefs process/types_openbsd.go @@ -14,6 +14,7 @@ const ( KernProcProc = 8 KernProcPathname = 12 KernProcArgs = 55 + KernProcCwd = 78 KernProcArgv = 1 KernProcEnv = 3 ) diff --git a/vendor/github.com/shirou/gopsutil/v4/process/process_openbsd_riscv64.go b/vendor/github.com/shirou/gopsutil/v4/process/process_openbsd_riscv64.go new file mode 100644 index 000000000..e3e0d36a0 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_openbsd_riscv64.go @@ -0,0 +1,205 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build openbsd && riscv64 + +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs process/types_openbsd.go + +package process + +const ( + CTLKern = 1 + KernProc = 66 + KernProcAll = 0 + KernProcPID = 1 + KernProcProc = 8 + KernProcPathname = 12 + KernProcArgs = 55 + KernProcCwd = 78 + KernProcArgv = 1 + KernProcEnv = 3 +) + +const ( + ArgMax = 256 * 1024 +) + +const ( + sizeofPtr = 0x8 + sizeofShort = 0x2 + sizeofInt = 0x4 + sizeofLong = 0x8 + sizeofLongLong = 0x8 +) + +const ( + sizeOfKinfoVmentry = 0x50 + sizeOfKinfoProc = 0x288 +) + +const ( + SIDL = 1 + SRUN = 2 + SSLEEP = 3 + SSTOP = 4 + SZOMB = 5 + SDEAD = 6 + SONPROC = 7 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int64 + _C_long_long int64 +) + +type Timespec struct { + Sec int64 + Nsec int64 +} + +type Timeval struct { + Sec int64 + Usec int64 +} + +type Rusage struct { + Utime Timeval + Stime Timeval + Maxrss int64 + Ixrss int64 + Idrss int64 + Isrss int64 + Minflt int64 + Majflt int64 + Nswap int64 + Inblock int64 + Oublock int64 + Msgsnd int64 + Msgrcv int64 + Nsignals int64 + Nvcsw int64 + Nivcsw int64 +} + +type Rlimit struct { + Cur uint64 + Max uint64 +} + +type KinfoProc struct { + Forw uint64 + Back uint64 + Paddr uint64 + Addr uint64 + Fd uint64 + Stats uint64 + Limit uint64 + Vmspace uint64 + Sigacts uint64 + Sess uint64 + Tsess uint64 + Ru uint64 + Eflag int32 + Exitsig int32 + Flag int32 + Pid int32 + Ppid int32 + Sid int32 + X_pgid int32 + Tpgid int32 + Uid uint32 + Ruid uint32 + Gid uint32 + Rgid uint32 + Groups [16]uint32 + Ngroups int16 + Jobc int16 + Tdev uint32 + Estcpu uint32 + Rtime_sec uint32 + Rtime_usec uint32 + Cpticks int32 + Pctcpu uint32 + Swtime uint32 + Slptime uint32 + Schedflags int32 + Uticks uint64 + Sticks uint64 + Iticks uint64 + Tracep uint64 + Traceflag int32 + Holdcnt int32 + Siglist int32 + Sigmask uint32 + Sigignore uint32 + Sigcatch uint32 + Stat int8 + Priority uint8 + Usrpri uint8 + Nice uint8 + Xstat uint16 + Spare uint16 + Comm [24]int8 + Wmesg [8]uint8 + Wchan uint64 + Login [32]uint8 + Vm_rssize int32 + Vm_tsize int32 + Vm_dsize int32 + Vm_ssize int32 + Uvalid int64 + Ustart_sec uint64 + Ustart_usec uint32 + Uutime_sec uint32 + Uutime_usec uint32 + Ustime_sec uint32 + Ustime_usec uint32 + Uru_maxrss uint64 + Uru_ixrss uint64 + Uru_idrss uint64 + Uru_isrss uint64 + Uru_minflt uint64 + Uru_majflt uint64 + Uru_nswap uint64 + Uru_inblock uint64 + Uru_oublock uint64 + Uru_msgsnd uint64 + Uru_msgrcv uint64 + Uru_nsignals uint64 + Uru_nvcsw uint64 + Uru_nivcsw uint64 + Uctime_sec uint32 + Uctime_usec uint32 + Psflags uint32 + Acflag uint32 + Svuid uint32 + Svgid uint32 + Emul [8]uint8 + Rlim_rss_cur uint64 + Cpuid uint64 + Vm_map_size uint64 + Tid int32 + Rtableid uint32 + Pledge uint64 + Name [24]uint8 +} + +type Priority struct{} + +type KinfoVmentry struct { + Start uint64 + End uint64 + Guard uint64 + Fspace uint64 + Fspace_augment uint64 + Offset uint64 + Wired_count int32 + Etype int32 + Protection int32 + Max_protection int32 + 
Advice int32 + Inheritance int32 + Flags uint8 + Pad_cgo_0 [7]byte +} diff --git a/vendor/github.com/shirou/gopsutil/v4/process/process_plan9.go b/vendor/github.com/shirou/gopsutil/v4/process/process_plan9.go new file mode 100644 index 000000000..7f6877182 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_plan9.go @@ -0,0 +1,203 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build plan9 + +package process + +import ( + "context" + "syscall" + + "github.com/shirou/gopsutil/v4/cpu" + "github.com/shirou/gopsutil/v4/internal/common" + "github.com/shirou/gopsutil/v4/net" +) + +type Signal = syscall.Note + +type MemoryMapsStat struct { + Path string `json:"path"` + Rss uint64 `json:"rss"` + Size uint64 `json:"size"` + Pss uint64 `json:"pss"` + SharedClean uint64 `json:"sharedClean"` + SharedDirty uint64 `json:"sharedDirty"` + PrivateClean uint64 `json:"privateClean"` + PrivateDirty uint64 `json:"privateDirty"` + Referenced uint64 `json:"referenced"` + Anonymous uint64 `json:"anonymous"` + Swap uint64 `json:"swap"` +} + +type MemoryInfoExStat struct{} + +func pidsWithContext(_ context.Context) ([]int32, error) { + return nil, common.ErrNotImplementedError +} + +func ProcessesWithContext(_ context.Context) ([]*Process, error) { + return nil, common.ErrNotImplementedError +} + +func PidExistsWithContext(_ context.Context, _ int32) (bool, error) { + return false, common.ErrNotImplementedError +} + +func (p *Process) PpidWithContext(_ context.Context) (int32, error) { + return 0, common.ErrNotImplementedError +} + +func (p *Process) NameWithContext(_ context.Context) (string, error) { + return "", common.ErrNotImplementedError +} + +func (p *Process) TgidWithContext(_ context.Context) (int32, error) { + return 0, common.ErrNotImplementedError +} + +func (p *Process) ExeWithContext(_ context.Context) (string, error) { + return "", common.ErrNotImplementedError +} + +func (p *Process) CmdlineWithContext(_ context.Context) (string, error) { + return "", common.ErrNotImplementedError +} + +func (p *Process) CmdlineSliceWithContext(_ context.Context) ([]string, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) createTimeWithContext(_ context.Context) (int64, error) { + return 0, common.ErrNotImplementedError +} + +func (p *Process) CwdWithContext(_ context.Context) (string, error) { + return "", common.ErrNotImplementedError +} + +func (p *Process) StatusWithContext(_ context.Context) ([]string, error) { + return []string{""}, common.ErrNotImplementedError +} + +func (p *Process) ForegroundWithContext(_ context.Context) (bool, error) { + return false, common.ErrNotImplementedError +} + +func (p *Process) UidsWithContext(_ context.Context) ([]uint32, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) GidsWithContext(_ context.Context) ([]uint32, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) GroupsWithContext(_ context.Context) ([]uint32, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) TerminalWithContext(_ context.Context) (string, error) { + return "", common.ErrNotImplementedError +} + +func (p *Process) NiceWithContext(_ context.Context) (int32, error) { + return 0, common.ErrNotImplementedError +} + +func (p *Process) IOniceWithContext(_ context.Context) (int32, error) { + return 0, common.ErrNotImplementedError +} + +func (p *Process) RlimitWithContext(_ context.Context) ([]RlimitStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) 
RlimitUsageWithContext(_ context.Context, _ bool) ([]RlimitStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) IOCountersWithContext(_ context.Context) (*IOCountersStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) NumCtxSwitchesWithContext(_ context.Context) (*NumCtxSwitchesStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) NumFDsWithContext(_ context.Context) (int32, error) { + return 0, common.ErrNotImplementedError +} + +func (p *Process) NumThreadsWithContext(_ context.Context) (int32, error) { + return 0, common.ErrNotImplementedError +} + +func (p *Process) ThreadsWithContext(_ context.Context) (map[int32]*cpu.TimesStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) TimesWithContext(_ context.Context) (*cpu.TimesStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) CPUAffinityWithContext(_ context.Context) ([]int32, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) MemoryInfoWithContext(_ context.Context) (*MemoryInfoStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) MemoryInfoExWithContext(_ context.Context) (*MemoryInfoExStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) PageFaultsWithContext(_ context.Context) (*PageFaultsStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) ChildrenWithContext(_ context.Context) ([]*Process, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) OpenFilesWithContext(_ context.Context) ([]OpenFilesStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) ConnectionsWithContext(_ context.Context) ([]net.ConnectionStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) ConnectionsMaxWithContext(_ context.Context, _ int) ([]net.ConnectionStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) MemoryMapsWithContext(_ context.Context, _ bool) (*[]MemoryMapsStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) SendSignalWithContext(_ context.Context, _ Signal) error { + return common.ErrNotImplementedError +} + +func (p *Process) SuspendWithContext(_ context.Context) error { + return common.ErrNotImplementedError +} + +func (p *Process) ResumeWithContext(_ context.Context) error { + return common.ErrNotImplementedError +} + +func (p *Process) TerminateWithContext(_ context.Context) error { + return common.ErrNotImplementedError +} + +func (p *Process) KillWithContext(_ context.Context) error { + return common.ErrNotImplementedError +} + +func (p *Process) UsernameWithContext(_ context.Context) (string, error) { + return "", common.ErrNotImplementedError +} + +func (p *Process) EnvironWithContext(_ context.Context) ([]string, error) { + return nil, common.ErrNotImplementedError +} diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_posix.go b/vendor/github.com/shirou/gopsutil/v4/process/process_posix.go similarity index 92% rename from vendor/github.com/shirou/gopsutil/v3/process/process_posix.go rename to vendor/github.com/shirou/gopsutil/v4/process/process_posix.go index a01f9ecfc..9fe55b490 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_posix.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_posix.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build linux || freebsd || openbsd || darwin || 
solaris -// +build linux freebsd openbsd darwin solaris package process @@ -16,7 +16,7 @@ import ( "golang.org/x/sys/unix" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" ) type Signal = syscall.Signal @@ -67,11 +67,12 @@ func getTerminalMap() (map[uint64]string, error) { for _, name := range termfiles { stat := unix.Stat_t{} - if err = unix.Stat(name, &stat); err != nil { + err = unix.Stat(name, &stat) + if err != nil { return nil, err } rdev := uint64(stat.Rdev) - ret[rdev] = strings.Replace(name, "/dev", "", -1) + ret[rdev] = strings.ReplaceAll(name, "/dev", "") } return ret, nil } @@ -108,6 +109,7 @@ func PidExistsWithContext(ctx context.Context, pid int32) (bool, error) { if err != nil { return false, err } + defer proc.Release() if isMount(common.HostProcWithContext(ctx)) { // if //proc exists and is mounted, check if //proc/ folder exists _, err := os.Stat(common.HostProcWithContext(ctx, strconv.Itoa(int(pid)))) @@ -139,11 +141,12 @@ func PidExistsWithContext(ctx context.Context, pid int32) (bool, error) { return false, err } -func (p *Process) SendSignalWithContext(ctx context.Context, sig syscall.Signal) error { +func (p *Process) SendSignalWithContext(_ context.Context, sig syscall.Signal) error { process, err := os.FindProcess(int(p.Pid)) if err != nil { return err } + defer process.Release() err = process.Signal(sig) if err != nil { diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_solaris.go b/vendor/github.com/shirou/gopsutil/v4/process/process_solaris.go similarity index 68% rename from vendor/github.com/shirou/gopsutil/v3/process/process_solaris.go rename to vendor/github.com/shirou/gopsutil/v4/process/process_solaris.go index dd4bd4760..6af5633e0 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_solaris.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_solaris.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause package process import ( @@ -7,9 +8,9 @@ import ( "strconv" "strings" - "github.com/shirou/gopsutil/v3/cpu" - "github.com/shirou/gopsutil/v3/internal/common" - "github.com/shirou/gopsutil/v3/net" + "github.com/shirou/gopsutil/v4/cpu" + "github.com/shirou/gopsutil/v4/internal/common" + "github.com/shirou/gopsutil/v4/net" ) type MemoryMapsStat struct { @@ -51,15 +52,15 @@ func ProcessesWithContext(ctx context.Context) ([]*Process, error) { return out, nil } -func (p *Process) PpidWithContext(ctx context.Context) (int32, error) { +func (p *Process) PpidWithContext(_ context.Context) (int32, error) { return 0, common.ErrNotImplementedError } -func (p *Process) NameWithContext(ctx context.Context) (string, error) { +func (p *Process) NameWithContext(_ context.Context) (string, error) { return "", common.ErrNotImplementedError } -func (p *Process) TgidWithContext(ctx context.Context) (int32, error) { +func (p *Process) TgidWithContext(_ context.Context) (int32, error) { return 0, common.ErrNotImplementedError } @@ -79,7 +80,7 @@ func (p *Process) CmdlineSliceWithContext(ctx context.Context) ([]string, error) return p.fillSliceFromCmdlineWithContext(ctx) } -func (p *Process) createTimeWithContext(ctx context.Context) (int64, error) { +func (p *Process) createTimeWithContext(_ context.Context) (int64, error) { return 0, common.ErrNotImplementedError } @@ -87,51 +88,51 @@ func (p *Process) CwdWithContext(ctx context.Context) (string, error) { return p.fillFromPathCwdWithContext(ctx) } -func (p *Process) StatusWithContext(ctx context.Context) ([]string, error) { +func (p 
*Process) StatusWithContext(_ context.Context) ([]string, error) { return []string{""}, common.ErrNotImplementedError } -func (p *Process) ForegroundWithContext(ctx context.Context) (bool, error) { +func (p *Process) ForegroundWithContext(_ context.Context) (bool, error) { return false, common.ErrNotImplementedError } -func (p *Process) UidsWithContext(ctx context.Context) ([]int32, error) { +func (p *Process) UidsWithContext(_ context.Context) ([]uint32, error) { return nil, common.ErrNotImplementedError } -func (p *Process) GidsWithContext(ctx context.Context) ([]int32, error) { +func (p *Process) GidsWithContext(_ context.Context) ([]uint32, error) { return nil, common.ErrNotImplementedError } -func (p *Process) GroupsWithContext(ctx context.Context) ([]int32, error) { +func (p *Process) GroupsWithContext(_ context.Context) ([]uint32, error) { return nil, common.ErrNotImplementedError } -func (p *Process) TerminalWithContext(ctx context.Context) (string, error) { +func (p *Process) TerminalWithContext(_ context.Context) (string, error) { return "", common.ErrNotImplementedError } -func (p *Process) NiceWithContext(ctx context.Context) (int32, error) { +func (p *Process) NiceWithContext(_ context.Context) (int32, error) { return 0, common.ErrNotImplementedError } -func (p *Process) IOniceWithContext(ctx context.Context) (int32, error) { +func (p *Process) IOniceWithContext(_ context.Context) (int32, error) { return 0, common.ErrNotImplementedError } -func (p *Process) RlimitWithContext(ctx context.Context) ([]RlimitStat, error) { +func (p *Process) RlimitWithContext(_ context.Context) ([]RlimitStat, error) { return nil, common.ErrNotImplementedError } -func (p *Process) RlimitUsageWithContext(ctx context.Context, gatherUsed bool) ([]RlimitStat, error) { +func (p *Process) RlimitUsageWithContext(_ context.Context, _ bool) ([]RlimitStat, error) { return nil, common.ErrNotImplementedError } -func (p *Process) IOCountersWithContext(ctx context.Context) (*IOCountersStat, error) { +func (p *Process) IOCountersWithContext(_ context.Context) (*IOCountersStat, error) { return nil, common.ErrNotImplementedError } -func (p *Process) NumCtxSwitchesWithContext(ctx context.Context) (*NumCtxSwitchesStat, error) { +func (p *Process) NumCtxSwitchesWithContext(_ context.Context) (*NumCtxSwitchesStat, error) { return nil, common.ErrNotImplementedError } @@ -140,55 +141,55 @@ func (p *Process) NumFDsWithContext(ctx context.Context) (int32, error) { return int32(len(fnames)), err } -func (p *Process) NumThreadsWithContext(ctx context.Context) (int32, error) { +func (p *Process) NumThreadsWithContext(_ context.Context) (int32, error) { return 0, common.ErrNotImplementedError } -func (p *Process) ThreadsWithContext(ctx context.Context) (map[int32]*cpu.TimesStat, error) { +func (p *Process) ThreadsWithContext(_ context.Context) (map[int32]*cpu.TimesStat, error) { return nil, common.ErrNotImplementedError } -func (p *Process) TimesWithContext(ctx context.Context) (*cpu.TimesStat, error) { +func (p *Process) TimesWithContext(_ context.Context) (*cpu.TimesStat, error) { return nil, common.ErrNotImplementedError } -func (p *Process) CPUAffinityWithContext(ctx context.Context) ([]int32, error) { +func (p *Process) CPUAffinityWithContext(_ context.Context) ([]int32, error) { return nil, common.ErrNotImplementedError } -func (p *Process) MemoryInfoWithContext(ctx context.Context) (*MemoryInfoStat, error) { +func (p *Process) MemoryInfoWithContext(_ context.Context) (*MemoryInfoStat, error) { return nil, 
common.ErrNotImplementedError } -func (p *Process) MemoryInfoExWithContext(ctx context.Context) (*MemoryInfoExStat, error) { +func (p *Process) MemoryInfoExWithContext(_ context.Context) (*MemoryInfoExStat, error) { return nil, common.ErrNotImplementedError } -func (p *Process) PageFaultsWithContext(ctx context.Context) (*PageFaultsStat, error) { +func (p *Process) PageFaultsWithContext(_ context.Context) (*PageFaultsStat, error) { return nil, common.ErrNotImplementedError } -func (p *Process) ChildrenWithContext(ctx context.Context) ([]*Process, error) { +func (p *Process) ChildrenWithContext(_ context.Context) ([]*Process, error) { return nil, common.ErrNotImplementedError } -func (p *Process) OpenFilesWithContext(ctx context.Context) ([]OpenFilesStat, error) { +func (p *Process) OpenFilesWithContext(_ context.Context) ([]OpenFilesStat, error) { return nil, common.ErrNotImplementedError } -func (p *Process) ConnectionsWithContext(ctx context.Context) ([]net.ConnectionStat, error) { +func (p *Process) ConnectionsWithContext(_ context.Context) ([]net.ConnectionStat, error) { return nil, common.ErrNotImplementedError } -func (p *Process) ConnectionsMaxWithContext(ctx context.Context, max int) ([]net.ConnectionStat, error) { +func (p *Process) ConnectionsMaxWithContext(_ context.Context, _ int) ([]net.ConnectionStat, error) { return nil, common.ErrNotImplementedError } -func (p *Process) MemoryMapsWithContext(ctx context.Context, grouped bool) (*[]MemoryMapsStat, error) { +func (p *Process) MemoryMapsWithContext(_ context.Context, _ bool) (*[]MemoryMapsStat, error) { return nil, common.ErrNotImplementedError } -func (p *Process) EnvironWithContext(ctx context.Context) ([]string, error) { +func (p *Process) EnvironWithContext(_ context.Context) ([]string, error) { return nil, common.ErrNotImplementedError } @@ -246,10 +247,7 @@ func (p *Process) fillFromCmdlineWithContext(ctx context.Context) (string, error return "", err } ret := strings.FieldsFunc(string(cmdline), func(r rune) bool { - if r == '\u0000' { - return true - } - return false + return r == '\u0000' }) return strings.Join(ret, " "), nil diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_windows.go b/vendor/github.com/shirou/gopsutil/v4/process/process_windows.go similarity index 84% rename from vendor/github.com/shirou/gopsutil/v3/process/process_windows.go rename to vendor/github.com/shirou/gopsutil/v4/process/process_windows.go index 14ed0309f..b4748d38e 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_windows.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_windows.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build windows -// +build windows package process @@ -12,16 +12,16 @@ import ( "os" "path/filepath" "reflect" - "strings" "syscall" "time" "unicode/utf16" "unsafe" - "github.com/shirou/gopsutil/v3/cpu" - "github.com/shirou/gopsutil/v3/internal/common" - "github.com/shirou/gopsutil/v3/net" "golang.org/x/sys/windows" + + "github.com/shirou/gopsutil/v4/cpu" + "github.com/shirou/gopsutil/v4/internal/common" + "github.com/shirou/gopsutil/v4/net" ) type Signal = syscall.Signal @@ -43,6 +43,7 @@ var ( procGetPriorityClass = common.Modkernel32.NewProc("GetPriorityClass") procGetProcessIoCounters = common.Modkernel32.NewProc("GetProcessIoCounters") procGetNativeSystemInfo = common.Modkernel32.NewProc("GetNativeSystemInfo") + procGetProcessHandleCount = common.Modkernel32.NewProc("GetProcessHandleCount") processorArchitecture uint ) @@ -240,11 +241,11 @@ func init() { 
0) } -func pidsWithContext(ctx context.Context) ([]int32, error) { +func pidsWithContext(_ context.Context) ([]int32, error) { // inspired by https://gist.github.com/henkman/3083408 // and https://github.com/giampaolo/psutil/blob/1c3a15f637521ba5c0031283da39c733fda53e4c/psutil/arch/windows/process_info.c#L315-L329 var ret []int32 - var read uint32 = 0 + var read uint32 var psSize uint32 = 1024 const dwordSize uint32 = 4 @@ -253,7 +254,7 @@ func pidsWithContext(ctx context.Context) ([]int32, error) { if err := windows.EnumProcesses(ps, &read); err != nil { return nil, err } - if uint32(len(ps)) == read { // ps buffer was too small to host every results, retry with a bigger one + if uint32(len(ps)) == read/dwordSize { // ps buffer was too small to host every results, retry with a bigger one psSize += 1024 continue } @@ -287,10 +288,10 @@ func PidExistsWithContext(ctx context.Context, pid int32) (bool, error) { return false, err } h, err := windows.OpenProcess(windows.SYNCHRONIZE, false, uint32(pid)) - if err == windows.ERROR_ACCESS_DENIED { + if errors.Is(err, windows.ERROR_ACCESS_DENIED) { return true, nil } - if err == windows.ERROR_INVALID_PARAMETER { + if errors.Is(err, windows.ERROR_INVALID_PARAMETER) { return false, nil } if err != nil { @@ -301,7 +302,7 @@ func PidExistsWithContext(ctx context.Context, pid int32) (bool, error) { return event == uint32(windows.WAIT_TIMEOUT), err } -func (p *Process) PpidWithContext(ctx context.Context) (int32, error) { +func (p *Process) PpidWithContext(_ context.Context) (int32, error) { // if cached already, return from cache cachedPpid := p.getPpid() if cachedPpid != 0 { @@ -329,17 +330,17 @@ func (p *Process) NameWithContext(ctx context.Context) (string, error) { exe, err := p.ExeWithContext(ctx) if err != nil { - return "", fmt.Errorf("could not get Name: %s", err) + return "", fmt.Errorf("could not get Name: %w", err) } return filepath.Base(exe), nil } -func (p *Process) TgidWithContext(ctx context.Context) (int32, error) { +func (p *Process) TgidWithContext(_ context.Context) (int32, error) { return 0, common.ErrNotImplementedError } -func (p *Process) ExeWithContext(ctx context.Context) (string, error) { +func (p *Process) ExeWithContext(_ context.Context) (string, error) { c, err := windows.OpenProcess(processQueryInformation, false, uint32(p.Pid)) if err != nil { return "", err @@ -356,20 +357,20 @@ func (p *Process) ExeWithContext(ctx context.Context) (string, error) { if ret == 0 { return "", err } - return windows.UTF16ToString(buf[:]), nil + return windows.UTF16ToString(buf), nil } // XP fallback ret, _, err := procGetProcessImageFileNameW.Call(uintptr(c), uintptr(unsafe.Pointer(&buf[0])), uintptr(size)) if ret == 0 { return "", err } - return common.ConvertDOSPath(windows.UTF16ToString(buf[:])), nil + return common.ConvertDOSPath(windows.UTF16ToString(buf)), nil } func (p *Process) CmdlineWithContext(_ context.Context) (string, error) { cmdline, err := getProcessCommandLine(p.Pid) if err != nil { - return "", fmt.Errorf("could not get CommandLine: %s", err) + return "", fmt.Errorf("could not get CommandLine: %w", err) } return cmdline, nil } @@ -379,13 +380,33 @@ func (p *Process) CmdlineSliceWithContext(ctx context.Context) ([]string, error) if err != nil { return nil, err } - return strings.Split(cmdline, " "), nil + return parseCmdline(cmdline) } -func (p *Process) createTimeWithContext(ctx context.Context) (int64, error) { +func parseCmdline(cmdline string) ([]string, error) { + cmdlineptr, err := windows.UTF16PtrFromString(cmdline) 
+ if err != nil { + return nil, err + } + + var argc int32 + argvptr, err := windows.CommandLineToArgv(cmdlineptr, &argc) + if err != nil { + return nil, err + } + defer windows.LocalFree(windows.Handle(uintptr(unsafe.Pointer(argvptr)))) + + argv := make([]string, argc) + for i, v := range (*argvptr)[:argc] { + argv[i] = windows.UTF16ToString((*v)[:]) + } + return argv, nil +} + +func (p *Process) createTimeWithContext(_ context.Context) (int64, error) { ru, err := getRusage(p.Pid) if err != nil { - return 0, fmt.Errorf("could not get CreationDate: %s", err) + return 0, fmt.Errorf("could not get CreationDate: %w", err) } return ru.CreationTime.Nanoseconds() / 1000000, nil @@ -393,7 +414,7 @@ func (p *Process) createTimeWithContext(ctx context.Context) (int64, error) { func (p *Process) CwdWithContext(_ context.Context) (string, error) { h, err := windows.OpenProcess(processQueryInformation|windows.PROCESS_VM_READ, false, uint32(p.Pid)) - if err == windows.ERROR_ACCESS_DENIED || err == windows.ERROR_INVALID_PARAMETER { + if errors.Is(err, windows.ERROR_ACCESS_DENIED) || errors.Is(err, windows.ERROR_INVALID_PARAMETER) { return "", nil } if err != nil { @@ -435,15 +456,15 @@ func (p *Process) CwdWithContext(_ context.Context) (string, error) { return "", nil } -func (p *Process) StatusWithContext(ctx context.Context) ([]string, error) { +func (p *Process) StatusWithContext(_ context.Context) ([]string, error) { return []string{""}, common.ErrNotImplementedError } -func (p *Process) ForegroundWithContext(ctx context.Context) (bool, error) { +func (p *Process) ForegroundWithContext(_ context.Context) (bool, error) { return false, common.ErrNotImplementedError } -func (p *Process) UsernameWithContext(ctx context.Context) (string, error) { +func (p *Process) UsernameWithContext(_ context.Context) (string, error) { pid := p.Pid c, err := windows.OpenProcess(processQueryInformation, false, uint32(pid)) if err != nil { @@ -466,19 +487,19 @@ func (p *Process) UsernameWithContext(ctx context.Context) (string, error) { return domain + "\\" + user, err } -func (p *Process) UidsWithContext(ctx context.Context) ([]int32, error) { +func (p *Process) UidsWithContext(_ context.Context) ([]uint32, error) { return nil, common.ErrNotImplementedError } -func (p *Process) GidsWithContext(ctx context.Context) ([]int32, error) { +func (p *Process) GidsWithContext(_ context.Context) ([]uint32, error) { return nil, common.ErrNotImplementedError } -func (p *Process) GroupsWithContext(ctx context.Context) ([]int32, error) { +func (p *Process) GroupsWithContext(_ context.Context) ([]uint32, error) { return nil, common.ErrNotImplementedError } -func (p *Process) TerminalWithContext(ctx context.Context) (string, error) { +func (p *Process) TerminalWithContext(_ context.Context) (string, error) { return "", common.ErrNotImplementedError } @@ -494,7 +515,7 @@ var priorityClasses = map[int]int32{ 0x00000100: 24, // REALTIME_PRIORITY_CLASS } -func (p *Process) NiceWithContext(ctx context.Context) (int32, error) { +func (p *Process) NiceWithContext(_ context.Context) (int32, error) { c, err := windows.OpenProcess(processQueryInformation, false, uint32(p.Pid)) if err != nil { return 0, err @@ -511,19 +532,19 @@ func (p *Process) NiceWithContext(ctx context.Context) (int32, error) { return priority, nil } -func (p *Process) IOniceWithContext(ctx context.Context) (int32, error) { +func (p *Process) IOniceWithContext(_ context.Context) (int32, error) { return 0, common.ErrNotImplementedError } -func (p *Process) 
RlimitWithContext(ctx context.Context) ([]RlimitStat, error) { +func (p *Process) RlimitWithContext(_ context.Context) ([]RlimitStat, error) { return nil, common.ErrNotImplementedError } -func (p *Process) RlimitUsageWithContext(ctx context.Context, gatherUsed bool) ([]RlimitStat, error) { +func (p *Process) RlimitUsageWithContext(_ context.Context, _ bool) ([]RlimitStat, error) { return nil, common.ErrNotImplementedError } -func (p *Process) IOCountersWithContext(ctx context.Context) (*IOCountersStat, error) { +func (p *Process) IOCountersWithContext(_ context.Context) (*IOCountersStat, error) { c, err := windows.OpenProcess(processQueryInformation, false, uint32(p.Pid)) if err != nil { return nil, err @@ -544,15 +565,28 @@ func (p *Process) IOCountersWithContext(ctx context.Context) (*IOCountersStat, e return stats, nil } -func (p *Process) NumCtxSwitchesWithContext(ctx context.Context) (*NumCtxSwitchesStat, error) { +func (p *Process) NumCtxSwitchesWithContext(_ context.Context) (*NumCtxSwitchesStat, error) { return nil, common.ErrNotImplementedError } -func (p *Process) NumFDsWithContext(ctx context.Context) (int32, error) { - return 0, common.ErrNotImplementedError +// NumFDsWithContext returns the number of handles for a process on Windows, +// not the number of file descriptors (FDs). +func (p *Process) NumFDsWithContext(_ context.Context) (int32, error) { + handle, err := windows.OpenProcess(processQueryInformation, false, uint32(p.Pid)) + if err != nil { + return 0, err + } + defer windows.CloseHandle(handle) + + var handleCount uint32 + ret, _, err := procGetProcessHandleCount.Call(uintptr(handle), uintptr(unsafe.Pointer(&handleCount))) + if ret == 0 { + return 0, err + } + return int32(handleCount), nil } -func (p *Process) NumThreadsWithContext(ctx context.Context) (int32, error) { +func (p *Process) NumThreadsWithContext(_ context.Context) (int32, error) { ppid, ret, _, err := getFromSnapProcess(p.Pid) if err != nil { return 0, err @@ -560,18 +594,18 @@ func (p *Process) NumThreadsWithContext(ctx context.Context) (int32, error) { // if no errors and not cached already, cache ppid p.parent = ppid - if 0 == p.getPpid() { + if p.getPpid() == 0 { p.setPpid(ppid) } return ret, nil } -func (p *Process) ThreadsWithContext(ctx context.Context) (map[int32]*cpu.TimesStat, error) { +func (p *Process) ThreadsWithContext(_ context.Context) (map[int32]*cpu.TimesStat, error) { return nil, common.ErrNotImplementedError } -func (p *Process) TimesWithContext(ctx context.Context) (*cpu.TimesStat, error) { +func (p *Process) TimesWithContext(_ context.Context) (*cpu.TimesStat, error) { sysTimes, err := getProcessCPUTimes(p.Pid) if err != nil { return nil, err @@ -595,11 +629,11 @@ func (p *Process) TimesWithContext(ctx context.Context) (*cpu.TimesStat, error) }, nil } -func (p *Process) CPUAffinityWithContext(ctx context.Context) ([]int32, error) { +func (p *Process) CPUAffinityWithContext(_ context.Context) ([]int32, error) { return nil, common.ErrNotImplementedError } -func (p *Process) MemoryInfoWithContext(ctx context.Context) (*MemoryInfoStat, error) { +func (p *Process) MemoryInfoWithContext(_ context.Context) (*MemoryInfoStat, error) { mem, err := getMemoryInfo(p.Pid) if err != nil { return nil, err @@ -613,12 +647,22 @@ func (p *Process) MemoryInfoWithContext(ctx context.Context) (*MemoryInfoStat, e return ret, nil } -func (p *Process) MemoryInfoExWithContext(ctx context.Context) (*MemoryInfoExStat, error) { +func (p *Process) MemoryInfoExWithContext(_ context.Context) (*MemoryInfoExStat, 
error) { return nil, common.ErrNotImplementedError } -func (p *Process) PageFaultsWithContext(ctx context.Context) (*PageFaultsStat, error) { - return nil, common.ErrNotImplementedError +func (p *Process) PageFaultsWithContext(_ context.Context) (*PageFaultsStat, error) { + mem, err := getMemoryInfo(p.Pid) + if err != nil { + return nil, err + } + + ret := &PageFaultsStat{ + // Since Windows does not distinguish between Major and Minor faults, all faults are treated as Major + MajorFaults: uint64(mem.PageFaultCount), + } + + return ret, nil } func (p *Process) ChildrenWithContext(ctx context.Context) ([]*Process, error) { @@ -744,19 +788,19 @@ func (p *Process) ConnectionsWithContext(ctx context.Context) ([]net.ConnectionS return net.ConnectionsPidWithContext(ctx, "all", p.Pid) } -func (p *Process) ConnectionsMaxWithContext(ctx context.Context, max int) ([]net.ConnectionStat, error) { +func (p *Process) ConnectionsMaxWithContext(_ context.Context, _ int) ([]net.ConnectionStat, error) { return nil, common.ErrNotImplementedError } -func (p *Process) MemoryMapsWithContext(ctx context.Context, grouped bool) (*[]MemoryMapsStat, error) { +func (p *Process) MemoryMapsWithContext(_ context.Context, _ bool) (*[]MemoryMapsStat, error) { return nil, common.ErrNotImplementedError } -func (p *Process) SendSignalWithContext(ctx context.Context, sig syscall.Signal) error { +func (p *Process) SendSignalWithContext(_ context.Context, _ syscall.Signal) error { return common.ErrNotImplementedError } -func (p *Process) SuspendWithContext(ctx context.Context) error { +func (p *Process) SuspendWithContext(_ context.Context) error { c, err := windows.OpenProcess(windows.PROCESS_SUSPEND_RESUME, false, uint32(p.Pid)) if err != nil { return err @@ -772,7 +816,7 @@ func (p *Process) SuspendWithContext(ctx context.Context) error { return nil } -func (p *Process) ResumeWithContext(ctx context.Context) error { +func (p *Process) ResumeWithContext(_ context.Context) error { c, err := windows.OpenProcess(windows.PROCESS_SUSPEND_RESUME, false, uint32(p.Pid)) if err != nil { return err @@ -788,7 +832,7 @@ func (p *Process) ResumeWithContext(ctx context.Context) error { return nil } -func (p *Process) TerminateWithContext(ctx context.Context) error { +func (p *Process) TerminateWithContext(_ context.Context) error { proc, err := windows.OpenProcess(windows.PROCESS_TERMINATE, false, uint32(p.Pid)) if err != nil { return err @@ -798,18 +842,19 @@ func (p *Process) TerminateWithContext(ctx context.Context) error { return err } -func (p *Process) KillWithContext(ctx context.Context) error { +func (p *Process) KillWithContext(_ context.Context) error { process, err := os.FindProcess(int(p.Pid)) if err != nil { return err } + defer process.Release() return process.Kill() } func (p *Process) EnvironWithContext(ctx context.Context) ([]string, error) { - envVars, err := getProcessEnvironmentVariables(p.Pid, ctx) + envVars, err := getProcessEnvironmentVariables(ctx, p.Pid) if err != nil { - return nil, fmt.Errorf("could not get environment variables: %s", err) + return nil, fmt.Errorf("could not get environment variables: %w", err) } return envVars, nil } @@ -829,7 +874,7 @@ func (p *Process) setPpid(ppid int32) { p.parent = ppid } -func getFromSnapProcess(pid int32) (int32, int32, string, error) { +func getFromSnapProcess(pid int32) (int32, int32, string, error) { //nolint:unparam //FIXME snap, err := windows.CreateToolhelp32Snapshot(windows.TH32CS_SNAPPROCESS, uint32(pid)) if err != nil { return 0, 0, "", err @@ -837,7 +882,8 @@ 
func getFromSnapProcess(pid int32) (int32, int32, string, error) { defer windows.CloseHandle(snap) var pe32 windows.ProcessEntry32 pe32.Size = uint32(unsafe.Sizeof(pe32)) - if err = windows.Process32First(snap, &pe32); err != nil { + err = windows.Process32First(snap, &pe32) + if err != nil { return 0, 0, "", err } for { @@ -857,7 +903,7 @@ func ProcessesWithContext(ctx context.Context) ([]*Process, error) { pids, err := PidsWithContext(ctx) if err != nil { - return out, fmt.Errorf("could not get Processes %s", err) + return out, fmt.Errorf("could not get Processes %w", err) } for _, pid := range pids { @@ -913,7 +959,7 @@ func getProcessMemoryInfo(h windows.Handle, mem *PROCESS_MEMORY_COUNTERS) (err e return } -type SYSTEM_TIMES struct { +type SYSTEM_TIMES struct { //nolint:revive //FIXME CreateTime syscall.Filetime ExitTime syscall.Filetime KernelTime syscall.Filetime @@ -948,13 +994,13 @@ func getUserProcessParams32(handle windows.Handle) (rtlUserProcessParameters32, buf := readProcessMemory(syscall.Handle(handle), true, pebAddress, uint(unsafe.Sizeof(processEnvironmentBlock32{}))) if len(buf) != int(unsafe.Sizeof(processEnvironmentBlock32{})) { - return rtlUserProcessParameters32{}, fmt.Errorf("cannot read process PEB") + return rtlUserProcessParameters32{}, errors.New("cannot read process PEB") } peb := (*processEnvironmentBlock32)(unsafe.Pointer(&buf[0])) userProcessAddress := uint64(peb.ProcessParameters) buf = readProcessMemory(syscall.Handle(handle), true, userProcessAddress, uint(unsafe.Sizeof(rtlUserProcessParameters32{}))) if len(buf) != int(unsafe.Sizeof(rtlUserProcessParameters32{})) { - return rtlUserProcessParameters32{}, fmt.Errorf("cannot read user process parameters") + return rtlUserProcessParameters32{}, errors.New("cannot read user process parameters") } return *(*rtlUserProcessParameters32)(unsafe.Pointer(&buf[0])), nil } @@ -967,13 +1013,13 @@ func getUserProcessParams64(handle windows.Handle) (rtlUserProcessParameters64, buf := readProcessMemory(syscall.Handle(handle), false, pebAddress, uint(unsafe.Sizeof(processEnvironmentBlock64{}))) if len(buf) != int(unsafe.Sizeof(processEnvironmentBlock64{})) { - return rtlUserProcessParameters64{}, fmt.Errorf("cannot read process PEB") + return rtlUserProcessParameters64{}, errors.New("cannot read process PEB") } peb := (*processEnvironmentBlock64)(unsafe.Pointer(&buf[0])) userProcessAddress := peb.ProcessParameters buf = readProcessMemory(syscall.Handle(handle), false, userProcessAddress, uint(unsafe.Sizeof(rtlUserProcessParameters64{}))) if len(buf) != int(unsafe.Sizeof(rtlUserProcessParameters64{})) { - return rtlUserProcessParameters64{}, fmt.Errorf("cannot read user process parameters") + return rtlUserProcessParameters64{}, errors.New("cannot read user process parameters") } return *(*rtlUserProcessParameters64)(unsafe.Pointer(&buf[0])), nil } @@ -1023,9 +1069,9 @@ func is32BitProcess(h windows.Handle) bool { return procIs32Bits } -func getProcessEnvironmentVariables(pid int32, ctx context.Context) ([]string, error) { +func getProcessEnvironmentVariables(ctx context.Context, pid int32) ([]string, error) { h, err := windows.OpenProcess(processQueryInformation|windows.PROCESS_VM_READ, false, uint32(pid)) - if err == windows.ERROR_ACCESS_DENIED || err == windows.ERROR_INVALID_PARAMETER { + if errors.Is(err, windows.ERROR_ACCESS_DENIED) || errors.Is(err, windows.ERROR_INVALID_PARAMETER) { return nil, nil } if err != nil { @@ -1109,7 +1155,7 @@ func (p *processReader) Read(buf []byte) (int, error) { func 
getProcessCommandLine(pid int32) (string, error) { h, err := windows.OpenProcess(processQueryInformation|windows.PROCESS_VM_READ, false, uint32(pid)) - if err == windows.ERROR_ACCESS_DENIED || err == windows.ERROR_INVALID_PARAMETER { + if errors.Is(err, windows.ERROR_ACCESS_DENIED) || errors.Is(err, windows.ERROR_INVALID_PARAMETER) { return "", nil } if err != nil { diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_windows_32bit.go b/vendor/github.com/shirou/gopsutil/v4/process/process_windows_32bit.go similarity index 52% rename from vendor/github.com/shirou/gopsutil/v3/process/process_windows_32bit.go rename to vendor/github.com/shirou/gopsutil/v4/process/process_windows_32bit.go index db4d45334..911351b16 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_windows_32bit.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_windows_32bit.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build (windows && 386) || (windows && arm) -// +build windows,386 windows,arm package process @@ -8,11 +8,12 @@ import ( "syscall" "unsafe" - "github.com/shirou/gopsutil/v3/internal/common" "golang.org/x/sys/windows" + + "github.com/shirou/gopsutil/v4/internal/common" ) -type PROCESS_MEMORY_COUNTERS struct { +type PROCESS_MEMORY_COUNTERS struct { //nolint:revive //FIXME CB uint32 PageFaultCount uint32 PeakWorkingSetSize uint32 @@ -39,30 +40,27 @@ func queryPebAddress(procHandle syscall.Handle, is32BitProcess bool) (uint64, er ) if status := windows.NTStatus(ret); status == windows.STATUS_SUCCESS { return uint64(info.PebBaseAddress), nil - } else { - return 0, windows.NTStatus(ret) } - } else { - // we are on a 32-bit process reading an external 64-bit process - if common.ProcNtWow64QueryInformationProcess64.Find() == nil { // avoid panic - var info processBasicInformation64 + return 0, windows.NTStatus(ret) + } + // we are on a 32-bit process reading an external 64-bit process + if common.ProcNtWow64QueryInformationProcess64.Find() != nil { + return 0, errors.New("can't find API to query 64 bit process from 32 bit") + } + // avoid panic + var info processBasicInformation64 - ret, _, _ := common.ProcNtWow64QueryInformationProcess64.Call( - uintptr(procHandle), - uintptr(common.ProcessBasicInformation), - uintptr(unsafe.Pointer(&info)), - uintptr(unsafe.Sizeof(info)), - uintptr(0), - ) - if status := windows.NTStatus(ret); status == windows.STATUS_SUCCESS { - return info.PebBaseAddress, nil - } else { - return 0, windows.NTStatus(ret) - } - } else { - return 0, errors.New("can't find API to query 64 bit process from 32 bit") - } + ret, _, _ := common.ProcNtWow64QueryInformationProcess64.Call( + uintptr(procHandle), + uintptr(common.ProcessBasicInformation), + uintptr(unsafe.Pointer(&info)), + uintptr(unsafe.Sizeof(info)), + uintptr(0), + ) + if status := windows.NTStatus(ret); status == windows.STATUS_SUCCESS { + return info.PebBaseAddress, nil } + return 0, windows.NTStatus(ret) } func readProcessMemory(h syscall.Handle, is32BitProcess bool, address uint64, size uint) []byte { @@ -81,25 +79,23 @@ func readProcessMemory(h syscall.Handle, is32BitProcess bool, address uint64, si if int(ret) >= 0 && read > 0 { return buffer[:read] } - } else { // reading a 64-bit process from a 32-bit one - if common.ProcNtWow64ReadVirtualMemory64.Find() == nil { // avoid panic - var read uint64 + } else if common.ProcNtWow64ReadVirtualMemory64.Find() == nil { // avoid panic + var read uint64 - buffer := make([]byte, size) + buffer := make([]byte, size) - ret, _, _ := 
common.ProcNtWow64ReadVirtualMemory64.Call( - uintptr(h), - uintptr(address&0xFFFFFFFF), // the call expects a 64-bit value - uintptr(address>>32), - uintptr(unsafe.Pointer(&buffer[0])), - uintptr(size), // the call expects a 64-bit value - uintptr(0), // but size is 32-bit so pass zero as the high dword - uintptr(unsafe.Pointer(&read)), - ) - if int(ret) >= 0 && read > 0 { - return buffer[:uint(read)] - } + ret, _, _ := common.ProcNtWow64ReadVirtualMemory64.Call( + uintptr(h), + uintptr(address&0xFFFFFFFF), // the call expects a 64-bit value + uintptr(address>>32), + uintptr(unsafe.Pointer(&buffer[0])), + uintptr(size), // the call expects a 64-bit value + uintptr(0), // but size is 32-bit so pass zero as the high dword + uintptr(unsafe.Pointer(&read)), + ) + if int(ret) >= 0 && read > 0 { + return buffer[:uint(read)] } } diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_windows_64bit.go b/vendor/github.com/shirou/gopsutil/v4/process/process_windows_64bit.go similarity index 68% rename from vendor/github.com/shirou/gopsutil/v3/process/process_windows_64bit.go rename to vendor/github.com/shirou/gopsutil/v4/process/process_windows_64bit.go index 74c6212cf..8cc26c375 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_windows_64bit.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_windows_64bit.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build (windows && amd64) || (windows && arm64) -// +build windows,amd64 windows,arm64 package process @@ -7,11 +7,12 @@ import ( "syscall" "unsafe" - "github.com/shirou/gopsutil/v3/internal/common" "golang.org/x/sys/windows" + + "github.com/shirou/gopsutil/v4/internal/common" ) -type PROCESS_MEMORY_COUNTERS struct { +type PROCESS_MEMORY_COUNTERS struct { //nolint:revive //FIXME CB uint32 PageFaultCount uint32 PeakWorkingSetSize uint64 @@ -38,26 +39,23 @@ func queryPebAddress(procHandle syscall.Handle, is32BitProcess bool) (uint64, er ) if status := windows.NTStatus(ret); status == windows.STATUS_SUCCESS { return uint64(wow64), nil - } else { - return 0, windows.NTStatus(ret) } - } else { - // we are on a 64-bit process reading an external 64-bit process - var info processBasicInformation64 + return 0, windows.NTStatus(ret) + } + // we are on a 64-bit process reading an external 64-bit process + var info processBasicInformation64 - ret, _, _ := common.ProcNtQueryInformationProcess.Call( - uintptr(procHandle), - uintptr(common.ProcessBasicInformation), - uintptr(unsafe.Pointer(&info)), - uintptr(unsafe.Sizeof(info)), - uintptr(0), - ) - if status := windows.NTStatus(ret); status == windows.STATUS_SUCCESS { - return info.PebBaseAddress, nil - } else { - return 0, windows.NTStatus(ret) - } + ret, _, _ := common.ProcNtQueryInformationProcess.Call( + uintptr(procHandle), + uintptr(common.ProcessBasicInformation), + uintptr(unsafe.Pointer(&info)), + uintptr(unsafe.Sizeof(info)), + uintptr(0), + ) + if status := windows.NTStatus(ret); status == windows.STATUS_SUCCESS { + return info.PebBaseAddress, nil } + return 0, windows.NTStatus(ret) } func readProcessMemory(procHandle syscall.Handle, _ bool, address uint64, size uint) []byte { diff --git a/vendor/github.com/shoenig/go-m1cpu/.golangci.yaml b/vendor/github.com/shoenig/go-m1cpu/.golangci.yaml deleted file mode 100644 index dc6fefb97..000000000 --- a/vendor/github.com/shoenig/go-m1cpu/.golangci.yaml +++ /dev/null @@ -1,12 +0,0 @@ -run: - timeout: 5m -linters: - enable: - - gofmt - - errcheck - - errname - - errorlint - - bodyclose - - durationcheck - - 
whitespace - diff --git a/vendor/github.com/shoenig/go-m1cpu/LICENSE b/vendor/github.com/shoenig/go-m1cpu/LICENSE deleted file mode 100644 index e87a115e4..000000000 --- a/vendor/github.com/shoenig/go-m1cpu/LICENSE +++ /dev/null @@ -1,363 +0,0 @@ -Mozilla Public License, version 2.0 - -1. Definitions - -1.1. "Contributor" - - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. - -1.2. "Contributor Version" - - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor's Contribution. - -1.3. "Contribution" - - means Covered Software of a particular Contributor. - -1.4. "Covered Software" - - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. - -1.5. "Incompatible With Secondary Licenses" - means - - a. that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or - - b. that the Covered Software was made available under the terms of - version 1.1 or earlier of the License, but not also under the terms of - a Secondary License. - -1.6. "Executable Form" - - means any form of the work other than Source Code Form. - -1.7. "Larger Work" - - means a work that combines Covered Software with other material, in a - separate file or files, that is not Covered Software. - -1.8. "License" - - means this document. - -1.9. "Licensable" - - means having the right to grant, to the maximum extent possible, whether - at the time of the initial grant or subsequently, any and all of the - rights conveyed by this License. - -1.10. "Modifications" - - means any of the following: - - a. any file in Source Code Form that results from an addition to, - deletion from, or modification of the contents of Covered Software; or - - b. any new file in Source Code Form that contains any Covered Software. - -1.11. "Patent Claims" of a Contributor - - means any patent claim(s), including without limitation, method, - process, and apparatus claims, in any patent Licensable by such - Contributor that would be infringed, but for the grant of the License, - by the making, using, selling, offering for sale, having made, import, - or transfer of either its Contributions or its Contributor Version. - -1.12. "Secondary License" - - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. - -1.13. "Source Code Form" - - means the form of the work preferred for making modifications. - -1.14. "You" (or "Your") - - means an individual or a legal entity exercising rights under this - License. For legal entities, "You" includes any entity that controls, is - controlled by, or is under common control with You. For purposes of this - definition, "control" means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - - -2. License Grants and Conditions - -2.1. Grants - - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - a. 
under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or - as part of a Larger Work; and - - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its - Contributions or its Contributor Version. - -2.2. Effective Date - - The licenses granted in Section 2.1 with respect to any Contribution - become effective for each Contribution on the date the Contributor first - distributes such Contribution. - -2.3. Limitations on Grant Scope - - The licenses granted in this Section 2 are the only rights granted under - this License. No additional rights or licenses will be implied from the - distribution or licensing of Covered Software under this License. - Notwithstanding Section 2.1(b) above, no patent license is granted by a - Contributor: - - a. for any code that a Contributor has removed from Covered Software; or - - b. for infringements caused by: (i) Your and any other third party's - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - - c. under Patent Claims infringed by Covered Software in the absence of - its Contributions. - - This License does not grant any rights in the trademarks, service marks, - or logos of any Contributor (except as may be necessary to comply with - the notice requirements in Section 3.4). - -2.4. Subsequent Licenses - - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this - License (see Section 10.2) or under the terms of a Secondary License (if - permitted under the terms of Section 3.3). - -2.5. Representation - - Each Contributor represents that the Contributor believes its - Contributions are its original creation(s) or it has sufficient rights to - grant the rights to its Contributions conveyed by this License. - -2.6. Fair Use - - This License is not intended to limit any rights You have under - applicable copyright doctrines of fair use, fair dealing, or other - equivalents. - -2.7. Conditions - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - - -3. Responsibilities - -3.1. Distribution of Source Form - - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under - the terms of this License. You must inform recipients that the Source - Code Form of the Covered Software is governed by the terms of this - License, and how they can obtain a copy of this License. You may not - attempt to alter or restrict the recipients' rights in the Source Code - Form. - -3.2. Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. 
You may distribute such Executable Form under the terms of this - License, or sublicense it under different terms, provided that the - license for the Executable Form does not attempt to limit or alter the - recipients' rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for - the Covered Software. If the Larger Work is a combination of Covered - Software with a work governed by one or more Secondary Licenses, and the - Covered Software is not Incompatible With Secondary Licenses, this - License permits You to additionally distribute such Covered Software - under the terms of such Secondary License(s), so that the recipient of - the Larger Work may, at their option, further distribute the Covered - Software under the terms of either this License or such Secondary - License(s). - -3.4. Notices - - You may not remove or alter the substance of any license notices - (including copyright notices, patent notices, disclaimers of warranty, or - limitations of liability) contained within the Source Code Form of the - Covered Software, except that You may alter any license notices to the - extent required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on - behalf of any Contributor. You must make it absolutely clear that any - such warranty, support, indemnity, or liability obligation is offered by - You alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - -4. Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, - judicial order, or regulation then You must: (a) comply with the terms of - this License to the maximum extent possible; and (b) describe the - limitations and the code they affect. Such description must be placed in a - text file included with all distributions of the Covered Software under - this License. Except to the extent prohibited by statute or regulation, - such description must be sufficiently detailed for a recipient of ordinary - skill to be able to understand it. - -5. Termination - -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing - basis, if such Contributor fails to notify You of the non-compliance by - some reasonable means prior to 60 days after You have come back into - compliance. 
Moreover, Your grants from a particular Contributor are - reinstated on an ongoing basis if such Contributor notifies You of the - non-compliance by some reasonable means, this is the first time You have - received notice of non-compliance with this License from such - Contributor, and You become compliant prior to 30 days after Your receipt - of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, - counter-claims, and cross-claims) alleging that a Contributor Version - directly or indirectly infringes any patent, then the rights granted to - You by any and all Contributors for the Covered Software under Section - 2.1 of this License shall terminate. - -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. Disclaimer of Warranty - - Covered Software is provided under this License on an "as is" basis, - without warranty of any kind, either expressed, implied, or statutory, - including, without limitation, warranties that the Covered Software is free - of defects, merchantable, fit for a particular purpose or non-infringing. - The entire risk as to the quality and performance of the Covered Software - is with You. Should any Covered Software prove defective in any respect, - You (not any Contributor) assume the cost of any necessary servicing, - repair, or correction. This disclaimer of warranty constitutes an essential - part of this License. No use of any Covered Software is authorized under - this License except under this disclaimer. - -7. Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from - such party's negligence to the extent applicable law prohibits such - limitation. Some jurisdictions do not allow the exclusion or limitation of - incidental or consequential damages, so this exclusion and limitation may - not apply to You. - -8. Litigation - - Any litigation relating to this License may be brought only in the courts - of a jurisdiction where the defendant maintains its principal place of - business and such litigation shall be governed by laws of that - jurisdiction, without reference to its conflict-of-law provisions. Nothing - in this Section shall prevent a party's ability to bring cross-claims or - counter-claims. - -9. Miscellaneous - - This License represents the complete agreement concerning the subject - matter hereof. If any provision of this License is held to be - unenforceable, such provision shall be reformed only to the extent - necessary to make it enforceable. 
Any law or regulation which provides that - the language of a contract shall be construed against the drafter shall not - be used to construe this License against a Contributor. - - -10. Versions of the License - -10.1. New Versions - - Mozilla Foundation is the license steward. Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - -10.2. Effect of New Versions - - You may distribute the Covered Software under the terms of the version - of the License under which You originally received the Covered Software, - or under the terms of any subsequent version published by the license - steward. - -10.3. Modified Versions - - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a - modified version of this License if you rename the license and remove - any references to the name of the license steward (except to note that - such modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary - Licenses If You choose to distribute Source Code Form that is - Incompatible With Secondary Licenses under the terms of this version of - the License, the notice described in Exhibit B of this License must be - attached. - -Exhibit A - Source Code Form License Notice - - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular file, -then You may include the notice in a location (such as a LICENSE file in a -relevant directory) where a recipient would be likely to look for such a -notice. - -You may add additional accurate notices of copyright ownership. - -Exhibit B - "Incompatible With Secondary Licenses" Notice - - This Source Code Form is "Incompatible - With Secondary Licenses", as defined by - the Mozilla Public License, v. 2.0. - diff --git a/vendor/github.com/shoenig/go-m1cpu/Makefile b/vendor/github.com/shoenig/go-m1cpu/Makefile deleted file mode 100644 index 28d786397..000000000 --- a/vendor/github.com/shoenig/go-m1cpu/Makefile +++ /dev/null @@ -1,12 +0,0 @@ -SHELL = bash - -default: test - -.PHONY: test -test: - @echo "--> Running Tests ..." - @go test -v -race ./... - -vet: - @echo "--> Vet Go sources ..." - @go vet ./... diff --git a/vendor/github.com/shoenig/go-m1cpu/README.md b/vendor/github.com/shoenig/go-m1cpu/README.md deleted file mode 100644 index 399657acf..000000000 --- a/vendor/github.com/shoenig/go-m1cpu/README.md +++ /dev/null @@ -1,66 +0,0 @@ -# m1cpu - -[![Go Reference](https://pkg.go.dev/badge/github.com/shoenig/go-m1cpu.svg)](https://pkg.go.dev/github.com/shoenig/go-m1cpu) -[![MPL License](https://img.shields.io/github/license/shoenig/go-m1cpu?color=g&style=flat-square)](https://github.com/shoenig/go-m1cpu/blob/main/LICENSE) -[![Run CI Tests](https://github.com/shoenig/go-m1cpu/actions/workflows/ci.yaml/badge.svg)](https://github.com/shoenig/go-m1cpu/actions/workflows/ci.yaml) - -The `go-m1cpu` module is a library for inspecting Apple Silicon CPUs in Go. - -Use the `m1cpu` Go package for looking up the CPU frequency for Apple M1 and M2 CPUs. 
- -# Install - -```shell -go get github.com/shoenig/go-m1cpu@latest -``` - -# CGO - -This package requires the use of [CGO](https://go.dev/blog/cgo). - -Extracting the CPU properties is done via Apple's [IOKit](https://developer.apple.com/documentation/iokit?language=objc) -framework, which is accessible only through system C libraries. - -# Example - -Simple Go program to print Apple Silicon M1/M2 CPU speeds. - -```go -package main - -import ( - "fmt" - - "github.com/shoenig/go-m1cpu" -) - -func main() { - fmt.Println("Apple Silicon", m1cpu.IsAppleSilicon()) - - fmt.Println("pCore GHz", m1cpu.PCoreGHz()) - fmt.Println("eCore GHz", m1cpu.ECoreGHz()) - - fmt.Println("pCore Hz", m1cpu.PCoreHz()) - fmt.Println("eCore Hz", m1cpu.ECoreHz()) -} -``` - -Using `go test` to print out available information. - -``` -➜ go test -v -run Show -=== RUN Test_Show - cpu_test.go:42: pCore Hz 3504000000 - cpu_test.go:43: eCore Hz 2424000000 - cpu_test.go:44: pCore GHz 3.504 - cpu_test.go:45: eCore GHz 2.424 - cpu_test.go:46: pCore count 8 - cpu_test.go:47: eCoreCount 4 - cpu_test.go:50: pCore Caches 196608 131072 16777216 - cpu_test.go:53: eCore Caches 131072 65536 4194304 ---- PASS: Test_Show (0.00s) -``` - -# License - -Open source under the [MPL](LICENSE) diff --git a/vendor/github.com/shoenig/go-m1cpu/cpu.go b/vendor/github.com/shoenig/go-m1cpu/cpu.go deleted file mode 100644 index 502a8cce9..000000000 --- a/vendor/github.com/shoenig/go-m1cpu/cpu.go +++ /dev/null @@ -1,213 +0,0 @@ -//go:build darwin && arm64 && cgo - -package m1cpu - -// #cgo LDFLAGS: -framework CoreFoundation -framework IOKit -// #include -// #include -// #include -// #include -// -// #if !defined(MAC_OS_VERSION_12_0) || MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_VERSION_12_0 -// #define kIOMainPortDefault kIOMasterPortDefault -// #endif -// -// #define HzToGHz(hz) ((hz) / 1000000000.0) -// -// UInt64 global_pCoreHz; -// UInt64 global_eCoreHz; -// int global_pCoreCount; -// int global_eCoreCount; -// int global_pCoreL1InstCacheSize; -// int global_eCoreL1InstCacheSize; -// int global_pCoreL1DataCacheSize; -// int global_eCoreL1DataCacheSize; -// int global_pCoreL2CacheSize; -// int global_eCoreL2CacheSize; -// char global_brand[32]; -// -// UInt64 getFrequency(CFTypeRef typeRef) { -// CFDataRef cfData = typeRef; -// -// CFIndex size = CFDataGetLength(cfData); -// UInt8 buf[size]; -// CFDataGetBytes(cfData, CFRangeMake(0, size), buf); -// -// UInt8 b1 = buf[size-5]; -// UInt8 b2 = buf[size-6]; -// UInt8 b3 = buf[size-7]; -// UInt8 b4 = buf[size-8]; -// -// UInt64 pCoreHz = 0x00000000FFFFFFFF & ((b1<<24) | (b2 << 16) | (b3 << 8) | (b4)); -// return pCoreHz; -// } -// -// int sysctl_int(const char * name) { -// int value = -1; -// size_t size = 8; -// sysctlbyname(name, &value, &size, NULL, 0); -// return value; -// } -// -// void sysctl_string(const char * name, char * dest) { -// size_t size = 32; -// sysctlbyname(name, dest, &size, NULL, 0); -// } -// -// void initialize() { -// global_pCoreCount = sysctl_int("hw.perflevel0.physicalcpu"); -// global_eCoreCount = sysctl_int("hw.perflevel1.physicalcpu"); -// global_pCoreL1InstCacheSize = sysctl_int("hw.perflevel0.l1icachesize"); -// global_eCoreL1InstCacheSize = sysctl_int("hw.perflevel1.l1icachesize"); -// global_pCoreL1DataCacheSize = sysctl_int("hw.perflevel0.l1dcachesize"); -// global_eCoreL1DataCacheSize = sysctl_int("hw.perflevel1.l1dcachesize"); -// global_pCoreL2CacheSize = sysctl_int("hw.perflevel0.l2cachesize"); -// global_eCoreL2CacheSize = sysctl_int("hw.perflevel1.l2cachesize"); 
-// sysctl_string("machdep.cpu.brand_string", global_brand); -// -// CFMutableDictionaryRef matching = IOServiceMatching("AppleARMIODevice"); -// io_iterator_t iter; -// IOServiceGetMatchingServices(kIOMainPortDefault, matching, &iter); -// -// const size_t bufsize = 512; -// io_object_t obj; -// while ((obj = IOIteratorNext(iter))) { -// char class[bufsize]; -// IOObjectGetClass(obj, class); -// char name[bufsize]; -// IORegistryEntryGetName(obj, name); -// -// if (strncmp(name, "pmgr", bufsize) == 0) { -// CFTypeRef pCoreRef = IORegistryEntryCreateCFProperty(obj, CFSTR("voltage-states5-sram"), kCFAllocatorDefault, 0); -// CFTypeRef eCoreRef = IORegistryEntryCreateCFProperty(obj, CFSTR("voltage-states1-sram"), kCFAllocatorDefault, 0); -// -// long long pCoreHz = getFrequency(pCoreRef); -// long long eCoreHz = getFrequency(eCoreRef); -// -// global_pCoreHz = pCoreHz; -// global_eCoreHz = eCoreHz; -// return; -// } -// } -// } -// -// UInt64 eCoreHz() { -// return global_eCoreHz; -// } -// -// UInt64 pCoreHz() { -// return global_pCoreHz; -// } -// -// Float64 eCoreGHz() { -// return HzToGHz(global_eCoreHz); -// } -// -// Float64 pCoreGHz() { -// return HzToGHz(global_pCoreHz); -// } -// -// int pCoreCount() { -// return global_pCoreCount; -// } -// -// int eCoreCount() { -// return global_eCoreCount; -// } -// -// int pCoreL1InstCacheSize() { -// return global_pCoreL1InstCacheSize; -// } -// -// int pCoreL1DataCacheSize() { -// return global_pCoreL1DataCacheSize; -// } -// -// int pCoreL2CacheSize() { -// return global_pCoreL2CacheSize; -// } -// -// int eCoreL1InstCacheSize() { -// return global_eCoreL1InstCacheSize; -// } -// -// int eCoreL1DataCacheSize() { -// return global_eCoreL1DataCacheSize; -// } -// -// int eCoreL2CacheSize() { -// return global_eCoreL2CacheSize; -// } -// -// char * modelName() { -// return global_brand; -// } -import "C" - -func init() { - C.initialize() -} - -// IsAppleSilicon returns true on this platform. -func IsAppleSilicon() bool { - return true -} - -// PCoreHZ returns the max frequency in Hertz of the P-Core of an Apple Silicon CPU. -func PCoreHz() uint64 { - return uint64(C.pCoreHz()) -} - -// ECoreHZ returns the max frequency in Hertz of the E-Core of an Apple Silicon CPU. -func ECoreHz() uint64 { - return uint64(C.eCoreHz()) -} - -// PCoreGHz returns the max frequency in Gigahertz of the P-Core of an Apple Silicon CPU. -func PCoreGHz() float64 { - return float64(C.pCoreGHz()) -} - -// ECoreGHz returns the max frequency in Gigahertz of the E-Core of an Apple Silicon CPU. -func ECoreGHz() float64 { - return float64(C.eCoreGHz()) -} - -// PCoreCount returns the number of physical P (performance) cores. -func PCoreCount() int { - return int(C.pCoreCount()) -} - -// ECoreCount returns the number of physical E (efficiency) cores. -func ECoreCount() int { - return int(C.eCoreCount()) -} - -// PCoreCacheSize returns the sizes of the P (performance) core cache sizes -// in the order of -// -// - L1 instruction cache -// - L1 data cache -// - L2 cache -func PCoreCache() (int, int, int) { - return int(C.pCoreL1InstCacheSize()), - int(C.pCoreL1DataCacheSize()), - int(C.pCoreL2CacheSize()) -} - -// ECoreCacheSize returns the sizes of the E (efficiency) core cache sizes -// in the order of -// -// - L1 instruction cache -// - L1 data cache -// - L2 cache -func ECoreCache() (int, int, int) { - return int(C.eCoreL1InstCacheSize()), - int(C.eCoreL1DataCacheSize()), - int(C.eCoreL2CacheSize()) -} - -// ModelName returns the model name of the CPU. 
-func ModelName() string { - return C.GoString(C.modelName()) -} diff --git a/vendor/github.com/shoenig/go-m1cpu/incompatible.go b/vendor/github.com/shoenig/go-m1cpu/incompatible.go deleted file mode 100644 index d425025aa..000000000 --- a/vendor/github.com/shoenig/go-m1cpu/incompatible.go +++ /dev/null @@ -1,53 +0,0 @@ -//go:build !darwin || !arm64 || !cgo - -package m1cpu - -// IsAppleSilicon return false on this platform. -func IsAppleSilicon() bool { - return false -} - -// PCoreHZ requires darwin/arm64 -func PCoreHz() uint64 { - panic("m1cpu: not a darwin/arm64 system") -} - -// ECoreHZ requires darwin/arm64 -func ECoreHz() uint64 { - panic("m1cpu: not a darwin/arm64 system") -} - -// PCoreGHz requires darwin/arm64 -func PCoreGHz() float64 { - panic("m1cpu: not a darwin/arm64 system") -} - -// ECoreGHz requires darwin/arm64 -func ECoreGHz() float64 { - panic("m1cpu: not a darwin/arm64 system") -} - -// PCoreCount requires darwin/arm64 -func PCoreCount() int { - panic("m1cpu: not a darwin/arm64 system") -} - -// ECoreCount requires darwin/arm64 -func ECoreCount() int { - panic("m1cpu: not a darwin/arm64 system") -} - -// PCoreCacheSize requires darwin/arm64 -func PCoreCache() (int, int, int) { - panic("m1cpu: not a darwin/arm64 system") -} - -// ECoreCacheSize requires darwin/arm64 -func ECoreCache() (int, int, int) { - panic("m1cpu: not a darwin/arm64 system") -} - -// ModelName requires darwin/arm64 -func ModelName() string { - panic("m1cpu: not a darwin/arm64 system") -} diff --git a/vendor/github.com/testcontainers/testcontainers-go/.gitignore b/vendor/github.com/testcontainers/testcontainers-go/.gitignore index e52935635..b5fd75ccc 100644 --- a/vendor/github.com/testcontainers/testcontainers-go/.gitignore +++ b/vendor/github.com/testcontainers/testcontainers-go/.gitignore @@ -8,13 +8,18 @@ site/ src/mkdocs-codeinclude-plugin src/pip-delete-this-directory.txt .idea/ +.build/ .DS_Store TEST-*.xml -tcvenv - **/go.work # VS Code settings .vscode + +# Environment variables +.env + +# Coverage files +coverage.out diff --git a/vendor/github.com/testcontainers/testcontainers-go/.golangci.yml b/vendor/github.com/testcontainers/testcontainers-go/.golangci.yml index 26f8f8a3c..8d668831d 100644 --- a/vendor/github.com/testcontainers/testcontainers-go/.golangci.yml +++ b/vendor/github.com/testcontainers/testcontainers-go/.golangci.yml @@ -1,36 +1,89 @@ +formatters: + enable: + - gci + - gofumpt + settings: + gci: + sections: + - standard + - default + - prefix(github.com/testcontainers) linters: enable: - - errcheck - errorlint - - gci - gocritic - - gofumpt - misspell + - nakedret - nolintlint - - nonamedreturns + - perfsprint + - revive - testifylint - thelper - -linters-settings: - errorlint: - # Check whether fmt.Errorf uses the %w verb for formatting errors. - # See the https://github.com/polyfloyd/go-errorlint for caveats. - errorf: true - # Permit more than 1 %w verb, valid per Go 1.20 (Requires errorf:true) - errorf-multi: true - # Check for plain type assertions and type switches. - asserts: true - # Check for plain error comparisons. 
- comparison: true - gci: - sections: - - standard - - default - - prefix(github.com/testcontainers) - testifylint: - disable: - - float-compare - - go-require - enable-all: true -run: - timeout: 5m + - usestdlibvars + exclusions: + presets: + - comments + - common-false-positives + - legacy + - std-error-handling + settings: + errorlint: + asserts: true + comparison: true + errorf: true + errorf-multi: true + revive: + rules: + - name: blank-imports + - name: context-as-argument + arguments: + - allowTypesBefore: '*testing.T' + - name: context-keys-type + - name: dot-imports + - name: early-return + arguments: + - preserveScope + - name: empty-block + - name: error-naming + disabled: true + - name: error-return + - name: error-strings + disabled: true + - name: errorf + - name: increment-decrement + - name: indent-error-flow + arguments: + - preserveScope + - name: range + - name: receiver-naming + - name: redefines-builtin-id + disabled: true + - name: superfluous-else + arguments: + - preserveScope + - name: time-naming + - name: unexported-return + disabled: true + - name: unreachable-code + - name: unused-parameter + - name: use-any + - name: var-declaration + - name: var-naming + arguments: + - - ID + - - VM + - - upperCaseConst: true + staticcheck: + checks: + - all + testifylint: + disable: + - float-compare + - go-require + enable-all: true +output: + formats: + text: + path: stdout + path-prefix: . +version: "2" diff --git a/vendor/github.com/testcontainers/testcontainers-go/CONTRIBUTING.md b/vendor/github.com/testcontainers/testcontainers-go/CONTRIBUTING.md index c8194c275..4736297eb 100644 --- a/vendor/github.com/testcontainers/testcontainers-go/CONTRIBUTING.md +++ b/vendor/github.com/testcontainers/testcontainers-go/CONTRIBUTING.md @@ -2,7 +2,7 @@ Please see the [main contributing guidelines](./docs/contributing.md). -There are additional docs describing [contributing documentation changes](./docs/contributing_docs.md). +There are additional docs describing [contributing documentation changes](./docs/contributing.md). ### GitHub Sponsorship diff --git a/vendor/github.com/testcontainers/testcontainers-go/Makefile b/vendor/github.com/testcontainers/testcontainers-go/Makefile index 7c8c5e36b..de6ccbd88 100644 --- a/vendor/github.com/testcontainers/testcontainers-go/Makefile +++ b/vendor/github.com/testcontainers/testcontainers-go/Makefile @@ -1,5 +1,12 @@ include ./commons-test.mk +.PHONY: lint-all +lint-all: + $(MAKE) lint + $(MAKE) -C modulegen lint + $(MAKE) -C examples lint-examples + $(MAKE) -C modules lint-modules + .PHONY: test-all test-all: tools test-tools test-unit @@ -16,22 +23,71 @@ tidy-all: ## -------------------------------------- -TCENV=tcvenv -PYTHONBIN=./$(TCENV)/bin - -tcvenv: tcvenv/touchfile - -tcvenv/touchfile: - @echo "Creating docs $(TCENV)..." - test -d $(TCENV) || python3 -m venv $(TCENV) - @echo "Installing requirements..." - . $(PYTHONBIN)/activate; pip install -Ur requirements.txt - touch $(TCENV)/touchfile +DOCS_CONTAINER=mkdocs-container +DOCS_IMAGE=python:3.13 +.PHONY: clean-docs clean-docs: - @echo "Destroying docs $(TCENV)..." - rm -rf $(TCENV) + @echo "Destroying docs" + docker rm -f $(DOCS_CONTAINER) || true .PHONY: serve-docs -serve-docs: tcvenv - . 
$(PYTHONBIN)/activate; $(PYTHONBIN)/mkdocs serve +serve-docs: + docker run --rm --name $(DOCS_CONTAINER) -it -p 8000:8000 \ + -v $(PWD):/testcontainers-go \ + -w /testcontainers-go \ + $(DOCS_IMAGE) bash -c "pip install -Ur requirements.txt && mkdocs serve -f mkdocs.yml -a 0.0.0.0:8000" + +## -------------------------------------- + +# Compose tests: Make goals to test the compose module against the latest versions of the compose and compose-go repositories. +# +# The following goals are available: +# +# - compose-clean: Clean the .build directory, and clean the go.mod and go.sum files in the testcontainers-go compose module. +# - compose-clone: Clone the compose and compose-go repositories into the .build directory. +# - compose-replace: Replace the docker/compose/v2 dependency in the testcontainers-go compose module with the local copy. +# - compose-spec-replace: Replace the compose-spec/compose-go/v2 dependency in the testcontainers-go compose module with the local copy. +# - compose-tidy: Run "go mod tidy" in the testcontainers-go compose module. +# - compose-test-all-latest: Test the testcontainers-go compose module against the latest versions of the compose and compose-go repositories. +# - compose-test-latest: Test the testcontainers-go compose module against the latest version of the compose repository, using current version of the compose-spec repository. +# - compose-test-spec-latest: Test the testcontainers-go compose module against the latest version of the compose-spec repository, using current version of the compose repository. + +.PHONY: compose-clean +compose-clean: + rm -rf .build + cd modules/compose && git checkout -- go.mod go.sum + +.PHONY: compose-clone +compose-clone: compose-clean + mkdir .build + git clone https://github.com/compose-spec/compose-go.git .build/compose-go & \ + git clone https://github.com/docker/compose.git .build/compose + wait + +.PHONY: compose-replace +compose-replace: + cd modules/compose && echo "replace github.com/docker/compose/v2 => ../../.build/compose" >> go.mod + +.PHONY: compose-spec-replace +compose-spec-replace: + cd modules/compose && echo "replace github.com/compose-spec/compose-go/v2 => ../../.build/compose-go" >> go.mod + +.PHONY: compose-tidy +compose-tidy: + cd modules/compose && go mod tidy + +# The following three goals are used in the GitHub Actions workflow to test the compose module against the latest versions of the compose and compose-spec repositories. +# Please update the 'docker-projects-latest' workflow if you are making any changes to these goals. 
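+#
+# Illustrative invocation (a sketch, assuming the repository root as the working
+# directory; the goal names are the ones defined above):
+#
+#   make compose-test-all-latest
+#
+# This chains compose-clone (which clones the compose and compose-go repositories
+# in parallel via `&` and `wait`), both replace goals, and compose-tidy before
+# running the compose module tests.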
+ +.PHONY: compose-test-all-latest +compose-test-all-latest: compose-clone compose-replace compose-spec-replace compose-tidy + make -C modules/compose test-compose + +.PHONY: compose-test-latest +compose-test-latest: compose-clone compose-replace compose-tidy + make -C modules/compose test-compose + +.PHONY: compose-test-spec-latest +compose-test-spec-latest: compose-clone compose-spec-replace compose-tidy + make -C modules/compose test-compose diff --git a/vendor/github.com/testcontainers/testcontainers-go/Pipfile b/vendor/github.com/testcontainers/testcontainers-go/Pipfile index 264827872..1360edfe0 100644 --- a/vendor/github.com/testcontainers/testcontainers-go/Pipfile +++ b/vendor/github.com/testcontainers/testcontainers-go/Pipfile @@ -8,9 +8,9 @@ verify_ssl = true [packages] mkdocs = "==1.5.3" mkdocs-codeinclude-plugin = "==0.2.1" -mkdocs-include-markdown-plugin = "==6.2.2" +mkdocs-include-markdown-plugin = "==7.1.7" mkdocs-material = "==9.5.18" mkdocs-markdownextradata-plugin = "==0.2.6" [requires] -python_version = "3.8" +python_version = "3.13" diff --git a/vendor/github.com/testcontainers/testcontainers-go/Pipfile.lock b/vendor/github.com/testcontainers/testcontainers-go/Pipfile.lock index 9a2f6d24c..8c17f7a29 100644 --- a/vendor/github.com/testcontainers/testcontainers-go/Pipfile.lock +++ b/vendor/github.com/testcontainers/testcontainers-go/Pipfile.lock @@ -1,7 +1,7 @@ { "_meta": { "hash": { - "sha256": "0411eac13d1b06b42671b8a654fb269eb0c329d9a3d41f669ccf7b653ef8ad32" + "sha256": "85cf0b145b1bf3625db055f19d76b73094afa3aa1e7283b348a814c0a294d1ed" }, "pipfile-spec": 6, "requires": { @@ -26,124 +26,125 @@ }, "bracex": { "hashes": [ - "sha256:0725da5045e8d37ea9592ab3614d8b561e22c3c5fde3964699be672e072ab611", - "sha256:d2fcf4b606a82ac325471affe1706dd9bbaa3536c91ef86a31f6b766f3dad1d0" + "sha256:0b0049264e7340b3ec782b5cb99beb325f36c3782a32e36e876452fd49a09952", + "sha256:98f1347cd77e22ee8d967a30ad4e310b233f7754dbf31ff3fceb76145ba47dc7" ], - "markers": "python_version >= '3.8'", - "version": "==2.5" + "markers": "python_version >= '3.9'", + "version": "==2.6" }, "certifi": { "hashes": [ - "sha256:5a1e7645bc0ec61a09e26c36f6106dd4cf40c6db3a1fb6352b0244e7fb057c7b", - "sha256:c198e21b1289c2ab85ee4e67bb4b4ef3ead0892059901a8d5b622f24a1101e90" + "sha256:6b31f564a415d79ee77df69d757bb49a5bb53bd9f756cbbe24394ffd6fc1f4b2", + "sha256:8ea99dbdfaaf2ba2f9bac77b9249ef62ec5218e7c2b2e903378ed5fccf765995" ], - "index": "pypi", - "markers": "python_version >= '3.6'", - "version": "==2024.7.4" + "markers": "python_version >= '3.7'", + "version": "==2025.7.14" }, "charset-normalizer": { "hashes": [ - "sha256:06435b539f889b1f6f4ac1758871aae42dc3a8c0e24ac9e60c2384973ad73027", - "sha256:06a81e93cd441c56a9b65d8e1d043daeb97a3d0856d177d5c90ba85acb3db087", - "sha256:0a55554a2fa0d408816b3b5cedf0045f4b8e1a6065aec45849de2d6f3f8e9786", - "sha256:0b2b64d2bb6d3fb9112bafa732def486049e63de9618b5843bcdd081d8144cd8", - "sha256:10955842570876604d404661fbccbc9c7e684caf432c09c715ec38fbae45ae09", - "sha256:122c7fa62b130ed55f8f285bfd56d5f4b4a5b503609d181f9ad85e55c89f4185", - "sha256:1ceae2f17a9c33cb48e3263960dc5fc8005351ee19db217e9b1bb15d28c02574", - "sha256:1d3193f4a680c64b4b6a9115943538edb896edc190f0b222e73761716519268e", - "sha256:1f79682fbe303db92bc2b1136016a38a42e835d932bab5b3b1bfcfbf0640e519", - "sha256:2127566c664442652f024c837091890cb1942c30937add288223dc895793f898", - "sha256:22afcb9f253dac0696b5a4be4a1c0f8762f8239e21b99680099abd9b2b1b2269", - "sha256:25baf083bf6f6b341f4121c2f3c548875ee6f5339300e08be3f2b2ba1721cdd3", - 
"sha256:2e81c7b9c8979ce92ed306c249d46894776a909505d8f5a4ba55b14206e3222f", - "sha256:3287761bc4ee9e33561a7e058c72ac0938c4f57fe49a09eae428fd88aafe7bb6", - "sha256:34d1c8da1e78d2e001f363791c98a272bb734000fcef47a491c1e3b0505657a8", - "sha256:37e55c8e51c236f95b033f6fb391d7d7970ba5fe7ff453dad675e88cf303377a", - "sha256:3d47fa203a7bd9c5b6cee4736ee84ca03b8ef23193c0d1ca99b5089f72645c73", - "sha256:3e4d1f6587322d2788836a99c69062fbb091331ec940e02d12d179c1d53e25fc", - "sha256:42cb296636fcc8b0644486d15c12376cb9fa75443e00fb25de0b8602e64c1714", - "sha256:45485e01ff4d3630ec0d9617310448a8702f70e9c01906b0d0118bdf9d124cf2", - "sha256:4a78b2b446bd7c934f5dcedc588903fb2f5eec172f3d29e52a9096a43722adfc", - "sha256:4ab2fe47fae9e0f9dee8c04187ce5d09f48eabe611be8259444906793ab7cbce", - "sha256:4d0d1650369165a14e14e1e47b372cfcb31d6ab44e6e33cb2d4e57265290044d", - "sha256:549a3a73da901d5bc3ce8d24e0600d1fa85524c10287f6004fbab87672bf3e1e", - "sha256:55086ee1064215781fff39a1af09518bc9255b50d6333f2e4c74ca09fac6a8f6", - "sha256:572c3763a264ba47b3cf708a44ce965d98555f618ca42c926a9c1616d8f34269", - "sha256:573f6eac48f4769d667c4442081b1794f52919e7edada77495aaed9236d13a96", - "sha256:5b4c145409bef602a690e7cfad0a15a55c13320ff7a3ad7ca59c13bb8ba4d45d", - "sha256:6463effa3186ea09411d50efc7d85360b38d5f09b870c48e4600f63af490e56a", - "sha256:65f6f63034100ead094b8744b3b97965785388f308a64cf8d7c34f2f2e5be0c4", - "sha256:663946639d296df6a2bb2aa51b60a2454ca1cb29835324c640dafb5ff2131a77", - "sha256:6897af51655e3691ff853668779c7bad41579facacf5fd7253b0133308cf000d", - "sha256:68d1f8a9e9e37c1223b656399be5d6b448dea850bed7d0f87a8311f1ff3dabb0", - "sha256:6ac7ffc7ad6d040517be39eb591cac5ff87416c2537df6ba3cba3bae290c0fed", - "sha256:6b3251890fff30ee142c44144871185dbe13b11bab478a88887a639655be1068", - "sha256:6c4caeef8fa63d06bd437cd4bdcf3ffefe6738fb1b25951440d80dc7df8c03ac", - "sha256:6ef1d82a3af9d3eecdba2321dc1b3c238245d890843e040e41e470ffa64c3e25", - "sha256:753f10e867343b4511128c6ed8c82f7bec3bd026875576dfd88483c5c73b2fd8", - "sha256:7cd13a2e3ddeed6913a65e66e94b51d80a041145a026c27e6bb76c31a853c6ab", - "sha256:7ed9e526742851e8d5cc9e6cf41427dfc6068d4f5a3bb03659444b4cabf6bc26", - "sha256:7f04c839ed0b6b98b1a7501a002144b76c18fb1c1850c8b98d458ac269e26ed2", - "sha256:802fe99cca7457642125a8a88a084cef28ff0cf9407060f7b93dca5aa25480db", - "sha256:80402cd6ee291dcb72644d6eac93785fe2c8b9cb30893c1af5b8fdd753b9d40f", - "sha256:8465322196c8b4d7ab6d1e049e4c5cb460d0394da4a27d23cc242fbf0034b6b5", - "sha256:86216b5cee4b06df986d214f664305142d9c76df9b6512be2738aa72a2048f99", - "sha256:87d1351268731db79e0f8e745d92493ee2841c974128ef629dc518b937d9194c", - "sha256:8bdb58ff7ba23002a4c5808d608e4e6c687175724f54a5dade5fa8c67b604e4d", - "sha256:8c622a5fe39a48f78944a87d4fb8a53ee07344641b0562c540d840748571b811", - "sha256:8d756e44e94489e49571086ef83b2bb8ce311e730092d2c34ca8f7d925cb20aa", - "sha256:8f4a014bc36d3c57402e2977dada34f9c12300af536839dc38c0beab8878f38a", - "sha256:9063e24fdb1e498ab71cb7419e24622516c4a04476b17a2dab57e8baa30d6e03", - "sha256:90d558489962fd4918143277a773316e56c72da56ec7aa3dc3dbbe20fdfed15b", - "sha256:923c0c831b7cfcb071580d3f46c4baf50f174be571576556269530f4bbd79d04", - "sha256:95f2a5796329323b8f0512e09dbb7a1860c46a39da62ecb2324f116fa8fdc85c", - "sha256:96b02a3dc4381e5494fad39be677abcb5e6634bf7b4fa83a6dd3112607547001", - "sha256:9f96df6923e21816da7e0ad3fd47dd8f94b2a5ce594e00677c0013018b813458", - "sha256:a10af20b82360ab00827f916a6058451b723b4e65030c5a18577c8b2de5b3389", - "sha256:a50aebfa173e157099939b17f18600f72f84eed3049e743b68ad15bd69b6bf99", - 
"sha256:a981a536974bbc7a512cf44ed14938cf01030a99e9b3a06dd59578882f06f985", - "sha256:a9a8e9031d613fd2009c182b69c7b2c1ef8239a0efb1df3f7c8da66d5dd3d537", - "sha256:ae5f4161f18c61806f411a13b0310bea87f987c7d2ecdbdaad0e94eb2e404238", - "sha256:aed38f6e4fb3f5d6bf81bfa990a07806be9d83cf7bacef998ab1a9bd660a581f", - "sha256:b01b88d45a6fcb69667cd6d2f7a9aeb4bf53760d7fc536bf679ec94fe9f3ff3d", - "sha256:b261ccdec7821281dade748d088bb6e9b69e6d15b30652b74cbbac25e280b796", - "sha256:b2b0a0c0517616b6869869f8c581d4eb2dd83a4d79e0ebcb7d373ef9956aeb0a", - "sha256:b4a23f61ce87adf89be746c8a8974fe1c823c891d8f86eb218bb957c924bb143", - "sha256:bd8f7df7d12c2db9fab40bdd87a7c09b1530128315d047a086fa3ae3435cb3a8", - "sha256:beb58fe5cdb101e3a055192ac291b7a21e3b7ef4f67fa1d74e331a7f2124341c", - "sha256:c002b4ffc0be611f0d9da932eb0f704fe2602a9a949d1f738e4c34c75b0863d5", - "sha256:c083af607d2515612056a31f0a8d9e0fcb5876b7bfc0abad3ecd275bc4ebc2d5", - "sha256:c180f51afb394e165eafe4ac2936a14bee3eb10debc9d9e4db8958fe36afe711", - "sha256:c235ebd9baae02f1b77bcea61bce332cb4331dc3617d254df3323aa01ab47bd4", - "sha256:cd70574b12bb8a4d2aaa0094515df2463cb429d8536cfb6c7ce983246983e5a6", - "sha256:d0eccceffcb53201b5bfebb52600a5fb483a20b61da9dbc885f8b103cbe7598c", - "sha256:d965bba47ddeec8cd560687584e88cf699fd28f192ceb452d1d7ee807c5597b7", - "sha256:db364eca23f876da6f9e16c9da0df51aa4f104a972735574842618b8c6d999d4", - "sha256:ddbb2551d7e0102e7252db79ba445cdab71b26640817ab1e3e3648dad515003b", - "sha256:deb6be0ac38ece9ba87dea880e438f25ca3eddfac8b002a2ec3d9183a454e8ae", - "sha256:e06ed3eb3218bc64786f7db41917d4e686cc4856944f53d5bdf83a6884432e12", - "sha256:e27ad930a842b4c5eb8ac0016b0a54f5aebbe679340c26101df33424142c143c", - "sha256:e537484df0d8f426ce2afb2d0f8e1c3d0b114b83f8850e5f2fbea0e797bd82ae", - "sha256:eb00ed941194665c332bf8e078baf037d6c35d7c4f3102ea2d4f16ca94a26dc8", - "sha256:eb6904c354526e758fda7167b33005998fb68c46fbc10e013ca97f21ca5c8887", - "sha256:eb8821e09e916165e160797a6c17edda0679379a4be5c716c260e836e122f54b", - "sha256:efcb3f6676480691518c177e3b465bcddf57cea040302f9f4e6e191af91174d4", - "sha256:f27273b60488abe721a075bcca6d7f3964f9f6f067c8c4c605743023d7d3944f", - "sha256:f30c3cb33b24454a82faecaf01b19c18562b1e89558fb6c56de4d9118a032fd5", - "sha256:fb69256e180cb6c8a894fee62b3afebae785babc1ee98b81cdf68bbca1987f33", - "sha256:fd1abc0d89e30cc4e02e4064dc67fcc51bd941eb395c502aac3ec19fab46b519", - "sha256:ff8fa367d09b717b2a17a052544193ad76cd49979c805768879cb63d9ca50561" - ], - "markers": "python_full_version >= '3.7.0'", - "version": "==3.3.2" + "sha256:005fa3432484527f9732ebd315da8da8001593e2cf46a3d817669f062c3d9ed4", + "sha256:046595208aae0120559a67693ecc65dd75d46f7bf687f159127046628178dc45", + "sha256:0c29de6a1a95f24b9a1aa7aefd27d2487263f00dfd55a77719b530788f75cff7", + "sha256:0c8c57f84ccfc871a48a47321cfa49ae1df56cd1d965a09abe84066f6853b9c0", + "sha256:0f5d9ed7f254402c9e7d35d2f5972c9bbea9040e99cd2861bd77dc68263277c7", + "sha256:18dd2e350387c87dabe711b86f83c9c78af772c748904d372ade190b5c7c9d4d", + "sha256:1b1bde144d98e446b056ef98e59c256e9294f6b74d7af6846bf5ffdafd687a7d", + "sha256:1c95a1e2902a8b722868587c0e1184ad5c55631de5afc0eb96bc4b0d738092c0", + "sha256:1cad5f45b3146325bb38d6855642f6fd609c3f7cad4dbaf75549bf3b904d3184", + "sha256:21b2899062867b0e1fde9b724f8aecb1af14f2778d69aacd1a5a1853a597a5db", + "sha256:24498ba8ed6c2e0b56d4acbf83f2d989720a93b41d712ebd4f4979660db4417b", + "sha256:25a23ea5c7edc53e0f29bae2c44fcb5a1aa10591aae107f2a2b2583a9c5cbc64", + "sha256:289200a18fa698949d2b39c671c2cc7a24d44096784e76614899a7ccf2574b7b", + 
"sha256:28a1005facc94196e1fb3e82a3d442a9d9110b8434fc1ded7a24a2983c9888d8", + "sha256:32fc0341d72e0f73f80acb0a2c94216bd704f4f0bce10aedea38f30502b271ff", + "sha256:36b31da18b8890a76ec181c3cf44326bf2c48e36d393ca1b72b3f484113ea344", + "sha256:3c21d4fca343c805a52c0c78edc01e3477f6dd1ad7c47653241cf2a206d4fc58", + "sha256:3fddb7e2c84ac87ac3a947cb4e66d143ca5863ef48e4a5ecb83bd48619e4634e", + "sha256:43e0933a0eff183ee85833f341ec567c0980dae57c464d8a508e1b2ceb336471", + "sha256:4a476b06fbcf359ad25d34a057b7219281286ae2477cc5ff5e3f70a246971148", + "sha256:4e594135de17ab3866138f496755f302b72157d115086d100c3f19370839dd3a", + "sha256:50bf98d5e563b83cc29471fa114366e6806bc06bc7a25fd59641e41445327836", + "sha256:5a9979887252a82fefd3d3ed2a8e3b937a7a809f65dcb1e068b090e165bbe99e", + "sha256:5baececa9ecba31eff645232d59845c07aa030f0c81ee70184a90d35099a0e63", + "sha256:5bf4545e3b962767e5c06fe1738f951f77d27967cb2caa64c28be7c4563e162c", + "sha256:6333b3aa5a12c26b2a4d4e7335a28f1475e0e5e17d69d55141ee3cab736f66d1", + "sha256:65c981bdbd3f57670af8b59777cbfae75364b483fa8a9f420f08094531d54a01", + "sha256:68a328e5f55ec37c57f19ebb1fdc56a248db2e3e9ad769919a58672958e8f366", + "sha256:6a0289e4589e8bdfef02a80478f1dfcb14f0ab696b5a00e1f4b8a14a307a3c58", + "sha256:6b66f92b17849b85cad91259efc341dce9c1af48e2173bf38a85c6329f1033e5", + "sha256:6c9379d65defcab82d07b2a9dfbfc2e95bc8fe0ebb1b176a3190230a3ef0e07c", + "sha256:6fc1f5b51fa4cecaa18f2bd7a003f3dd039dd615cd69a2afd6d3b19aed6775f2", + "sha256:70f7172939fdf8790425ba31915bfbe8335030f05b9913d7ae00a87d4395620a", + "sha256:721c76e84fe669be19c5791da68232ca2e05ba5185575086e384352e2c309597", + "sha256:7222ffd5e4de8e57e03ce2cef95a4c43c98fcb72ad86909abdfc2c17d227fc1b", + "sha256:75d10d37a47afee94919c4fab4c22b9bc2a8bf7d4f46f87363bcf0573f3ff4f5", + "sha256:76af085e67e56c8816c3ccf256ebd136def2ed9654525348cfa744b6802b69eb", + "sha256:770cab594ecf99ae64c236bc9ee3439c3f46be49796e265ce0cc8bc17b10294f", + "sha256:7a6ab32f7210554a96cd9e33abe3ddd86732beeafc7a28e9955cdf22ffadbab0", + "sha256:7c48ed483eb946e6c04ccbe02c6b4d1d48e51944b6db70f697e089c193404941", + "sha256:7f56930ab0abd1c45cd15be65cc741c28b1c9a34876ce8c17a2fa107810c0af0", + "sha256:8075c35cd58273fee266c58c0c9b670947c19df5fb98e7b66710e04ad4e9ff86", + "sha256:8272b73e1c5603666618805fe821edba66892e2870058c94c53147602eab29c7", + "sha256:82d8fd25b7f4675d0c47cf95b594d4e7b158aca33b76aa63d07186e13c0e0ab7", + "sha256:844da2b5728b5ce0e32d863af26f32b5ce61bc4273a9c720a9f3aa9df73b1455", + "sha256:8755483f3c00d6c9a77f490c17e6ab0c8729e39e6390328e42521ef175380ae6", + "sha256:915f3849a011c1f593ab99092f3cecfcb4d65d8feb4a64cf1bf2d22074dc0ec4", + "sha256:926ca93accd5d36ccdabd803392ddc3e03e6d4cd1cf17deff3b989ab8e9dbcf0", + "sha256:982bb1e8b4ffda883b3d0a521e23abcd6fd17418f6d2c4118d257a10199c0ce3", + "sha256:98f862da73774290f251b9df8d11161b6cf25b599a66baf087c1ffe340e9bfd1", + "sha256:9cbfacf36cb0ec2897ce0ebc5d08ca44213af24265bd56eca54bee7923c48fd6", + "sha256:a370b3e078e418187da8c3674eddb9d983ec09445c99a3a263c2011993522981", + "sha256:a955b438e62efdf7e0b7b52a64dc5c3396e2634baa62471768a64bc2adb73d5c", + "sha256:aa6af9e7d59f9c12b33ae4e9450619cf2488e2bbe9b44030905877f0b2324980", + "sha256:aa88ca0b1932e93f2d961bf3addbb2db902198dca337d88c89e1559e066e7645", + "sha256:aaeeb6a479c7667fbe1099af9617c83aaca22182d6cf8c53966491a0f1b7ffb7", + "sha256:aaf27faa992bfee0264dc1f03f4c75e9fcdda66a519db6b957a3f826e285cf12", + "sha256:b2680962a4848b3c4f155dc2ee64505a9c57186d0d56b43123b17ca3de18f0fa", + "sha256:b2d318c11350e10662026ad0eb71bb51c7812fc8590825304ae0bdd4ac283acd", + 
"sha256:b33de11b92e9f75a2b545d6e9b6f37e398d86c3e9e9653c4864eb7e89c5773ef", + "sha256:b3daeac64d5b371dea99714f08ffc2c208522ec6b06fbc7866a450dd446f5c0f", + "sha256:be1e352acbe3c78727a16a455126d9ff83ea2dfdcbc83148d2982305a04714c2", + "sha256:bee093bf902e1d8fc0ac143c88902c3dfc8941f7ea1d6a8dd2bcb786d33db03d", + "sha256:c72fbbe68c6f32f251bdc08b8611c7b3060612236e960ef848e0a517ddbe76c5", + "sha256:c9e36a97bee9b86ef9a1cf7bb96747eb7a15c2f22bdb5b516434b00f2a599f02", + "sha256:cddf7bd982eaa998934a91f69d182aec997c6c468898efe6679af88283b498d3", + "sha256:cf713fe9a71ef6fd5adf7a79670135081cd4431c2943864757f0fa3a65b1fafd", + "sha256:d11b54acf878eef558599658b0ffca78138c8c3655cf4f3a4a673c437e67732e", + "sha256:d41c4d287cfc69060fa91cae9683eacffad989f1a10811995fa309df656ec214", + "sha256:d524ba3f1581b35c03cb42beebab4a13e6cdad7b36246bd22541fa585a56cccd", + "sha256:daac4765328a919a805fa5e2720f3e94767abd632ae410a9062dff5412bae65a", + "sha256:db4c7bf0e07fc3b7d89ac2a5880a6a8062056801b83ff56d8464b70f65482b6c", + "sha256:dc7039885fa1baf9be153a0626e337aa7ec8bf96b0128605fb0d77788ddc1681", + "sha256:dccab8d5fa1ef9bfba0590ecf4d46df048d18ffe3eec01eeb73a42e0d9e7a8ba", + "sha256:dedb8adb91d11846ee08bec4c8236c8549ac721c245678282dcb06b221aab59f", + "sha256:e45ba65510e2647721e35323d6ef54c7974959f6081b58d4ef5d87c60c84919a", + "sha256:e53efc7c7cee4c1e70661e2e112ca46a575f90ed9ae3fef200f2a25e954f4b28", + "sha256:e635b87f01ebc977342e2697d05b56632f5f879a4f15955dfe8cef2448b51691", + "sha256:e70e990b2137b29dc5564715de1e12701815dacc1d056308e2b17e9095372a82", + "sha256:e8082b26888e2f8b36a042a58307d5b917ef2b1cacab921ad3323ef91901c71a", + "sha256:e8323a9b031aa0393768b87f04b4164a40037fb2a3c11ac06a03ffecd3618027", + "sha256:e92fca20c46e9f5e1bb485887d074918b13543b1c2a1185e69bb8d17ab6236a7", + "sha256:eb30abc20df9ab0814b5a2524f23d75dcf83cde762c161917a2b4b7b55b1e518", + "sha256:eba9904b0f38a143592d9fc0e19e2df0fa2e41c3c3745554761c5f6447eedabf", + "sha256:ef8de666d6179b009dce7bcb2ad4c4a779f113f12caf8dc77f0162c29d20490b", + "sha256:efd387a49825780ff861998cd959767800d54f8308936b21025326de4b5a42b9", + "sha256:f0aa37f3c979cf2546b73e8222bbfa3dc07a641585340179d768068e3455e544", + "sha256:f4074c5a429281bf056ddd4c5d3b740ebca4d43ffffe2ef4bf4d2d05114299da", + "sha256:f69a27e45c43520f5487f27627059b64aaf160415589230992cec34c5e18a509", + "sha256:fb707f3e15060adf5b7ada797624a6c6e0138e2a26baa089df64c68ee98e040f", + "sha256:fcbe676a55d7445b22c10967bceaaf0ee69407fbe0ece4d032b6eb8d4565982a", + "sha256:fdb20a30fe1175ecabed17cbf7812f7b804b8a315a25f24678bcdf120a90077f" + ], + "markers": "python_version >= '3.7'", + "version": "==3.4.2" }, "click": { "hashes": [ - "sha256:ae74fb96c20a0277a1d615f1e4d73c8414f5a98db8b799a7931d1582f3390c28", - "sha256:ca9853ad459e787e2192211578cc907e7594e294c7ccc834310722b41b9ca6de" + "sha256:27c491cc05d968d271d5a1db13e3b5a184636d9d930f148c50b038f0d0646202", + "sha256:61a3265b914e850b85317d0b3109c7f8cd35a670f963866005d6ef1d5175a12b" ], - "markers": "python_version >= '3.7'", - "version": "==8.1.7" + "markers": "python_version >= '3.10'", + "version": "==8.2.1" }, "colorama": { "hashes": [ @@ -162,11 +163,11 @@ }, "idna": { "hashes": [ - "sha256:028ff3aadf0609c1fd278d8ea3089299412a7a8b9bd005dd08b9f8285bcb5cfc", - "sha256:82fee1fc78add43492d3a1898bfa6d8a904cc97d8427f683ed8e798d07761aa0" + "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9", + "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3" ], - "markers": "python_version >= '3.5'", - "version": "==3.7" + "markers": "python_version >= '3.6'", 
+ "version": "==3.10" }, "importlib-metadata": { "hashes": [ @@ -178,85 +179,86 @@ }, "jinja2": { "hashes": [ - "sha256:4a3aee7acbbe7303aede8e9648d13b8bf88a429282aa6122a993f0ac800cb369", - "sha256:bc5dd2abb727a5319567b7a813e6a2e7318c39f4f487cfe6c89c6f9c7d25197d" + "sha256:0137fb05990d35f1275a587e9aee6d56da821fc83491a0fb838183be43f66d6d", + "sha256:85ece4451f492d0c13c5dd7c13a64681a86afae63a5f347908daf103ce6d2f67" ], "markers": "python_version >= '3.7'", - "version": "==3.1.4" + "version": "==3.1.6" }, "markdown": { "hashes": [ - "sha256:2ae2471477cfd02dbbf038d5d9bc226d40def84b4fe2986e49b59b6b472bbed2", - "sha256:7eb6df5690b81a1d7942992c97fad2938e956e79df20cbc6186e9c3a77b1c803" + "sha256:9f4d91ed810864ea88a6f32c07ba8bee1346c0cc1f6b1f9f6c822f2a9667d280", + "sha256:d2900fe1782bd33bdbbd56859defef70c2e78fc46668f8eb9df3128138f2cb6a" ], - "markers": "python_version >= '3.8'", - "version": "==3.7" + "markers": "python_version >= '3.9'", + "version": "==3.9" }, "markupsafe": { "hashes": [ - "sha256:00e046b6dd71aa03a41079792f8473dc494d564611a8f89bbbd7cb93295ebdcf", - "sha256:075202fa5b72c86ad32dc7d0b56024ebdbcf2048c0ba09f1cde31bfdd57bcfff", - "sha256:0e397ac966fdf721b2c528cf028494e86172b4feba51d65f81ffd65c63798f3f", - "sha256:17b950fccb810b3293638215058e432159d2b71005c74371d784862b7e4683f3", - "sha256:1f3fbcb7ef1f16e48246f704ab79d79da8a46891e2da03f8783a5b6fa41a9532", - "sha256:2174c595a0d73a3080ca3257b40096db99799265e1c27cc5a610743acd86d62f", - "sha256:2b7c57a4dfc4f16f7142221afe5ba4e093e09e728ca65c51f5620c9aaeb9a617", - "sha256:2d2d793e36e230fd32babe143b04cec8a8b3eb8a3122d2aceb4a371e6b09b8df", - "sha256:30b600cf0a7ac9234b2638fbc0fb6158ba5bdcdf46aeb631ead21248b9affbc4", - "sha256:397081c1a0bfb5124355710fe79478cdbeb39626492b15d399526ae53422b906", - "sha256:3a57fdd7ce31c7ff06cdfbf31dafa96cc533c21e443d57f5b1ecc6cdc668ec7f", - "sha256:3c6b973f22eb18a789b1460b4b91bf04ae3f0c4234a0a6aa6b0a92f6f7b951d4", - "sha256:3e53af139f8579a6d5f7b76549125f0d94d7e630761a2111bc431fd820e163b8", - "sha256:4096e9de5c6fdf43fb4f04c26fb114f61ef0bf2e5604b6ee3019d51b69e8c371", - "sha256:4275d846e41ecefa46e2015117a9f491e57a71ddd59bbead77e904dc02b1bed2", - "sha256:4c31f53cdae6ecfa91a77820e8b151dba54ab528ba65dfd235c80b086d68a465", - "sha256:4f11aa001c540f62c6166c7726f71f7573b52c68c31f014c25cc7901deea0b52", - "sha256:5049256f536511ee3f7e1b3f87d1d1209d327e818e6ae1365e8653d7e3abb6a6", - "sha256:58c98fee265677f63a4385256a6d7683ab1832f3ddd1e66fe948d5880c21a169", - "sha256:598e3276b64aff0e7b3451b72e94fa3c238d452e7ddcd893c3ab324717456bad", - "sha256:5b7b716f97b52c5a14bffdf688f971b2d5ef4029127f1ad7a513973cfd818df2", - "sha256:5dedb4db619ba5a2787a94d877bc8ffc0566f92a01c0ef214865e54ecc9ee5e0", - "sha256:619bc166c4f2de5caa5a633b8b7326fbe98e0ccbfacabd87268a2b15ff73a029", - "sha256:629ddd2ca402ae6dbedfceeba9c46d5f7b2a61d9749597d4307f943ef198fc1f", - "sha256:656f7526c69fac7f600bd1f400991cc282b417d17539a1b228617081106feb4a", - "sha256:6ec585f69cec0aa07d945b20805be741395e28ac1627333b1c5b0105962ffced", - "sha256:72b6be590cc35924b02c78ef34b467da4ba07e4e0f0454a2c5907f473fc50ce5", - "sha256:7502934a33b54030eaf1194c21c692a534196063db72176b0c4028e140f8f32c", - "sha256:7a68b554d356a91cce1236aa7682dc01df0edba8d043fd1ce607c49dd3c1edcf", - "sha256:7b2e5a267c855eea6b4283940daa6e88a285f5f2a67f2220203786dfa59b37e9", - "sha256:823b65d8706e32ad2df51ed89496147a42a2a6e01c13cfb6ffb8b1e92bc910bb", - "sha256:8590b4ae07a35970728874632fed7bd57b26b0102df2d2b233b6d9d82f6c62ad", - "sha256:8dd717634f5a044f860435c1d8c16a270ddf0ef8588d4887037c5028b859b0c3", - 
"sha256:8dec4936e9c3100156f8a2dc89c4b88d5c435175ff03413b443469c7c8c5f4d1", - "sha256:97cafb1f3cbcd3fd2b6fbfb99ae11cdb14deea0736fc2b0952ee177f2b813a46", - "sha256:a17a92de5231666cfbe003f0e4b9b3a7ae3afb1ec2845aadc2bacc93ff85febc", - "sha256:a549b9c31bec33820e885335b451286e2969a2d9e24879f83fe904a5ce59d70a", - "sha256:ac07bad82163452a6884fe8fa0963fb98c2346ba78d779ec06bd7a6262132aee", - "sha256:ae2ad8ae6ebee9d2d94b17fb62763125f3f374c25618198f40cbb8b525411900", - "sha256:b91c037585eba9095565a3556f611e3cbfaa42ca1e865f7b8015fe5c7336d5a5", - "sha256:bc1667f8b83f48511b94671e0e441401371dfd0f0a795c7daa4a3cd1dde55bea", - "sha256:bec0a414d016ac1a18862a519e54b2fd0fc8bbfd6890376898a6c0891dd82e9f", - "sha256:bf50cd79a75d181c9181df03572cdce0fbb75cc353bc350712073108cba98de5", - "sha256:bff1b4290a66b490a2f4719358c0cdcd9bafb6b8f061e45c7a2460866bf50c2e", - "sha256:c061bb86a71b42465156a3ee7bd58c8c2ceacdbeb95d05a99893e08b8467359a", - "sha256:c8b29db45f8fe46ad280a7294f5c3ec36dbac9491f2d1c17345be8e69cc5928f", - "sha256:ce409136744f6521e39fd8e2a24c53fa18ad67aa5bc7c2cf83645cce5b5c4e50", - "sha256:d050b3361367a06d752db6ead6e7edeb0009be66bc3bae0ee9d97fb326badc2a", - "sha256:d283d37a890ba4c1ae73ffadf8046435c76e7bc2247bbb63c00bd1a709c6544b", - "sha256:d9fad5155d72433c921b782e58892377c44bd6252b5af2f67f16b194987338a4", - "sha256:daa4ee5a243f0f20d528d939d06670a298dd39b1ad5f8a72a4275124a7819eff", - "sha256:db0b55e0f3cc0be60c1f19efdde9a637c32740486004f20d1cff53c3c0ece4d2", - "sha256:e61659ba32cf2cf1481e575d0462554625196a1f2fc06a1c777d3f48e8865d46", - "sha256:ea3d8a3d18833cf4304cd2fc9cbb1efe188ca9b5efef2bdac7adc20594a0e46b", - "sha256:ec6a563cff360b50eed26f13adc43e61bc0c04d94b8be985e6fb24b81f6dcfdf", - "sha256:f5dfb42c4604dddc8e4305050aa6deb084540643ed5804d7455b5df8fe16f5e5", - "sha256:fa173ec60341d6bb97a89f5ea19c85c5643c1e7dedebc22f5181eb73573142c5", - "sha256:fa9db3f79de01457b03d4f01b34cf91bc0048eb2c3846ff26f66687c2f6d16ab", - "sha256:fce659a462a1be54d2ffcacea5e3ba2d74daa74f30f5f143fe0c58636e355fdd", - "sha256:ffee1f21e5ef0d712f9033568f8344d5da8cc2869dbd08d87c84656e6a2d2f68" - ], - "markers": "python_version >= '3.7'", - "version": "==2.1.5" + "sha256:0bff5e0ae4ef2e1ae4fdf2dfd5b76c75e5c2fa4132d05fc1b0dabcd20c7e28c4", + "sha256:0f4ca02bea9a23221c0182836703cbf8930c5e9454bacce27e767509fa286a30", + "sha256:1225beacc926f536dc82e45f8a4d68502949dc67eea90eab715dea3a21c1b5f0", + "sha256:131a3c7689c85f5ad20f9f6fb1b866f402c445b220c19fe4308c0b147ccd2ad9", + "sha256:15ab75ef81add55874e7ab7055e9c397312385bd9ced94920f2802310c930396", + "sha256:1a9d3f5f0901fdec14d8d2f66ef7d035f2157240a433441719ac9a3fba440b13", + "sha256:1c99d261bd2d5f6b59325c92c73df481e05e57f19837bdca8413b9eac4bd8028", + "sha256:1e084f686b92e5b83186b07e8a17fc09e38fff551f3602b249881fec658d3eca", + "sha256:2181e67807fc2fa785d0592dc2d6206c019b9502410671cc905d132a92866557", + "sha256:2cb8438c3cbb25e220c2ab33bb226559e7afb3baec11c4f218ffa7308603c832", + "sha256:3169b1eefae027567d1ce6ee7cae382c57fe26e82775f460f0b2778beaad66c0", + "sha256:3809ede931876f5b2ec92eef964286840ed3540dadf803dd570c3b7e13141a3b", + "sha256:38a9ef736c01fccdd6600705b09dc574584b89bea478200c5fbf112a6b0d5579", + "sha256:3d79d162e7be8f996986c064d1c7c817f6df3a77fe3d6859f6f9e7be4b8c213a", + "sha256:444dcda765c8a838eaae23112db52f1efaf750daddb2d9ca300bcae1039adc5c", + "sha256:48032821bbdf20f5799ff537c7ac3d1fba0ba032cfc06194faffa8cda8b560ff", + "sha256:4aa4e5faecf353ed117801a068ebab7b7e09ffb6e1d5e412dc852e0da018126c", + "sha256:52305740fe773d09cffb16f8ed0427942901f00adedac82ec8b67752f58a1b22", + 
"sha256:569511d3b58c8791ab4c2e1285575265991e6d8f8700c7be0e88f86cb0672094", + "sha256:57cb5a3cf367aeb1d316576250f65edec5bb3be939e9247ae594b4bcbc317dfb", + "sha256:5b02fb34468b6aaa40dfc198d813a641e3a63b98c2b05a16b9f80b7ec314185e", + "sha256:6381026f158fdb7c72a168278597a5e3a5222e83ea18f543112b2662a9b699c5", + "sha256:6af100e168aa82a50e186c82875a5893c5597a0c1ccdb0d8b40240b1f28b969a", + "sha256:6c89876f41da747c8d3677a2b540fb32ef5715f97b66eeb0c6b66f5e3ef6f59d", + "sha256:6e296a513ca3d94054c2c881cc913116e90fd030ad1c656b3869762b754f5f8a", + "sha256:70a87b411535ccad5ef2f1df5136506a10775d267e197e4cf531ced10537bd6b", + "sha256:7e94c425039cde14257288fd61dcfb01963e658efbc0ff54f5306b06054700f8", + "sha256:846ade7b71e3536c4e56b386c2a47adf5741d2d8b94ec9dc3e92e5e1ee1e2225", + "sha256:88416bd1e65dcea10bc7569faacb2c20ce071dd1f87539ca2ab364bf6231393c", + "sha256:88b49a3b9ff31e19998750c38e030fc7bb937398b1f78cfa599aaef92d693144", + "sha256:8c4e8c3ce11e1f92f6536ff07154f9d49677ebaaafc32db9db4620bc11ed480f", + "sha256:8e06879fc22a25ca47312fbe7c8264eb0b662f6db27cb2d3bbbc74b1df4b9b87", + "sha256:9025b4018f3a1314059769c7bf15441064b2207cb3f065e6ea1e7359cb46db9d", + "sha256:93335ca3812df2f366e80509ae119189886b0f3c2b81325d39efdb84a1e2ae93", + "sha256:9778bd8ab0a994ebf6f84c2b949e65736d5575320a17ae8984a77fab08db94cf", + "sha256:9e2d922824181480953426608b81967de705c3cef4d1af983af849d7bd619158", + "sha256:a123e330ef0853c6e822384873bef7507557d8e4a082961e1defa947aa59ba84", + "sha256:a904af0a6162c73e3edcb969eeeb53a63ceeb5d8cf642fade7d39e7963a22ddb", + "sha256:ad10d3ded218f1039f11a75f8091880239651b52e9bb592ca27de44eed242a48", + "sha256:b424c77b206d63d500bcb69fa55ed8d0e6a3774056bdc4839fc9298a7edca171", + "sha256:b5a6b3ada725cea8a5e634536b1b01c30bcdcd7f9c6fff4151548d5bf6b3a36c", + "sha256:ba8062ed2cf21c07a9e295d5b8a2a5ce678b913b45fdf68c32d95d6c1291e0b6", + "sha256:ba9527cdd4c926ed0760bc301f6728ef34d841f405abf9d4f959c478421e4efd", + "sha256:bbcb445fa71794da8f178f0f6d66789a28d7319071af7a496d4d507ed566270d", + "sha256:bcf3e58998965654fdaff38e58584d8937aa3096ab5354d493c77d1fdd66d7a1", + "sha256:c0ef13eaeee5b615fb07c9a7dadb38eac06a0608b41570d8ade51c56539e509d", + "sha256:cabc348d87e913db6ab4aa100f01b08f481097838bdddf7c7a84b7575b7309ca", + "sha256:cdb82a876c47801bb54a690c5ae105a46b392ac6099881cdfb9f6e95e4014c6a", + "sha256:cfad01eed2c2e0c01fd0ecd2ef42c492f7f93902e39a42fc9ee1692961443a29", + "sha256:d16a81a06776313e817c951135cf7340a3e91e8c1ff2fac444cfd75fffa04afe", + "sha256:d8213e09c917a951de9d09ecee036d5c7d36cb6cb7dbaece4c71a60d79fb9798", + "sha256:e07c3764494e3776c602c1e78e298937c3315ccc9043ead7e685b7f2b8d47b3c", + "sha256:e17c96c14e19278594aa4841ec148115f9c7615a47382ecb6b82bd8fea3ab0c8", + "sha256:e444a31f8db13eb18ada366ab3cf45fd4b31e4db1236a4448f68778c1d1a5a2f", + "sha256:e6a2a455bd412959b57a172ce6328d2dd1f01cb2135efda2e4576e8a23fa3b0f", + "sha256:eaa0a10b7f72326f1372a713e73c3f739b524b3af41feb43e4921cb529f5929a", + "sha256:eb7972a85c54febfb25b5c4b4f3af4dcc731994c7da0d8a0b4a6eb0640e1d178", + "sha256:ee55d3edf80167e48ea11a923c7386f4669df67d7994554387f84e7d8b0a2bf0", + "sha256:f3818cb119498c0678015754eba762e0d61e5b52d34c8b13d770f0719f7b1d79", + "sha256:f8b3d067f2e40fe93e1ccdd6b2e1d16c43140e76f02fb1319a05cf2b79d99430", + "sha256:fcabf5ff6eea076f859677f5f0b6b5c1a51e70a376b0579e0eadef8db48c6b50" + ], + "markers": "python_version >= '3.9'", + "version": "==3.0.2" }, "mergedeep": { "hashes": [ @@ -286,12 +288,12 @@ }, "mkdocs-include-markdown-plugin": { "hashes": [ - 
"sha256:d293950f6499d2944291ca7b9bc4a60e652bbfd3e3a42b564f6cceee268694e7", - "sha256:f2bd5026650492a581d2fd44be6c22f90391910d76582b96a34c264f2d17875d" + "sha256:677637e04c2d3497c50340be522e2a7f614124f592c7982d88b859f88d527a4c", + "sha256:a0c13efe4f6b05a419c022e201055bf43145eed90de65f2353c33fb4005b6aa5" ], "index": "pypi", - "markers": "python_version >= '3.8'", - "version": "==6.2.2" + "markers": "python_version >= '3.9'", + "version": "==7.1.7" }, "mkdocs-markdownextradata-plugin": { "hashes": [ @@ -321,11 +323,11 @@ }, "packaging": { "hashes": [ - "sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002", - "sha256:5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124" + "sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484", + "sha256:d443872c98d677bf60f6a1f2f8c1cb748e8fe762d2bf9d3148b5599295b0fc4f" ], "markers": "python_version >= '3.8'", - "version": "==24.1" + "version": "==25.0" }, "paginate": { "hashes": [ @@ -343,11 +345,11 @@ }, "platformdirs": { "hashes": [ - "sha256:2d7a1657e36a80ea911db832a8a6ece5ee53d8de21edd5cc5879af6530b1bfee", - "sha256:38b7b51f512eed9e84a22788b4bce1de17c0adb134d6becb09836e37d8654cd3" + "sha256:abd01743f24e5287cd7a5db3752faf1a2d65353f38ec26d98e25a6db65958c85", + "sha256:ca753cf4d81dc309bc67b0ea38fd15dc97bc30ce419a7f58d13eb3bf14c4febf" ], - "markers": "python_version >= '3.8'", - "version": "==4.2.2" + "markers": "python_version >= '3.9'", + "version": "==4.4.0" }, "pygments": { "hashes": [ @@ -370,7 +372,7 @@ "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3", "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427" ], - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2'", "version": "==2.9.0.post0" }, "pytz": { @@ -442,11 +444,11 @@ }, "pyyaml-env-tag": { "hashes": [ - "sha256:70092675bda14fdec33b31ba77e7543de9ddc88f2e5b99160396572d11525bdb", - "sha256:af31106dec8a4d68c60207c1886031cbf839b68aa7abccdb19868200532c2069" + "sha256:17109e1a528561e32f026364712fee1264bc2ea6715120891174ed1b980d2e04", + "sha256:2eb38b75a2d21ee0475d6d97ec19c63287a7e140231e4214969d0eac923cd7ff" ], - "markers": "python_version >= '3.6'", - "version": "==0.1" + "markers": "python_version >= '3.9'", + "version": "==1.1" }, "regex": { "hashes": [ @@ -535,78 +537,72 @@ }, "requests": { "hashes": [ - "sha256:f2c3881dddb70d056c5bd7600a4fae312b2a300e39be6a118d30b90bd27262b5", - "sha256:fa5490319474c82ef1d2c9bc459d3652e3ae4ef4c4ebdd18a21145a47ca4b6b8" + "sha256:27babd3cda2a6d50b30443204ee89830707d396671944c998b5975b031ac2b2c", + "sha256:27d0316682c8a29834d3264820024b62a36942083d52caf2f14c0591336d3422" ], "index": "pypi", "markers": "python_version >= '3.8'", - "version": "==2.32.0" + "version": "==2.32.4" }, "six": { "hashes": [ - "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926", - "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254" + "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274", + "sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81" ], - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", - "version": "==1.16.0" + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2'", + "version": "==1.17.0" }, "urllib3": { "hashes": [ - "sha256:a448b2f64d686155468037e1ace9f2d2199776e17f0a46610480d311f73e3472", - 
"sha256:dd505485549a7a552833da5e6063639d0d177c04f23bc3864e41e5dc5f612168" + "sha256:3fc47733c7e419d4bc3f6b3dc2b4f890bb743906a30d56ba4a5bfa4bbff92760", + "sha256:e6b01673c0fa6a13e374b50871808eb3bf7046c4b125b216f6bf1cc604cff0dc" ], - "index": "pypi", - "markers": "python_version >= '3.8'", - "version": "==2.2.2" + "markers": "python_version >= '3.9'", + "version": "==2.5.0" }, "watchdog": { "hashes": [ - "sha256:0b4359067d30d5b864e09c8597b112fe0a0a59321a0f331498b013fb097406b4", - "sha256:0d8a7e523ef03757a5aa29f591437d64d0d894635f8a50f370fe37f913ce4e19", - "sha256:0e83619a2d5d436a7e58a1aea957a3c1ccbf9782c43c0b4fed80580e5e4acd1a", - "sha256:10b6683df70d340ac3279eff0b2766813f00f35a1d37515d2c99959ada8f05fa", - "sha256:132937547a716027bd5714383dfc40dc66c26769f1ce8a72a859d6a48f371f3a", - "sha256:1cdcfd8142f604630deef34722d695fb455d04ab7cfe9963055df1fc69e6727a", - "sha256:2d468028a77b42cc685ed694a7a550a8d1771bb05193ba7b24006b8241a571a1", - "sha256:32be97f3b75693a93c683787a87a0dc8db98bb84701539954eef991fb35f5fbc", - "sha256:770eef5372f146997638d737c9a3c597a3b41037cfbc5c41538fc27c09c3a3f9", - "sha256:7c7d4bf585ad501c5f6c980e7be9c4f15604c7cc150e942d82083b31a7548930", - "sha256:88456d65f207b39f1981bf772e473799fcdc10801062c36fd5ad9f9d1d463a73", - "sha256:914285126ad0b6eb2258bbbcb7b288d9dfd655ae88fa28945be05a7b475a800b", - "sha256:936acba76d636f70db8f3c66e76aa6cb5136a936fc2a5088b9ce1c7a3508fc83", - "sha256:980b71510f59c884d684b3663d46e7a14b457c9611c481e5cef08f4dd022eed7", - "sha256:984306dc4720da5498b16fc037b36ac443816125a3705dfde4fd90652d8028ef", - "sha256:a2cffa171445b0efa0726c561eca9a27d00a1f2b83846dbd5a4f639c4f8ca8e1", - "sha256:aa160781cafff2719b663c8a506156e9289d111d80f3387cf3af49cedee1f040", - "sha256:b2c45f6e1e57ebb4687690c05bc3a2c1fb6ab260550c4290b8abb1335e0fd08b", - "sha256:b4dfbb6c49221be4535623ea4474a4d6ee0a9cef4a80b20c28db4d858b64e270", - "sha256:baececaa8edff42cd16558a639a9b0ddf425f93d892e8392a56bf904f5eff22c", - "sha256:bcfd02377be80ef3b6bc4ce481ef3959640458d6feaae0bd43dd90a43da90a7d", - "sha256:c0b14488bd336c5b1845cee83d3e631a1f8b4e9c5091ec539406e4a324f882d8", - "sha256:c100d09ac72a8a08ddbf0629ddfa0b8ee41740f9051429baa8e31bb903ad7508", - "sha256:c344453ef3bf875a535b0488e3ad28e341adbd5a9ffb0f7d62cefacc8824ef2b", - "sha256:c50f148b31b03fbadd6d0b5980e38b558046b127dc483e5e4505fcef250f9503", - "sha256:c82253cfc9be68e3e49282831afad2c1f6593af80c0daf1287f6a92657986757", - "sha256:cd67c7df93eb58f360c43802acc945fa8da70c675b6fa37a241e17ca698ca49b", - "sha256:d7ab624ff2f663f98cd03c8b7eedc09375a911794dfea6bf2a359fcc266bff29", - "sha256:e252f8ca942a870f38cf785aef420285431311652d871409a64e2a0a52a2174c", - "sha256:ede7f010f2239b97cc79e6cb3c249e72962404ae3865860855d5cbe708b0fd22", - "sha256:eeea812f38536a0aa859972d50c76e37f4456474b02bd93674d1947cf1e39578", - "sha256:f15edcae3830ff20e55d1f4e743e92970c847bcddc8b7509bcd172aa04de506e", - "sha256:f5315a8c8dd6dd9425b974515081fc0aadca1d1d61e078d2246509fd756141ee", - "sha256:f6ee8dedd255087bc7fe82adf046f0b75479b989185fb0bdf9a98b612170eac7", - "sha256:f7c739888c20f99824f7aa9d31ac8a97353e22d0c0e54703a547a218f6637eb3" - ], - "markers": "python_version >= '3.8'", - "version": "==4.0.2" + "sha256:07df1fdd701c5d4c8e55ef6cf55b8f0120fe1aef7ef39a1c6fc6bc2e606d517a", + "sha256:20ffe5b202af80ab4266dcd3e91aae72bf2da48c0d33bdb15c66658e685e94e2", + "sha256:212ac9b8bf1161dc91bd09c048048a95ca3a4c4f5e5d4a7d1b1a7d5752a7f96f", + "sha256:2cce7cfc2008eb51feb6aab51251fd79b85d9894e98ba847408f662b3395ca3c", + "sha256:490ab2ef84f11129844c23fb14ecf30ef3d8a6abafd3754a6f75ca1e6654136c", + 
"sha256:6eb11feb5a0d452ee41f824e271ca311a09e250441c262ca2fd7ebcf2461a06c", + "sha256:6f10cb2d5902447c7d0da897e2c6768bca89174d0c6e1e30abec5421af97a5b0", + "sha256:7607498efa04a3542ae3e05e64da8202e58159aa1fa4acddf7678d34a35d4f13", + "sha256:76aae96b00ae814b181bb25b1b98076d5fc84e8a53cd8885a318b42b6d3a5134", + "sha256:7a0e56874cfbc4b9b05c60c8a1926fedf56324bb08cfbc188969777940aef3aa", + "sha256:82dc3e3143c7e38ec49d61af98d6558288c415eac98486a5c581726e0737c00e", + "sha256:9041567ee8953024c83343288ccc458fd0a2d811d6a0fd68c4c22609e3490379", + "sha256:90c8e78f3b94014f7aaae121e6b909674df5b46ec24d6bebc45c44c56729af2a", + "sha256:9513f27a1a582d9808cf21a07dae516f0fab1cf2d7683a742c498b93eedabb11", + "sha256:9ddf7c82fda3ae8e24decda1338ede66e1c99883db93711d8fb941eaa2d8c282", + "sha256:a175f755fc2279e0b7312c0035d52e27211a5bc39719dd529625b1930917345b", + "sha256:a1914259fa9e1454315171103c6a30961236f508b9b623eae470268bbcc6a22f", + "sha256:afd0fe1b2270917c5e23c2a65ce50c2a4abb63daafb0d419fde368e272a76b7c", + "sha256:bc64ab3bdb6a04d69d4023b29422170b74681784ffb9463ed4870cf2f3e66112", + "sha256:bdd4e6f14b8b18c334febb9c4425a878a2ac20efd1e0b231978e7b150f92a948", + "sha256:c7ac31a19f4545dd92fc25d200694098f42c9a8e391bc00bdd362c5736dbf881", + "sha256:c7c15dda13c4eb00d6fb6fc508b3c0ed88b9d5d374056b239c4ad1611125c860", + "sha256:c897ac1b55c5a1461e16dae288d22bb2e412ba9807df8397a635d88f671d36c3", + "sha256:cbafb470cf848d93b5d013e2ecb245d4aa1c8fd0504e863ccefa32445359d680", + "sha256:d1cdb490583ebd691c012b3d6dae011000fe42edb7a82ece80965b42abd61f26", + "sha256:e3df4cbb9a450c6d49318f6d14f4bbc80d763fa587ba46ec86f99f9e6876bb26", + "sha256:e6439e374fc012255b4ec786ae3c4bc838cd7309a540e5fe0952d03687d8804e", + "sha256:e6f0e77c9417e7cd62af82529b10563db3423625c5fce018430b249bf977f9e8", + "sha256:e7631a77ffb1f7d2eefa4445ebbee491c720a5661ddf6df3498ebecae5ed375c", + "sha256:ef810fbf7b781a5a593894e4f439773830bdecb885e6880d957d5b9382a960d2" + ], + "markers": "python_version >= '3.9'", + "version": "==6.0.0" }, "wcmatch": { "hashes": [ - "sha256:567d66b11ad74384954c8af86f607857c3bdf93682349ad32066231abd556c92", - "sha256:af25922e2b6dbd1550fa37a4c8de7dd558d6c1bb330c641de9b907b9776cb3c4" + "sha256:5848ace7dbb0476e5e55ab63c6bbd529745089343427caa5537f230cc01beb8a", + "sha256:f11f94208c8c8484a16f4f48638a85d771d9513f4ab3f37595978801cb9465af" ], - "markers": "python_version >= '3.8'", - "version": "==9.0" + "markers": "python_version >= '3.9'", + "version": "==10.1" }, "zipp": { "hashes": [ diff --git a/vendor/github.com/testcontainers/testcontainers-go/RELEASING.md b/vendor/github.com/testcontainers/testcontainers-go/RELEASING.md index 31a995493..a35e243cb 100644 --- a/vendor/github.com/testcontainers/testcontainers-go/RELEASING.md +++ b/vendor/github.com/testcontainers/testcontainers-go/RELEASING.md @@ -93,23 +93,23 @@ go mod tidy go mod tidy go mod tidy go mod tidy -sed "s/Not available until the next release of testcontainers-go :material-tag: main<\/span><\/a>/Since testcontainers-go :material-tag: v0.20.1<\/span><\/a>/g" couchbase.md > couchbase.md.tmp +sed "s/Not available until the next release :material-tag: main<\/span><\/a>/Since :material-tag: v0.20.1<\/span><\/a>/g" couchbase.md > couchbase.md.tmp mv couchbase.md.tmp couchbase.md -sed "s/Not available until the next release of testcontainers-go :material-tag: main<\/span><\/a>/Since testcontainers-go :material-tag: v0.20.1<\/span><\/a>/g" localstack.md > localstack.md.tmp +sed "s/Not available until the next release :material-tag: main<\/span><\/a>/Since :material-tag: 
v0.20.1<\/span><\/a>/g" localstack.md > localstack.md.tmp mv localstack.md.tmp localstack.md -sed "s/Not available until the next release of testcontainers-go :material-tag: main<\/span><\/a>/Since testcontainers-go :material-tag: v0.20.1<\/span><\/a>/g" mysql.md > mysql.md.tmp +sed "s/Not available until the next release :material-tag: main<\/span><\/a>/Since :material-tag: v0.20.1<\/span><\/a>/g" mysql.md > mysql.md.tmp mv mysql.md.tmp mysql.md -sed "s/Not available until the next release of testcontainers-go :material-tag: main<\/span><\/a>/Since testcontainers-go :material-tag: v0.20.1<\/span><\/a>/g" neo4j.md > neo4j.md.tmp +sed "s/Not available until the next release :material-tag: main<\/span><\/a>/Since :material-tag: v0.20.1<\/span><\/a>/g" neo4j.md > neo4j.md.tmp mv neo4j.md.tmp neo4j.md -sed "s/Not available until the next release of testcontainers-go :material-tag: main<\/span><\/a>/Since testcontainers-go :material-tag: v0.20.1<\/span><\/a>/g" postgres.md > postgres.md.tmp +sed "s/Not available until the next release :material-tag: main<\/span><\/a>/Since :material-tag: v0.20.1<\/span><\/a>/g" postgres.md > postgres.md.tmp mv postgres.md.tmp postgres.md -sed "s/Not available until the next release of testcontainers-go :material-tag: main<\/span><\/a>/Since testcontainers-go :material-tag: v0.20.1<\/span><\/a>/g" pulsar.md > pulsar.md.tmp +sed "s/Not available until the next release :material-tag: main<\/span><\/a>/Since :material-tag: v0.20.1<\/span><\/a>/g" pulsar.md > pulsar.md.tmp mv pulsar.md.tmp pulsar.md -sed "s/Not available until the next release of testcontainers-go :material-tag: main<\/span><\/a>/Since testcontainers-go :material-tag: v0.20.1<\/span><\/a>/g" redis.md > redis.md.tmp +sed "s/Not available until the next release :material-tag: main<\/span><\/a>/Since :material-tag: v0.20.1<\/span><\/a>/g" redis.md > redis.md.tmp mv redis.md.tmp redis.md -sed "s/Not available until the next release of testcontainers-go :material-tag: main<\/span><\/a>/Since testcontainers-go :material-tag: v0.20.1<\/span><\/a>/g" redpanda.md > redpanda.md.tmp +sed "s/Not available until the next release :material-tag: main<\/span><\/a>/Since :material-tag: v0.20.1<\/span><\/a>/g" redpanda.md > redpanda.md.tmp mv redpanda.md.tmp redpanda.md -sed "s/Not available until the next release of testcontainers-go :material-tag: main<\/span><\/a>/Since testcontainers-go :material-tag: v0.20.1<\/span><\/a>/g" vault.md > vault.md.tmp +sed "s/Not available until the next release :material-tag: main<\/span><\/a>/Since :material-tag: v0.20.1<\/span><\/a>/g" vault.md > vault.md.tmp mv vault.md.tmp vault.md ``` diff --git a/vendor/github.com/testcontainers/testcontainers-go/cleanup.go b/vendor/github.com/testcontainers/testcontainers-go/cleanup.go index e2d52440b..bd9371355 100644 --- a/vendor/github.com/testcontainers/testcontainers-go/cleanup.go +++ b/vendor/github.com/testcontainers/testcontainers-go/cleanup.go @@ -8,20 +8,65 @@ import ( "time" ) -// terminateOptions is a type that holds the options for terminating a container. -type terminateOptions struct { - ctx context.Context - timeout *time.Duration - volumes []string +// TerminateOptions is a type that holds the options for terminating a container. +type TerminateOptions struct { + ctx context.Context + stopTimeout *time.Duration + volumes []string } // TerminateOption is a type that represents an option for terminating a container. 
-type TerminateOption func(*terminateOptions)
+type TerminateOption func(*TerminateOptions)
+
+// NewTerminateOptions returns a fully initialised TerminateOptions.
+// Defaults: StopTimeout: 10 seconds.
+func NewTerminateOptions(ctx context.Context, opts ...TerminateOption) *TerminateOptions {
+	timeout := time.Second * 10
+	options := &TerminateOptions{
+		stopTimeout: &timeout,
+		ctx:         ctx,
+	}
+	for _, opt := range opts {
+		opt(options)
+	}
+	return options
+}
+
+// Context returns the context to use during a Terminate.
+func (o *TerminateOptions) Context() context.Context {
+	return o.ctx
+}
+
+// StopTimeout returns the stop timeout to use during a Terminate.
+func (o *TerminateOptions) StopTimeout() *time.Duration {
+	return o.stopTimeout
+}
+
+// Cleanup performs any clean up needed.
+func (o *TerminateOptions) Cleanup() error {
+	// TODO: simplify this when we perform the client refactor.
+	if len(o.volumes) == 0 {
+		return nil
+	}
+	client, err := NewDockerClientWithOpts(o.ctx)
+	if err != nil {
+		return fmt.Errorf("docker client: %w", err)
+	}
+	defer client.Close()
+	// Best effort to remove all volumes.
+	var errs []error
+	for _, volume := range o.volumes {
+		if errRemove := client.VolumeRemove(o.ctx, volume, true); errRemove != nil {
+			errs = append(errs, fmt.Errorf("volume remove %q: %w", volume, errRemove))
+		}
+	}
+	return errors.Join(errs...)
+}
 
 // StopContext returns a TerminateOption that sets the context.
 // Default: context.Background().
 func StopContext(ctx context.Context) TerminateOption {
-	return func(c *terminateOptions) {
+	return func(c *TerminateOptions) {
 		c.ctx = ctx
 	}
 }
@@ -29,8 +74,8 @@ func StopContext(ctx context.Context) TerminateOption {
 // StopTimeout returns a TerminateOption that sets the timeout.
 // Default: See [Container.Stop].
 func StopTimeout(timeout time.Duration) TerminateOption {
-	return func(c *terminateOptions) {
-		c.timeout = &timeout
+	return func(c *TerminateOptions) {
+		c.stopTimeout = &timeout
 	}
 }
 
@@ -39,7 +84,7 @@ func StopTimeout(timeout time.Duration) TerminateOption {
 // which are not removed by default.
 // Default: nil.
 func RemoveVolumes(volumes ...string) TerminateOption {
-	return func(c *terminateOptions) {
+	return func(c *TerminateOptions) {
 		c.volumes = volumes
 	}
 }
@@ -54,44 +99,15 @@ func TerminateContainer(container Container, options ...TerminateOption) error {
 		return nil
 	}
 
-	c := &terminateOptions{
-		ctx: context.Background(),
-	}
-
-	for _, opt := range options {
-		opt(c)
-	}
-
-	// TODO: Add a timeout when terminate supports it.
-	err := container.Terminate(c.ctx)
+	err := container.Terminate(context.Background(), options...)
 	if !isCleanupSafe(err) {
 		return fmt.Errorf("terminate: %w", err)
 	}
 
-	// Remove additional volumes if any.
-	if len(c.volumes) == 0 {
-		return nil
-	}
-
-	client, err := NewDockerClientWithOpts(c.ctx)
-	if err != nil {
-		return fmt.Errorf("docker client: %w", err)
-	}
-
-	defer client.Close()
-
-	// Best effort to remove all volumes.
-	var errs []error
-	for _, volume := range c.volumes {
-		if errRemove := client.VolumeRemove(c.ctx, volume, true); errRemove != nil {
-			errs = append(errs, fmt.Errorf("volume remove %q: %w", volume, errRemove))
-		}
-	}
-
-	return errors.Join(errs...)
+	return nil
 }
 
-// isNil returns true if val is nil or an nil instance false otherwise.
+// isNil returns true if val is nil or a nil instance, false otherwise.
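The reworked options above are consumed through `TerminateContainer` and the now-variadic `Container.Terminate`. A minimal usage sketch, assuming a container value obtained elsewhere from testcontainers-go; the option names come from this hunk, while the timeout value and volume name are illustrative only:

```go
package example

import (
	"log"
	"time"

	"github.com/testcontainers/testcontainers-go"
)

// CleanUp tears a container down with the TerminateOption API introduced
// above: StopTimeout overrides the 10-second default set by
// NewTerminateOptions, and RemoveVolumes adds best-effort volume removal.
func CleanUp(ctr testcontainers.Container) {
	err := testcontainers.TerminateContainer(ctr,
		testcontainers.StopTimeout(30*time.Second),
		testcontainers.RemoveVolumes("data-volume"), // hypothetical volume name
	)
	if err != nil {
		log.Printf("terminate: %v", err)
	}
}
```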
func isNil(val any) bool { if val == nil { return true diff --git a/vendor/github.com/testcontainers/testcontainers-go/commons-test.mk b/vendor/github.com/testcontainers/testcontainers-go/commons-test.mk index d168ff5c6..10adfad8d 100644 --- a/vendor/github.com/testcontainers/testcontainers-go/commons-test.mk +++ b/vendor/github.com/testcontainers/testcontainers-go/commons-test.mk @@ -6,13 +6,13 @@ define go_install endef $(GOBIN)/golangci-lint: - $(call go_install,github.com/golangci/golangci-lint/cmd/golangci-lint@v1.61.0) + $(call go_install,github.com/golangci/golangci-lint/v2/cmd/golangci-lint@v2.0.2) $(GOBIN)/gotestsum: $(call go_install,gotest.tools/gotestsum@latest) $(GOBIN)/mockery: - $(call go_install,github.com/vektra/mockery/v2@v2.45) + $(call go_install,github.com/vektra/mockery/v2@v2.53.4) .PHONY: install install: $(GOBIN)/golangci-lint $(GOBIN)/gotestsum $(GOBIN)/mockery @@ -30,7 +30,7 @@ dependencies-scan: .PHONY: lint lint: $(GOBIN)/golangci-lint - golangci-lint run --out-format=colored-line-number --path-prefix=. --verbose -c $(ROOT_DIR)/.golangci.yml --fix + golangci-lint run --verbose -c $(ROOT_DIR)/.golangci.yml --fix .PHONY: generate generate: $(GOBIN)/mockery @@ -47,7 +47,8 @@ test-%: $(GOBIN)/gotestsum -- \ -v \ -coverprofile=coverage.out \ - -timeout=30m + -timeout=30m \ + -race .PHONY: tools tools: diff --git a/vendor/github.com/testcontainers/testcontainers-go/container.go b/vendor/github.com/testcontainers/testcontainers-go/container.go index d114a5988..b0f2273a3 100644 --- a/vendor/github.com/testcontainers/testcontainers-go/container.go +++ b/vendor/github.com/testcontainers/testcontainers-go/container.go @@ -6,23 +6,25 @@ import ( "errors" "fmt" "io" + "maps" "os" "path/filepath" "strings" "time" "github.com/cpuguy83/dockercfg" - "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/build" "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/network" "github.com/docker/docker/api/types/registry" - "github.com/docker/docker/pkg/archive" "github.com/docker/go-connections/nat" "github.com/google/uuid" + "github.com/moby/go-archive" "github.com/moby/patternmatcher/ignorefile" tcexec "github.com/testcontainers/testcontainers-go/exec" "github.com/testcontainers/testcontainers-go/internal/core" + "github.com/testcontainers/testcontainers-go/log" "github.com/testcontainers/testcontainers-go/wait" ) @@ -41,7 +43,7 @@ type Container interface { Endpoint(context.Context, string) (string, error) // get proto://ip:port string for the lowest exposed port PortEndpoint(ctx context.Context, port nat.Port, proto string) (string, error) // get proto://ip:port string for the given exposed port Host(context.Context) (string, error) // get host where the container port is exposed - Inspect(context.Context) (*types.ContainerJSON, error) // get container info + Inspect(context.Context) (*container.InspectResponse, error) // get container info MappedPort(context.Context, nat.Port) (nat.Port, error) // get externally mapped port for a container port Ports(context.Context) (nat.PortMap, error) // Deprecated: Use c.Inspect(ctx).NetworkSettings.Ports instead SessionID() string // get session id @@ -50,14 +52,14 @@ type Container interface { Stop(context.Context, *time.Duration) error // stop the container // Terminate stops and removes the container and its image if it was built and not flagged as kept. 
- Terminate(ctx context.Context) error + Terminate(ctx context.Context, opts ...TerminateOption) error Logs(context.Context) (io.ReadCloser, error) // Get logs of the container FollowOutput(LogConsumer) // Deprecated: it will be removed in the next major release StartLogProducer(context.Context, ...LogProductionOption) error // Deprecated: Use the ContainerRequest instead StopLogProducer() error // Deprecated: it will be removed in the next major release Name(context.Context) (string, error) // Deprecated: Use c.Inspect(ctx).Name instead - State(context.Context) (*types.ContainerState, error) // returns container's running state + State(context.Context) (*container.State, error) // returns container's running state Networks(context.Context) ([]string, error) // get container networks NetworkAliases(context.Context) (map[string][]string, error) // get container network aliases for a network Exec(ctx context.Context, cmd []string, options ...tcexec.ProcessOption) (int, io.Reader, error) @@ -72,14 +74,14 @@ type Container interface { // ImageBuildInfo defines what is needed to build an image type ImageBuildInfo interface { - BuildOptions() (types.ImageBuildOptions, error) // converts the ImageBuildInfo to a types.ImageBuildOptions + BuildOptions() (build.ImageBuildOptions, error) // converts the ImageBuildInfo to a build.ImageBuildOptions GetContext() (io.Reader, error) // the path to the build context - GetDockerfile() string // the relative path to the Dockerfile, including the fileitself + GetDockerfile() string // the relative path to the Dockerfile, including the file itself GetRepo() string // get repo label for image GetTag() string // get tag label for image - ShouldPrintBuildLog() bool // allow build log to be printed to stdout + BuildLogWriter() io.Writer // for output of build log, use io.Discard to disable the output ShouldBuildImage() bool // return true if the image needs to be built - GetBuildArgs() map[string]*string // return the environment args used to build the from Dockerfile + GetBuildArgs() map[string]*string // return the environment args used to build the Dockerfile GetAuthConfigs() map[string]registry.AuthConfig // Deprecated. Testcontainers will detect registry credentials automatically. Return the auth configs to be able to pull from an authenticated docker registry } @@ -92,7 +94,8 @@ type FromDockerfile struct { Repo string // the repo label for image, defaults to UUID Tag string // the tag label for image, defaults to UUID BuildArgs map[string]*string // enable user to pass build args to docker daemon - PrintBuildLog bool // enable user to print build log + PrintBuildLog bool // Deprecated: Use BuildLogWriter instead + BuildLogWriter io.Writer // for output of build log, defaults to io.Discard AuthConfigs map[string]registry.AuthConfig // Deprecated. Testcontainers will detect registry credentials automatically. Enable auth configs to be able to pull from an authenticated docker registry // KeepImage describes whether DockerContainer.Terminate should not delete the // container image. Useful for images that are built from a Dockerfile and take a @@ -101,7 +104,7 @@ type FromDockerfile struct { // BuildOptionsModifier Modifier for the build options before image build. Use it for // advanced configurations while building the image. Please consider that the modifier // is called after the default build options are set. 
- BuildOptionsModifier func(*types.ImageBuildOptions) + BuildOptionsModifier func(*build.ImageBuildOptions) } type ContainerFile struct { @@ -127,44 +130,53 @@ func (c *ContainerFile) validate() error { // ContainerRequest represents the parameters used to get a running container type ContainerRequest struct { FromDockerfile - HostAccessPorts []int - Image string - ImageSubstitutors []ImageSubstitutor - Entrypoint []string - Env map[string]string - ExposedPorts []string // allow specifying protocol info - Cmd []string - Labels map[string]string - Mounts ContainerMounts - Tmpfs map[string]string - RegistryCred string // Deprecated: Testcontainers will detect registry credentials automatically - WaitingFor wait.Strategy - Name string // for specifying container name - Hostname string - WorkingDir string // specify the working directory of the container - ExtraHosts []string // Deprecated: Use HostConfigModifier instead - Privileged bool // For starting privileged container - Networks []string // for specifying network names - NetworkAliases map[string][]string // for specifying network aliases - NetworkMode container.NetworkMode // Deprecated: Use HostConfigModifier instead - Resources container.Resources // Deprecated: Use HostConfigModifier instead - Files []ContainerFile // files which will be copied when container starts - User string // for specifying uid:gid - SkipReaper bool // Deprecated: The reaper is globally controlled by the .testcontainers.properties file or the TESTCONTAINERS_RYUK_DISABLED environment variable - ReaperImage string // Deprecated: use WithImageName ContainerOption instead. Alternative reaper image - ReaperOptions []ContainerOption // Deprecated: the reaper is configured at the properties level, for an entire test session - AutoRemove bool // Deprecated: Use HostConfigModifier instead. If set to true, the container will be removed from the host when stopped - AlwaysPullImage bool // Always pull image - ImagePlatform string // ImagePlatform describes the platform which the image runs on. - Binds []string // Deprecated: Use HostConfigModifier instead - ShmSize int64 // Amount of memory shared with the host (in bytes) - CapAdd []string // Deprecated: Use HostConfigModifier instead. Add Linux capabilities - CapDrop []string // Deprecated: Use HostConfigModifier instead. Drop Linux capabilities - ConfigModifier func(*container.Config) // Modifier for the config before container creation - HostConfigModifier func(*container.HostConfig) // Modifier for the host config before container creation - EnpointSettingsModifier func(map[string]*network.EndpointSettings) // Modifier for the network settings before container creation - LifecycleHooks []ContainerLifecycleHooks // define hooks to be executed during container lifecycle - LogConsumerCfg *LogConsumerConfig // define the configuration for the log producer and its log consumers to follow the logs + HostAccessPorts []int + Image string + ImageSubstitutors []ImageSubstitutor + Entrypoint []string + Env map[string]string + ExposedPorts []string // allow specifying protocol info + Cmd []string + Labels map[string]string + Mounts ContainerMounts + Tmpfs map[string]string + RegistryCred string // Deprecated: Testcontainers will detect registry credentials automatically + WaitingFor wait.Strategy + Name string // for specifying container name + Hostname string // Deprecated: Use [ConfigModifier] instead. S + WorkingDir string // Deprecated: Use [ConfigModifier] instead. 
Specify the working directory of the container + ExtraHosts []string // Deprecated: Use HostConfigModifier instead + Privileged bool // Deprecated: Use [HostConfigModifier] instead. For starting privileged container + Networks []string // for specifying network names + NetworkAliases map[string][]string // for specifying network aliases + NetworkMode container.NetworkMode // Deprecated: Use HostConfigModifier instead + Resources container.Resources // Deprecated: Use HostConfigModifier instead + Files []ContainerFile // files which will be copied when container starts + User string // Deprecated: Use [ConfigModifier] instead. For specifying uid:gid + SkipReaper bool // Deprecated: The reaper is globally controlled by the .testcontainers.properties file or the TESTCONTAINERS_RYUK_DISABLED environment variable + ReaperImage string // Deprecated: use WithImageName ContainerOption instead. Alternative reaper image + ReaperOptions []ContainerOption // Deprecated: the reaper is configured at the properties level, for an entire test session + AutoRemove bool // Deprecated: Use HostConfigModifier instead. If set to true, the container will be removed from the host when stopped + AlwaysPullImage bool // Always pull image + ImagePlatform string // ImagePlatform describes the platform which the image runs on. + Binds []string // Deprecated: Use HostConfigModifier instead + ShmSize int64 // Deprecated: Use [HostConfigModifier] instead. Amount of memory shared with the host (in bytes) + CapAdd []string // Deprecated: Use HostConfigModifier instead. Add Linux capabilities + CapDrop []string // Deprecated: Use HostConfigModifier instead. Drop Linux capabilities + ConfigModifier func(*container.Config) // Modifier for the config before container creation + HostConfigModifier func(*container.HostConfig) // Modifier for the host config before container creation + EndpointSettingsModifier func(map[string]*network.EndpointSettings) // Modifier for the network settings before container creation + LifecycleHooks []ContainerLifecycleHooks // define hooks to be executed during container lifecycle + LogConsumerCfg *LogConsumerConfig // define the configuration for the log producer and its log consumers to follow the logs +} + +// sessionID returns the session ID for the container request. +func (c *ContainerRequest) sessionID() string { + if sessionID := c.Labels[core.LabelSessionID]; sessionID != "" { + return sessionID + } + + return core.SessionID() } // containerOptions functional options for a container @@ -216,7 +228,7 @@ func (c *ContainerRequest) Validate() error { // GetContext retrieve the build context for the request // Must be closed when no longer needed. func (c *ContainerRequest) GetContext() (io.Reader, error) { - var includes []string = []string{"."} + includes := []string{"."} if c.ContextArchive != nil { return c.ContextArchive, nil @@ -274,37 +286,37 @@ func parseDockerIgnore(targetDir string) (bool, []string, error) { // GetBuildArgs returns the env args to be used when creating from Dockerfile func (c *ContainerRequest) GetBuildArgs() map[string]*string { - return c.FromDockerfile.BuildArgs + return c.BuildArgs } -// GetDockerfile returns the Dockerfile from the ContainerRequest, defaults to "Dockerfile" +// GetDockerfile returns the Dockerfile from the ContainerRequest, defaults to "Dockerfile". +// Sets FromDockerfile.Dockerfile to the default if blank. 
func (c *ContainerRequest) GetDockerfile() string { - f := c.FromDockerfile.Dockerfile - if f == "" { - return "Dockerfile" + if c.Dockerfile == "" { + c.Dockerfile = "Dockerfile" } - return f + return c.Dockerfile } -// GetRepo returns the Repo label for image from the ContainerRequest, defaults to UUID +// GetRepo returns the Repo label for image from the ContainerRequest, defaults to UUID. +// Sets FromDockerfile.Repo to the default value if blank. func (c *ContainerRequest) GetRepo() string { - r := c.FromDockerfile.Repo - if r == "" { - return uuid.NewString() + if c.Repo == "" { + c.Repo = uuid.NewString() } - return strings.ToLower(r) + return strings.ToLower(c.Repo) } -// GetTag returns the Tag label for image from the ContainerRequest, defaults to UUID +// GetTag returns the Tag label for image from the ContainerRequest, defaults to UUID. +// Sets FromDockerfile.Tag to the default value if blank. func (c *ContainerRequest) GetTag() string { - t := c.FromDockerfile.Tag - if t == "" { - return uuid.NewString() + if c.Tag == "" { + c.Tag = uuid.NewString() } - return strings.ToLower(t) + return strings.ToLower(c.Tag) } // Deprecated: Testcontainers will detect registry credentials automatically, and it will be removed in the next major release. @@ -332,13 +344,13 @@ func (c *ContainerRequest) dockerFileImages() ([]string, error) { // Source is an archive, we need to read it to get the Dockerfile. dockerFile := c.GetDockerfile() - tr := tar.NewReader(c.FromDockerfile.ContextArchive) + tr := tar.NewReader(c.ContextArchive) for { hdr, err := tr.Next() if err != nil { if errors.Is(err, io.EOF) { - return nil, fmt.Errorf("Dockerfile %q not found in context archive", dockerFile) + return nil, fmt.Errorf("dockerfile %q not found in context archive", dockerFile) } return nil, fmt.Errorf("reading tar archive: %w", err) @@ -394,28 +406,42 @@ func getAuthConfigsFromDockerfile(c *ContainerRequest) (map[string]registry.Auth } func (c *ContainerRequest) ShouldBuildImage() bool { - return c.FromDockerfile.Context != "" || c.FromDockerfile.ContextArchive != nil + return c.Context != "" || c.ContextArchive != nil } func (c *ContainerRequest) ShouldKeepBuiltImage() bool { - return c.FromDockerfile.KeepImage + return c.KeepImage } -func (c *ContainerRequest) ShouldPrintBuildLog() bool { - return c.FromDockerfile.PrintBuildLog +// BuildLogWriter returns the io.Writer for output of log when building a Docker image from +// a Dockerfile. It returns the BuildLogWriter from the ContainerRequest, defaults to io.Discard. +// For backward compatibility, if BuildLogWriter is default and PrintBuildLog is true, +// the function returns os.Stderr. +// +//nolint:staticcheck //FIXME +func (c *ContainerRequest) BuildLogWriter() io.Writer { + if c.FromDockerfile.BuildLogWriter != nil { + return c.FromDockerfile.BuildLogWriter + } + if c.PrintBuildLog { + c.FromDockerfile.BuildLogWriter = os.Stderr + } else { + c.FromDockerfile.BuildLogWriter = io.Discard + } + return c.FromDockerfile.BuildLogWriter } // BuildOptions returns the image build options when building a Docker image from a Dockerfile. // It will apply some defaults and finally call the BuildOptionsModifier from the FromDockerfile struct, // if set. 
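The `BuildLogWriter` accessor above replaces the boolean `PrintBuildLog`, keeping backward compatibility by mapping the deprecated flag to `os.Stderr`. A short sketch of the new request shape, using the field names from this patch; the context path is hypothetical:

```go
package example

import (
	"os"

	"github.com/testcontainers/testcontainers-go"
)

// BuildRequest routes image-build output to stderr through the new
// io.Writer field instead of the deprecated PrintBuildLog boolean.
func BuildRequest() testcontainers.ContainerRequest {
	return testcontainers.ContainerRequest{
		FromDockerfile: testcontainers.FromDockerfile{
			Context:        "./testdata", // hypothetical build context
			Dockerfile:     "Dockerfile",
			BuildLogWriter: os.Stderr, // was: PrintBuildLog: true
		},
	}
}
```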
-func (c *ContainerRequest) BuildOptions() (types.ImageBuildOptions, error) { - buildOptions := types.ImageBuildOptions{ +func (c *ContainerRequest) BuildOptions() (build.ImageBuildOptions, error) { + buildOptions := build.ImageBuildOptions{ Remove: true, ForceRemove: true, } - if c.FromDockerfile.BuildOptionsModifier != nil { - c.FromDockerfile.BuildOptionsModifier(&buildOptions) + if c.BuildOptionsModifier != nil { + c.BuildOptionsModifier(&buildOptions) } // apply mandatory values after the modifier @@ -425,16 +451,14 @@ func (c *ContainerRequest) BuildOptions() (types.ImageBuildOptions, error) { // Make sure the auth configs from the Dockerfile are set right after the user-defined build options. authsFromDockerfile, err := getAuthConfigsFromDockerfile(c) if err != nil { - return types.ImageBuildOptions{}, fmt.Errorf("auth configs from Dockerfile: %w", err) + return build.ImageBuildOptions{}, fmt.Errorf("auth configs from Dockerfile: %w", err) } if buildOptions.AuthConfigs == nil { buildOptions.AuthConfigs = map[string]registry.AuthConfig{} } - for registry, authConfig := range authsFromDockerfile { - buildOptions.AuthConfigs[registry] = authConfig - } + maps.Copy(buildOptions.AuthConfigs, authsFromDockerfile) // make sure the first tag is the one defined in the ContainerRequest tag := fmt.Sprintf("%s:%s", c.GetRepo(), c.GetTag()) @@ -443,11 +467,11 @@ func (c *ContainerRequest) BuildOptions() (types.ImageBuildOptions, error) { for _, is := range c.ImageSubstitutors { modifiedTag, err := is.Substitute(tag) if err != nil { - return types.ImageBuildOptions{}, fmt.Errorf("failed to substitute image %s with %s: %w", tag, is.Description(), err) + return build.ImageBuildOptions{}, fmt.Errorf("failed to substitute image %s with %s: %w", tag, is.Description(), err) } if modifiedTag != tag { - Logger.Printf("✍🏼 Replacing image with %s. From: %s to %s\n", is.Description(), tag, modifiedTag) + log.Printf("✍🏼 Replacing image with %s. From: %s to %s\n", is.Description(), tag, modifiedTag) tag = modifiedTag } } @@ -462,10 +486,10 @@ func (c *ContainerRequest) BuildOptions() (types.ImageBuildOptions, error) { if !c.ShouldKeepBuiltImage() { dst := GenericLabels() if err = core.MergeCustomLabels(dst, c.Labels); err != nil { - return types.ImageBuildOptions{}, err + return build.ImageBuildOptions{}, err } if err = core.MergeCustomLabels(dst, buildOptions.Labels); err != nil { - return types.ImageBuildOptions{}, err + return build.ImageBuildOptions{}, err } buildOptions.Labels = dst } @@ -473,7 +497,7 @@ func (c *ContainerRequest) BuildOptions() (types.ImageBuildOptions, error) { // Do this as late as possible to ensure we don't leak the context on error/panic. 
buildContext, err := c.GetContext() if err != nil { - return types.ImageBuildOptions{}, err + return build.ImageBuildOptions{}, err } buildOptions.Context = buildContext @@ -482,7 +506,7 @@ func (c *ContainerRequest) BuildOptions() (types.ImageBuildOptions, error) { } func (c *ContainerRequest) validateContextAndImage() error { - if c.FromDockerfile.Context != "" && c.Image != "" { + if c.Context != "" && c.Image != "" { return errors.New("you cannot specify both an Image and Context in a ContainerRequest") } @@ -490,7 +514,7 @@ func (c *ContainerRequest) validateContextAndImage() error { } func (c *ContainerRequest) validateContextOrImageIsSpecified() error { - if c.FromDockerfile.Context == "" && c.FromDockerfile.ContextArchive == nil && c.Image == "" { + if c.Context == "" && c.ContextArchive == nil && c.Image == "" { return errors.New("you must specify either a build context or an image") } @@ -507,9 +531,8 @@ func (c *ContainerRequest) validateMounts() error { targetPath := m.Target.Target() if targets[targetPath] { return fmt.Errorf("%w: %s", ErrDuplicateMountTarget, targetPath) - } else { - targets[targetPath] = true } + targets[targetPath] = true } if c.HostConfigModifier == nil { @@ -523,15 +546,14 @@ func (c *ContainerRequest) validateMounts() error { if len(hostConfig.Binds) > 0 { for _, bind := range hostConfig.Binds { parts := strings.Split(bind, ":") - if len(parts) != 2 { + if len(parts) != 2 && len(parts) != 3 { return fmt.Errorf("%w: %s", ErrInvalidBindMount, bind) } targetPath := parts[1] if targets[targetPath] { return fmt.Errorf("%w: %s", ErrDuplicateMountTarget, targetPath) - } else { - targets[targetPath] = true } + targets[targetPath] = true } } diff --git a/vendor/github.com/testcontainers/testcontainers-go/docker.go b/vendor/github.com/testcontainers/testcontainers-go/docker.go index 2ef8c6973..fbb7298f7 100644 --- a/vendor/github.com/testcontainers/testcontainers-go/docker.go +++ b/vendor/github.com/testcontainers/testcontainers-go/docker.go @@ -5,6 +5,7 @@ import ( "bufio" "context" "encoding/base64" + "encoding/binary" "encoding/json" "errors" "fmt" @@ -15,18 +16,19 @@ import ( "os" "path/filepath" "regexp" - "strings" + "slices" + "sync" "time" "github.com/cenkalti/backoff/v4" + "github.com/containerd/errdefs" "github.com/containerd/platforms" - "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/build" "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/image" "github.com/docker/docker/api/types/network" "github.com/docker/docker/client" - "github.com/docker/docker/errdefs" "github.com/docker/docker/pkg/jsonmessage" "github.com/docker/docker/pkg/stdcopy" "github.com/docker/go-connections/nat" @@ -36,6 +38,7 @@ import ( tcexec "github.com/testcontainers/testcontainers-go/exec" "github.com/testcontainers/testcontainers-go/internal/config" "github.com/testcontainers/testcontainers-go/internal/core" + "github.com/testcontainers/testcontainers-go/log" "github.com/testcontainers/testcontainers-go/wait" ) @@ -78,6 +81,7 @@ type DockerContainer struct { provider *DockerProvider sessionID string terminationSignal chan bool + consumersMtx sync.Mutex // protects consumers consumers []LogConsumer // TODO: Remove locking and wait group once the deprecated StartLogProducer and @@ -89,14 +93,14 @@ type DockerContainer struct { logProductionCtx context.Context logProductionTimeout *time.Duration - logger Logging + logger log.Logger lifecycleHooks []ContainerLifecycleHooks 
healthStatus string // container health status, will default to healthStatusNone if no healthcheck is present } // SetLogger sets the logger for the container -func (c *DockerContainer) SetLogger(logger Logging) { +func (c *DockerContainer) SetLogger(logger log.Logger) { c.logger = logger } @@ -138,7 +142,8 @@ func (c *DockerContainer) Endpoint(ctx context.Context, proto string) (string, e } // PortEndpoint gets proto://host:port string for the given exposed port -// Will returns just host:port if proto is "" +// It returns proto://host:port or proto://[IPv6host]:port string for the given exposed port. +// It returns just host:port or [IPv6host]:port if proto is blank. func (c *DockerContainer) PortEndpoint(ctx context.Context, port nat.Port, proto string) (string, error) { host, err := c.Host(ctx) if err != nil { @@ -150,12 +155,12 @@ func (c *DockerContainer) PortEndpoint(ctx context.Context, port nat.Port, proto return "", err } - protoFull := "" - if proto != "" { - protoFull = fmt.Sprintf("%s://", proto) + hostPort := net.JoinHostPort(host, outerPort.Port()) + if proto == "" { + return hostPort, nil } - return fmt.Sprintf("%s%s:%s", protoFull, host, outerPort.Port()), nil + return proto + "://" + hostPort, nil } // Host gets host (ip or name) of the docker daemon where the container port is exposed @@ -170,7 +175,7 @@ func (c *DockerContainer) Host(ctx context.Context) (string, error) { } // Inspect gets the raw container info -func (c *DockerContainer) Inspect(ctx context.Context) (*types.ContainerJSON, error) { +func (c *DockerContainer) Inspect(ctx context.Context) (*container.InspectResponse, error) { jsonRaw, err := c.inspectRawContainer(ctx) if err != nil { return nil, err @@ -185,7 +190,7 @@ func (c *DockerContainer) MappedPort(ctx context.Context, port nat.Port) (nat.Po if err != nil { return "", fmt.Errorf("inspect: %w", err) } - if inspect.ContainerJSONBase.HostConfig.NetworkMode == "host" { + if inspect.HostConfig.NetworkMode == "host" { return port, nil } @@ -204,7 +209,7 @@ func (c *DockerContainer) MappedPort(ctx context.Context, port nat.Port) (nat.Po return nat.NewPort(k.Proto(), p[0].HostPort) } - return "", errdefs.NotFound(fmt.Errorf("port %q not found", port)) + return "", errdefs.ErrNotFound.WithMessage(fmt.Sprintf("port %q not found", port)) } // Deprecated: use c.Inspect(ctx).NetworkSettings.Ports instead. @@ -303,12 +308,11 @@ func (c *DockerContainer) Stop(ctx context.Context, timeout *time.Duration) erro // The following hooks are called in order: // - [ContainerLifecycleHooks.PreTerminates] // - [ContainerLifecycleHooks.PostTerminates] -func (c *DockerContainer) Terminate(ctx context.Context) error { - // ContainerRemove hardcodes stop timeout to 3 seconds which is too short - // to ensure that child containers are stopped so we manually call stop. - // TODO: make this configurable via a functional option. - timeout := 10 * time.Second - err := c.Stop(ctx, &timeout) +// +// Default: timeout is 10 seconds. +func (c *DockerContainer) Terminate(ctx context.Context, opts ...TerminateOption) error { + options := NewTerminateOptions(ctx, opts...) + err := c.Stop(options.Context(), options.StopTimeout()) if err != nil && !isCleanupSafe(err) { return fmt.Errorf("stop: %w", err) } @@ -343,11 +347,15 @@ func (c *DockerContainer) Terminate(ctx context.Context) error { c.sessionID = "" c.isRunning = false + if err = options.Cleanup(); err != nil { + errs = append(errs, err) + } + return errors.Join(errs...) 
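The `PortEndpoint` change above swaps manual string concatenation for `net.JoinHostPort`, so IPv6 daemon hosts come out bracketed as the updated comment promises. A standalone illustration, standard library only:

```go
package main

import (
	"fmt"
	"net"
)

// JoinHostPort brackets IPv6 literals, which is what makes the
// proto + "://" + hostPort formatting safe for both address families.
func main() {
	fmt.Println("http://" + net.JoinHostPort("127.0.0.1", "8080")) // http://127.0.0.1:8080
	fmt.Println("http://" + net.JoinHostPort("::1", "8080"))       // http://[::1]:8080
}
```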
} // update container raw info -func (c *DockerContainer) inspectRawContainer(ctx context.Context) (*types.ContainerJSON, error) { +func (c *DockerContainer) inspectRawContainer(ctx context.Context) (*container.InspectResponse, error) { defer c.provider.Close() inspect, err := c.provider.client.ContainerInspect(ctx, c.ID) if err != nil { @@ -360,8 +368,6 @@ func (c *DockerContainer) inspectRawContainer(ctx context.Context) (*types.Conta // Logs will fetch both STDOUT and STDERR from the current container. Returns a // ReadCloser and leaves it up to the caller to extract what it wants. func (c *DockerContainer) Logs(ctx context.Context) (io.ReadCloser, error) { - const streamHeaderSize = 8 - options := container.LogsOptions{ ShowStdout: true, ShowStderr: true, @@ -373,42 +379,43 @@ func (c *DockerContainer) Logs(ctx context.Context) (io.ReadCloser, error) { } defer c.provider.Close() + resp, err := c.Inspect(ctx) + if err != nil { + return nil, err + } + + if resp.Config.Tty { + return rc, nil + } + + return c.parseMultiplexedLogs(rc), nil +} + +// parseMultiplexedLogs handles the multiplexed log format used when TTY is disabled +func (c *DockerContainer) parseMultiplexedLogs(rc io.ReadCloser) io.ReadCloser { + const streamHeaderSize = 8 + pr, pw := io.Pipe() r := bufio.NewReader(rc) go func() { - lineStarted := true - for err == nil { - line, isPrefix, err := r.ReadLine() - - if lineStarted && len(line) >= streamHeaderSize { - line = line[streamHeaderSize:] // trim stream header - lineStarted = false - } - if !isPrefix { - lineStarted = true - } - - _, errW := pw.Write(line) - if errW != nil { + header := make([]byte, streamHeaderSize) + for { + _, errH := io.ReadFull(r, header) + if errH != nil { + _ = pw.CloseWithError(errH) return } - if !isPrefix { - _, errW := pw.Write([]byte("\n")) - if errW != nil { - return - } - } - - if err != nil { - _ = pw.CloseWithError(err) + frameSize := binary.BigEndian.Uint32(header[4:]) + if _, err := io.CopyN(pw, r, int64(frameSize)); err != nil { + pw.CloseWithError(err) return } } }() - return pr, nil + return pr } // Deprecated: use the ContainerRequest.LogConsumerConfig field instead. @@ -419,9 +426,29 @@ func (c *DockerContainer) FollowOutput(consumer LogConsumer) { // followOutput adds a LogConsumer to be sent logs from the container's // STDOUT and STDERR func (c *DockerContainer) followOutput(consumer LogConsumer) { + c.consumersMtx.Lock() + defer c.consumersMtx.Unlock() + c.consumers = append(c.consumers, consumer) } +// consumersCopy returns a copy of the current consumers. +func (c *DockerContainer) consumersCopy() []LogConsumer { + c.consumersMtx.Lock() + defer c.consumersMtx.Unlock() + + return slices.Clone(c.consumers) +} + +// resetConsumers resets the current consumers to the provided ones. +func (c *DockerContainer) resetConsumers(consumers []LogConsumer) { + c.consumersMtx.Lock() + defer c.consumersMtx.Unlock() + + c.consumers = c.consumers[:0] + c.consumers = append(c.consumers, consumers...) +} + // Deprecated: use c.Inspect(ctx).Name instead. // Name gets the name of the container. func (c *DockerContainer) Name(ctx context.Context) (string, error) { @@ -433,7 +460,7 @@ func (c *DockerContainer) Name(ctx context.Context) (string, error) { } // State returns container's running state. 
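The new `parseMultiplexedLogs` above relies on Docker's non-TTY log framing: an 8-byte header whose first byte identifies the stream (1 = stdout, 2 = stderr), followed by three padding bytes and a big-endian uint32 payload length, which is exactly what the `header[4:]` decode reads. A self-contained sketch of the same loop run against a hand-built in-memory frame:

```go
package main

import (
	"bufio"
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
)

// demux strips the 8-byte frame headers from a multiplexed Docker log
// stream, mirroring the header[4:] length decoding used above.
func demux(r io.Reader, w io.Writer) error {
	br := bufio.NewReader(r)
	header := make([]byte, 8)
	for {
		if _, err := io.ReadFull(br, header); err != nil {
			if err == io.EOF {
				return nil // clean end of stream
			}
			return err
		}
		size := binary.BigEndian.Uint32(header[4:])
		if _, err := io.CopyN(w, br, int64(size)); err != nil {
			return err
		}
	}
}

func main() {
	// One stdout frame carrying "hi\n" (payload length 3).
	frame := append([]byte{1, 0, 0, 0, 0, 0, 0, 3}, "hi\n"...)
	var out bytes.Buffer
	if err := demux(bytes.NewReader(frame), &out); err != nil {
		fmt.Println("demux:", err)
	}
	fmt.Print(out.String())
}
```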
-func (c *DockerContainer) State(ctx context.Context) (*types.ContainerState, error) { +func (c *DockerContainer) State(ctx context.Context) (*container.State, error) { inspect, err := c.inspectRawContainer(ctx) if err != nil { return nil, err @@ -615,7 +642,7 @@ func (c *DockerContainer) CopyDirToContainer(ctx context.Context, hostDirPath st } if !dir { - // it's not a dir: let the consumer to handle an error + // it's not a dir: let the consumer handle the error return fmt.Errorf("path %s is not a directory", hostDirPath) } @@ -756,17 +783,23 @@ func (c *DockerContainer) startLogProduction(ctx context.Context, opts ...LogPro } // Setup the log writers. - stdout := newLogConsumerWriter(StdoutLog, c.consumers) - stderr := newLogConsumerWriter(StderrLog, c.consumers) + + consumers := c.consumersCopy() + stdout := newLogConsumerWriter(StdoutLog, consumers) + stderr := newLogConsumerWriter(StderrLog, consumers) // Setup the log production context which will be used to stop the log production. c.logProductionCtx, c.logProductionCancel = context.WithCancelCause(ctx) - go func() { - err := c.logProducer(stdout, stderr) - // Set context cancel cause, if not already set. - c.logProductionCancel(err) - }() + // We capture context cancel function to avoid data race with multiple + // calls to startLogProduction. + go func(cancel context.CancelCauseFunc) { + // Ensure the context is cancelled when log productions completes + // so that GetLogProductionErrorChannel functions correctly. + defer cancel(nil) + + c.logProducer(stdout, stderr) + }(c.logProductionCancel) return nil } @@ -775,40 +808,49 @@ func (c *DockerContainer) startLogProduction(ctx context.Context, opts ...LogPro // - logProductionCtx is done // - A fatal error occurs // - No more logs are available -func (c *DockerContainer) logProducer(stdout, stderr io.Writer) error { +func (c *DockerContainer) logProducer(stdout, stderr io.Writer) { // Clean up idle client connections. defer c.provider.Close() // Setup the log options, start from the beginning. - options := container.LogsOptions{ + options := &container.LogsOptions{ ShowStdout: true, ShowStderr: true, Follow: true, } - for { - timeoutCtx, cancel := context.WithTimeout(c.logProductionCtx, *c.logProductionTimeout) - defer cancel() + // Use a separate method so that timeout cancel function is + // called correctly. + for c.copyLogsTimeout(stdout, stderr, options) { + } +} - err := c.copyLogs(timeoutCtx, stdout, stderr, options) - switch { - case err == nil: - // No more logs available. - return nil - case c.logProductionCtx.Err() != nil: - // Log production was stopped or caller context is done. - return nil - case timeoutCtx.Err() != nil, errors.Is(err, net.ErrClosed): - // Timeout or client connection closed, retry. - default: - // Unexpected error, retry. - Logger.Printf("Unexpected error reading logs: %v", err) - } +// copyLogsTimeout copies logs from the container to stdout and stderr with a timeout. +// It returns true if the log production should be retried, false otherwise. +func (c *DockerContainer) copyLogsTimeout(stdout, stderr io.Writer, options *container.LogsOptions) bool { + timeoutCtx, cancel := context.WithTimeout(c.logProductionCtx, *c.logProductionTimeout) + defer cancel() - // Retry from the last log received. - now := time.Now() - options.Since = fmt.Sprintf("%d.%09d", now.Unix(), int64(now.Nanosecond())) + err := c.copyLogs(timeoutCtx, stdout, stderr, *options) + switch { + case err == nil: + // No more logs available. 
+ return false + case c.logProductionCtx.Err() != nil: + // Log production was stopped or caller context is done. + return false + case timeoutCtx.Err() != nil, errors.Is(err, net.ErrClosed): + // Timeout or client connection closed, retry. + default: + // Unexpected error, retry. + c.logger.Printf("Unexpected error reading logs: %v", err) } + + // Retry from the last log received. + now := time.Now() + options.Since = fmt.Sprintf("%d.%09d", now.Unix(), int64(now.Nanosecond())) + + return true } // copyLogs copies logs from the container to stdout and stderr. @@ -866,13 +908,41 @@ func (c *DockerContainer) GetLogProductionErrorChannel() <-chan error { } errCh := make(chan error, 1) - go func() { - <-c.logProductionCtx.Done() - errCh <- context.Cause(c.logProductionCtx) - }() + go func(ctx context.Context) { + <-ctx.Done() + errCh <- context.Cause(ctx) + close(errCh) + }(c.logProductionCtx) + return errCh } +// connectReaper connects the reaper to the container if it is needed. +func (c *DockerContainer) connectReaper(ctx context.Context) error { + if c.provider.config.RyukDisabled || isReaperImage(c.Image) { + // Reaper is disabled or we are the reaper container. + return nil + } + + reaper, err := spawner.reaper(context.WithValue(ctx, core.DockerHostContextKey, c.provider.host), core.SessionID(), c.provider) + if err != nil { + return fmt.Errorf("reaper: %w", err) + } + + if c.terminationSignal, err = reaper.Connect(); err != nil { + return fmt.Errorf("reaper connect: %w", err) + } + + return nil +} + +// cleanupTermSignal triggers the termination signal if it was created and an error occurred. +func (c *DockerContainer) cleanupTermSignal(err error) { + if c.terminationSignal != nil && err != nil { + c.terminationSignal <- true + } +} + // DockerNetwork represents a network started using Docker type DockerNetwork struct { ID string // Network ID from Docker @@ -906,6 +976,7 @@ type DockerProvider struct { host string hostCache string config config.Config + mtx sync.Mutex } // Client gets the docker client used by the provider @@ -931,29 +1002,29 @@ var _ ContainerProvider = (*DockerProvider)(nil) // BuildImage will build and image from context and Dockerfile, then return the tag func (p *DockerProvider) BuildImage(ctx context.Context, img ImageBuildInfo) (string, error) { - var buildOptions types.ImageBuildOptions + var buildOptions build.ImageBuildOptions resp, err := backoff.RetryNotifyWithData( - func() (types.ImageBuildResponse, error) { + func() (build.ImageBuildResponse, error) { var err error buildOptions, err = img.BuildOptions() if err != nil { - return types.ImageBuildResponse{}, backoff.Permanent(fmt.Errorf("build options: %w", err)) + return build.ImageBuildResponse{}, backoff.Permanent(fmt.Errorf("build options: %w", err)) } defer tryClose(buildOptions.Context) // release resources in any case resp, err := p.client.ImageBuild(ctx, buildOptions.Context, buildOptions) if err != nil { if isPermanentClientError(err) { - return types.ImageBuildResponse{}, backoff.Permanent(fmt.Errorf("build image: %w", err)) + return build.ImageBuildResponse{}, backoff.Permanent(fmt.Errorf("build image: %w", err)) } - return types.ImageBuildResponse{}, err + return build.ImageBuildResponse{}, err } defer p.Close() return resp, nil }, backoff.WithContext(backoff.NewExponentialBackOff(), ctx), - func(err error, duration time.Duration) { + func(err error, _ time.Duration) { p.Logger.Printf("Failed to build image: %s, will retry", err) }, ) @@ -962,10 +1033,7 @@ func (p *DockerProvider) BuildImage(ctx 
context.Context, img ImageBuildInfo) (st } defer resp.Body.Close() - output := io.Discard - if img.ShouldPrintBuildLog() { - output = os.Stderr - } + output := img.BuildLogWriter() // Always process the output, even if it is not printed // to ensure that errors during the build process are @@ -980,33 +1048,24 @@ func (p *DockerProvider) BuildImage(ctx context.Context, img ImageBuildInfo) (st } // CreateContainer fulfils a request for a container without starting it -func (p *DockerProvider) CreateContainer(ctx context.Context, req ContainerRequest) (con Container, err error) { //nolint:nonamedreturns // Needed for error checking. +func (p *DockerProvider) CreateContainer(ctx context.Context, req ContainerRequest) (con Container, err error) { // defer the close of the Docker client connection the soonest defer p.Close() - // Make sure that bridge network exists - // In case it is disabled we will create reaper_default network - if p.DefaultNetwork == "" { - p.DefaultNetwork, err = p.getDefaultNetwork(ctx, p.client) - if err != nil { - return nil, err - } + var defaultNetwork string + defaultNetwork, err = p.ensureDefaultNetwork(ctx) + if err != nil { + return nil, fmt.Errorf("ensure default network: %w", err) } // If default network is not bridge make sure it is attached to the request // as container won't be attached to it automatically // in case of Podman the bridge network is called 'podman' as 'bridge' would conflict - if p.DefaultNetwork != p.defaultBridgeNetworkName { - isAttached := false - for _, net := range req.Networks { - if net == p.DefaultNetwork { - isAttached = true - break - } - } + if defaultNetwork != p.defaultBridgeNetworkName { + isAttached := slices.Contains(req.Networks, defaultNetwork) if !isAttached { - req.Networks = append(req.Networks, p.DefaultNetwork) + req.Networks = append(req.Networks, defaultNetwork) } } @@ -1021,28 +1080,6 @@ func (p *DockerProvider) CreateContainer(ctx context.Context, req ContainerReque req.Labels = make(map[string]string) } - var termSignal chan bool - // the reaper does not need to start a reaper for itself - isReaperContainer := strings.HasSuffix(imageName, config.ReaperDefaultImage) - if !p.config.RyukDisabled && !isReaperContainer { - r, err := spawner.reaper(context.WithValue(ctx, core.DockerHostContextKey, p.host), core.SessionID(), p) - if err != nil { - return nil, fmt.Errorf("reaper: %w", err) - } - - termSignal, err := r.Connect() - if err != nil { - return nil, fmt.Errorf("reaper connect: %w", err) - } - - // Cleanup on error. 
- defer func() { - if err != nil { - termSignal <- true - } - }() - } - if err = req.Validate(); err != nil { return nil, err } @@ -1052,11 +1089,29 @@ func (p *DockerProvider) CreateContainer(ctx context.Context, req ContainerReque var platform *specs.Platform + defaultHooks := []ContainerLifecycleHooks{ + DefaultLoggingHook(p.Logger), + } + + origLifecycleHooks := req.LifecycleHooks + req.LifecycleHooks = []ContainerLifecycleHooks{ + combineContainerHooks(defaultHooks, req.LifecycleHooks), + } + if req.ShouldBuildImage() { + if err = req.buildingHook(ctx); err != nil { + return nil, err + } + imageName, err = p.BuildImage(ctx, &req) if err != nil { return nil, err } + + req.Image = imageName + if err = req.builtHook(ctx); err != nil { + return nil, err + } } else { for _, is := range req.ImageSubstitutors { modifiedTag, err := is.Substitute(imageName) @@ -1083,13 +1138,12 @@ func (p *DockerProvider) CreateContainer(ctx context.Context, req ContainerReque if req.AlwaysPullImage { shouldPullImage = true // If requested always attempt to pull image } else { - img, _, err := p.client.ImageInspectWithRaw(ctx, imageName) + img, err := p.client.ImageInspect(ctx, imageName) if err != nil { - if client.IsErrNotFound(err) { - shouldPullImage = true - } else { + if !errdefs.IsNotFound(err) { return nil, err } + shouldPullImage = true } if platform != nil && (img.Architecture != platform.Architecture || img.Os != platform.OS) { shouldPullImage = true @@ -1106,7 +1160,7 @@ func (p *DockerProvider) CreateContainer(ctx context.Context, req ContainerReque } } - if !isReaperContainer { + if !isReaperImage(imageName) { // Add the labels that identify this as a testcontainers container and // allow the reaper to terminate it if requested. AddGenericLabels(req.Labels) @@ -1118,27 +1172,21 @@ func (p *DockerProvider) CreateContainer(ctx context.Context, req ContainerReque Env: env, Labels: req.Labels, Cmd: req.Cmd, - Hostname: req.Hostname, - User: req.User, - WorkingDir: req.WorkingDir, } hostConfig := &container.HostConfig{ - Privileged: req.Privileged, - ShmSize: req.ShmSize, - Tmpfs: req.Tmpfs, + Tmpfs: req.Tmpfs, } networkingConfig := &network.NetworkingConfig{} // default hooks include logger hook and pre-create hook - defaultHooks := []ContainerLifecycleHooks{ - DefaultLoggingHook(p.Logger), + defaultHooks = append(defaultHooks, defaultPreCreateHook(p, dockerInput, hostConfig, networkingConfig), defaultCopyFileToContainerHook(req.Files), defaultLogConsumersHook(req.LogConsumerCfg), defaultReadinessHook(), - } + ) // in the case the container needs to access a local port // we need to forward the local port to the container @@ -1151,10 +1199,25 @@ func (p *DockerProvider) CreateContainer(ctx context.Context, req ContainerReque return nil, fmt.Errorf("expose host ports: %w", err) } + defer func() { + if err != nil && con == nil { + // Container setup failed so ensure we clean up the sshd container too. + ctr := &DockerContainer{ + provider: p, + logger: p.Logger, + lifecycleHooks: []ContainerLifecycleHooks{sshdForwardPortsHook}, + } + err = errors.Join(ctr.terminatingHook(ctx)) + } + }() + defaultHooks = append(defaultHooks, sshdForwardPortsHook) } - req.LifecycleHooks = []ContainerLifecycleHooks{combineContainerHooks(defaultHooks, req.LifecycleHooks)} + // Combine with the original LifecycleHooks to avoid duplicate logging hooks. 
+ req.LifecycleHooks = []ContainerLifecycleHooks{ + combineContainerHooks(defaultHooks, origLifecycleHooks), + } err = req.creatingHook(ctx) if err != nil { @@ -1184,38 +1247,47 @@ func (p *DockerProvider) CreateContainer(ctx context.Context, req ContainerReque } } - c := &DockerContainer{ - ID: resp.ID, - WaitingFor: req.WaitingFor, - Image: imageName, - imageWasBuilt: req.ShouldBuildImage(), - keepBuiltImage: req.ShouldKeepBuiltImage(), - sessionID: core.SessionID(), - exposedPorts: req.ExposedPorts, - provider: p, - terminationSignal: termSignal, - logger: p.Logger, - lifecycleHooks: req.LifecycleHooks, + // This should match the fields set in ContainerFromDockerResponse. + ctr := &DockerContainer{ + ID: resp.ID, + WaitingFor: req.WaitingFor, + Image: imageName, + imageWasBuilt: req.ShouldBuildImage(), + keepBuiltImage: req.ShouldKeepBuiltImage(), + sessionID: req.sessionID(), + exposedPorts: req.ExposedPorts, + provider: p, + logger: p.Logger, + lifecycleHooks: req.LifecycleHooks, } - err = c.createdHook(ctx) - if err != nil { - return nil, err + if err = ctr.connectReaper(ctx); err != nil { + return ctr, err // No wrap as it would stutter. } - return c, nil + // Wrapped so the returned error is passed to the cleanup function. + defer func(ctr *DockerContainer) { + ctr.cleanupTermSignal(err) + }(ctr) + + if err = ctr.createdHook(ctx); err != nil { + // Return the container to allow caller to clean up. + return ctr, fmt.Errorf("created hook: %w", err) + } + + return ctr, nil } -func (p *DockerProvider) findContainerByName(ctx context.Context, name string) (*types.Container, error) { +func (p *DockerProvider) findContainerByName(ctx context.Context, name string) (*container.Summary, error) { if name == "" { return nil, nil } // Note that, 'name' filter will use regex to find the containers filter := filters.NewArgs(filters.Arg("name", fmt.Sprintf("^%s$", name))) - containers, err := p.client.ContainerList(ctx, container.ListOptions{Filters: filter}) + containers, err := p.client.ContainerList(ctx, container.ListOptions{All: true, Filters: filter}) if err != nil { - return nil, err + return nil, fmt.Errorf("container list: %w", err) } defer p.Close() @@ -1225,9 +1297,9 @@ func (p *DockerProvider) findContainerByName(ctx context.Context, name string) ( return nil, nil } -func (p *DockerProvider) waitContainerCreation(ctx context.Context, name string) (*types.Container, error) { +func (p *DockerProvider) waitContainerCreation(ctx context.Context, name string) (*container.Summary, error) { return backoff.RetryNotifyWithData( - func() (*types.Container, error) { + func() (*container.Summary, error) { c, err := p.findContainerByName(ctx, name) if err != nil { if !errdefs.IsNotFound(err) && isPermanentClientError(err) { @@ -1237,7 +1309,7 @@ func (p *DockerProvider) waitContainerCreation(ctx context.Context, name string) } if c == nil { - return nil, errdefs.NotFound(fmt.Errorf("container %s not found", name)) + return nil, errdefs.ErrNotFound.WithMessage(fmt.Sprintf("container %s not found", name)) } return c, nil }, @@ -1251,7 +1323,7 @@ func (p *DockerProvider) waitContainerCreation(ctx context.Context, name string) ) } -func (p *DockerProvider) ReuseOrCreateContainer(ctx context.Context, req ContainerRequest) (con Container, err error) { //nolint:nonamedreturns // Needed for error check. 
+func (p *DockerProvider) ReuseOrCreateContainer(ctx context.Context, req ContainerRequest) (con Container, err error) { c, err := p.findContainerByName(ctx, req.Name) if err != nil { return nil, err @@ -1270,7 +1342,7 @@ func (p *DockerProvider) ReuseOrCreateContainer(ctx context.Context, req Contain } } - sessionID := core.SessionID() + sessionID := req.sessionID() var termSignal chan bool if !p.config.RyukDisabled { @@ -1279,7 +1351,7 @@ func (p *DockerProvider) ReuseOrCreateContainer(ctx context.Context, req Contain return nil, fmt.Errorf("reaper: %w", err) } - termSignal, err := r.Connect() + termSignal, err = r.Connect() if err != nil { return nil, fmt.Errorf("reaper connect: %w", err) } @@ -1311,6 +1383,31 @@ func (p *DockerProvider) ReuseOrCreateContainer(ctx context.Context, req Contain lifecycleHooks: []ContainerLifecycleHooks{combineContainerHooks(defaultHooks, req.LifecycleHooks)}, } + // Workaround for https://github.com/moby/moby/issues/50133. + // /containers/{id}/json API endpoint of Docker Engine takes data about container from master (not replica) database + // which is synchronized with container state after call of /containers/{id}/stop API endpoint. + dcState, err := dc.State(ctx) + if err != nil { + return nil, fmt.Errorf("docker container state: %w", err) + } + + // If a container was stopped programmatically, we want to ensure the container + // is running again, but only if it is not paused, as it's not possible to start + // a paused container. The Docker Engine returns the "cannot start a paused container, + // try unpause instead" error. + switch dcState.Status { + case "running": + // cannot re-start a running container, but we still need + // to call the startup hooks. + case "paused": + // TODO: we should unpause the container here. + return nil, fmt.Errorf("cannot start a paused container: %w", errors.ErrUnsupported) + default: + if err := dc.Start(ctx); err != nil { + return dc, fmt.Errorf("start container %s in state %s: %w", req.Name, c.State, err) + } + } + err = dc.startedHook(ctx) if err != nil { return nil, err @@ -1331,7 +1428,7 @@ func (p *DockerProvider) ReuseOrCreateContainer(ctx context.Context, req Contain func (p *DockerProvider) attemptToPullImage(ctx context.Context, tag string, pullOpt image.PullOptions) error { registry, imageAuth, err := DockerImageAuth(ctx, tag) if err != nil { - p.Logger.Printf("Failed to get image auth for %s. Setting empty credentials for the image: %s. Error is: %s", registry, tag, err) + p.Logger.Printf("No image auth found for %s. Setting empty credentials for the image: %s. This is expected for public images. 
Details: %s", registry, tag, err) } else { // see https://github.com/docker/docs/blob/e8e1204f914767128814dca0ea008644709c117f/engine/api/sdk/examples.md?plain=1#L649-L657 encodedJSON, err := json.Marshal(imageAuth) @@ -1357,7 +1454,7 @@ func (p *DockerProvider) attemptToPullImage(ctx context.Context, tag string, pul return nil }, backoff.WithContext(backoff.NewExponentialBackOff(), ctx), - func(err error, duration time.Duration) { + func(err error, _ time.Duration) { p.Logger.Printf("Failed to pull image: %s, will retry", err) }, ) @@ -1367,7 +1464,7 @@ func (p *DockerProvider) attemptToPullImage(ctx context.Context, tag string, pul defer pull.Close() // download of docker image finishes at EOF of the pull request - _, err = io.ReadAll(pull) + _, err = io.Copy(io.Discard, pull) return err } @@ -1411,10 +1508,13 @@ func (p *DockerProvider) Config() TestcontainersConfig { // Warning: this is based on your Docker host setting. Will fail if using an SSH tunnel // You can use the "TESTCONTAINERS_HOST_OVERRIDE" env variable to set this yourself func (p *DockerProvider) DaemonHost(ctx context.Context) (string, error) { - return daemonHost(ctx, p) + p.mtx.Lock() + defer p.mtx.Unlock() + + return p.daemonHostLocked(ctx) } -func daemonHost(ctx context.Context, p *DockerProvider) (string, error) { +func (p *DockerProvider) daemonHostLocked(ctx context.Context) (string, error) { if p.hostCache != "" { return p.hostCache, nil } @@ -1437,7 +1537,11 @@ func daemonHost(ctx context.Context, p *DockerProvider) (string, error) { p.hostCache = daemonURL.Hostname() case "unix", "npipe": if core.InAContainer() { - ip, err := p.GetGatewayIP(ctx) + defaultNetwork, err := p.ensureDefaultNetworkLocked(ctx) + if err != nil { + return "", fmt.Errorf("ensure default network: %w", err) + } + ip, err := p.getGatewayIP(ctx, defaultNetwork) if err != nil { ip, err = core.DefaultGatewayIP() if err != nil { @@ -1457,16 +1561,12 @@ func daemonHost(ctx context.Context, p *DockerProvider) (string, error) { // Deprecated: use network.New instead // CreateNetwork returns the object representing a new network identified by its name -func (p *DockerProvider) CreateNetwork(ctx context.Context, req NetworkRequest) (net Network, err error) { //nolint:nonamedreturns // Needed for error check. 
+func (p *DockerProvider) CreateNetwork(ctx context.Context, req NetworkRequest) (net Network, err error) { // defer the close of the Docker client connection the soonest defer p.Close() - // Make sure that bridge network exists - // In case it is disabled we will create reaper_default network - if p.DefaultNetwork == "" { - if p.DefaultNetwork, err = p.getDefaultNetwork(ctx, p.client); err != nil { - return nil, err - } + if _, err = p.ensureDefaultNetwork(ctx); err != nil { + return nil, fmt.Errorf("ensure default network: %w", err) } if req.Labels == nil { @@ -1482,7 +1582,7 @@ func (p *DockerProvider) CreateNetwork(ctx context.Context, req NetworkRequest) IPAM: req.IPAM, } - sessionID := core.SessionID() + sessionID := req.sessionID() var termSignal chan bool if !p.config.RyukDisabled { @@ -1491,7 +1591,7 @@ func (p *DockerProvider) CreateNetwork(ctx context.Context, req NetworkRequest) return nil, fmt.Errorf("reaper: %w", err) } - termSignal, err := r.Connect() + termSignal, err = r.Connect() if err != nil { return nil, fmt.Errorf("reaper connect: %w", err) } @@ -1537,14 +1637,15 @@ func (p *DockerProvider) GetNetwork(ctx context.Context, req NetworkRequest) (ne func (p *DockerProvider) GetGatewayIP(ctx context.Context) (string, error) { // Use a default network as defined in the DockerProvider - if p.DefaultNetwork == "" { - var err error - p.DefaultNetwork, err = p.getDefaultNetwork(ctx, p.client) - if err != nil { - return "", err - } + defaultNetwork, err := p.ensureDefaultNetwork(ctx) + if err != nil { + return "", fmt.Errorf("ensure default network: %w", err) } - nw, err := p.GetNetwork(ctx, NetworkRequest{Name: p.DefaultNetwork}) + return p.getGatewayIP(ctx, defaultNetwork) +} + +func (p *DockerProvider) getGatewayIP(ctx context.Context, defaultNetwork string) (string, error) { + nw, err := p.GetNetwork(ctx, NetworkRequest{Name: defaultNetwork}) if err != nil { return "", err } @@ -1557,82 +1658,103 @@ func (p *DockerProvider) GetGatewayIP(ctx context.Context) (string, error) { } } if ip == "" { - return "", errors.New("Failed to get gateway IP from network settings") + return "", errors.New("failed to get gateway IP from network settings") } return ip, nil } -func (p *DockerProvider) getDefaultNetwork(ctx context.Context, cli client.APIClient) (string, error) { - // Get list of available networks - networkResources, err := cli.NetworkList(ctx, network.ListOptions{}) - if err != nil { - return "", err - } +// ensureDefaultNetwork ensures that defaultNetwork is set and creates +// it if it does not exist, returning its value. +// It is safe to call this method concurrently. +func (p *DockerProvider) ensureDefaultNetwork(ctx context.Context) (string, error) { + p.mtx.Lock() + defer p.mtx.Unlock() + return p.ensureDefaultNetworkLocked(ctx) +} - reaperNetwork := ReaperDefault +func (p *DockerProvider) ensureDefaultNetworkLocked(ctx context.Context) (string, error) { + if p.defaultNetwork != "" { + // Already set. + return p.defaultNetwork, nil + } - reaperNetworkExists := false + networkResources, err := p.client.NetworkList(ctx, network.ListOptions{}) + if err != nil { + return "", fmt.Errorf("network list: %w", err) + } + // TODO: remove once we have docker context support via #2810 + // Prefer the default bridge network if it exists. + // This makes the results stable as network list order is not guaranteed. 
for _, net := range networkResources { - if net.Name == p.defaultBridgeNetworkName { - return p.defaultBridgeNetworkName, nil + switch net.Name { + case p.defaultBridgeNetworkName: + p.defaultNetwork = p.defaultBridgeNetworkName + return p.defaultNetwork, nil + case ReaperDefault: + p.defaultNetwork = ReaperDefault } + } - if net.Name == reaperNetwork { - reaperNetworkExists = true - } + if p.defaultNetwork != "" { + return p.defaultNetwork, nil } - // Create a bridge network for the container communications - if !reaperNetworkExists { - _, err = cli.NetworkCreate(ctx, reaperNetwork, network.CreateOptions{ - Driver: Bridge, - Attachable: true, - Labels: GenericLabels(), - }) - // If the network already exists, we can ignore the error as that can - // happen if we are running multiple tests in parallel and we only - // need to ensure that the network exists. - if err != nil && !errdefs.IsConflict(err) { - return "", err - } + // Create a bridge network for the container communications. + _, err = p.client.NetworkCreate(ctx, ReaperDefault, network.CreateOptions{ + Driver: Bridge, + Attachable: true, + Labels: GenericLabels(), + }) + // If the network already exists, we can ignore the error as that can + // happen if we are running multiple tests in parallel and we only + // need to ensure that the network exists. + if err != nil && !errdefs.IsConflict(err) { + return "", fmt.Errorf("network create: %w", err) } - return reaperNetwork, nil + p.defaultNetwork = ReaperDefault + + return p.defaultNetwork, nil } -// containerFromDockerResponse builds a Docker container struct from the response of the Docker API -func containerFromDockerResponse(ctx context.Context, response types.Container) (*DockerContainer, error) { - provider, err := NewDockerProvider() - if err != nil { - return nil, err +// ContainerFromType builds a Docker container struct from the response of the Docker API +func (p *DockerProvider) ContainerFromType(ctx context.Context, response container.Summary) (ctr *DockerContainer, err error) { + exposedPorts := make([]string, len(response.Ports)) + for i, port := range response.Ports { + exposedPorts[i] = fmt.Sprintf("%d/%s", port.PublicPort, port.Type) } - ctr := DockerContainer{} - - ctr.ID = response.ID - ctr.WaitingFor = nil - ctr.Image = response.Image - ctr.imageWasBuilt = false - - ctr.logger = provider.Logger - ctr.lifecycleHooks = []ContainerLifecycleHooks{ - DefaultLoggingHook(ctr.logger), + // This should match the fields set in CreateContainer. + ctr = &DockerContainer{ + ID: response.ID, + Image: response.Image, + imageWasBuilt: false, + sessionID: response.Labels[core.LabelSessionID], + isRunning: response.State == "running", + exposedPorts: exposedPorts, + provider: p, + logger: p.Logger, + lifecycleHooks: []ContainerLifecycleHooks{ + DefaultLoggingHook(p.Logger), + }, } - ctr.provider = provider - ctr.sessionID = core.SessionID() - ctr.consumers = []LogConsumer{} - ctr.isRunning = response.State == "running" + if err = ctr.connectReaper(ctx); err != nil { + return nil, err + } - // the termination signal should be obtained from the reaper - ctr.terminationSignal = nil + // Wrapped so the returned error is passed to the cleanup function. + defer func(ctr *DockerContainer) { + ctr.cleanupTermSignal(err) + }(ctr) // populate the raw representation of the container jsonRaw, err := ctr.inspectRawContainer(ctx) if err != nil { - return nil, fmt.Errorf("inspect raw container: %w", err) + // Return the container to allow caller to clean up. 
+ return ctr, fmt.Errorf("inspect raw container: %w", err) } // the health status of the container, if any @@ -1640,7 +1762,7 @@ func containerFromDockerResponse(ctx context.Context, response types.Container) ctr.healthStatus = health.Status } - return &ctr, nil + return ctr, nil } // ListImages list images from the provider. If an image has multiple Tags, each tag is reported @@ -1664,6 +1786,19 @@ func (p *DockerProvider) ListImages(ctx context.Context) ([]ImageInfo, error) { // SaveImages exports a list of images as an uncompressed tar func (p *DockerProvider) SaveImages(ctx context.Context, output string, images ...string) error { + return p.SaveImagesWithOpts(ctx, output, images) +} + +// SaveImagesWithOpts exports a list of images as an uncompressed tar, passing options to the provider +func (p *DockerProvider) SaveImagesWithOpts(ctx context.Context, output string, images []string, opts ...SaveImageOption) error { + saveOpts := saveImageOptions{} + + for _, opt := range opts { + if err := opt(&saveOpts); err != nil { + return fmt.Errorf("applying save image option: %w", err) + } + } + outputFile, err := os.Create(output) if err != nil { return fmt.Errorf("opening output file %w", err) @@ -1672,7 +1807,7 @@ func (p *DockerProvider) SaveImages(ctx context.Context, output string, images . _ = outputFile.Close() }() - imageReader, err := p.client.ImageSave(ctx, images) + imageReader, err := p.client.ImageSave(ctx, images, saveOpts.dockerSaveOpts...) if err != nil { return fmt.Errorf("saving images %w", err) } @@ -1689,6 +1824,14 @@ func (p *DockerProvider) SaveImages(ctx context.Context, output string, images . return nil } +func SaveDockerImageWithPlatforms(platforms ...specs.Platform) SaveImageOption { + return func(opts *saveImageOptions) error { + opts.dockerSaveOpts = append(opts.dockerSaveOpts, client.ImageSaveWithPlatforms(platforms...)) + + return nil + } +} + // PullImage pulls image from registry func (p *DockerProvider) PullImage(ctx context.Context, img string) error { return p.attemptToPullImage(ctx, img, image.PullOptions{}) @@ -1696,11 +1839,11 @@ func (p *DockerProvider) PullImage(ctx context.Context, img string) error { var permanentClientErrors = []func(error) bool{ errdefs.IsNotFound, - errdefs.IsInvalidParameter, + errdefs.IsInvalidArgument, errdefs.IsUnauthorized, - errdefs.IsForbidden, + errdefs.IsPermissionDenied, errdefs.IsNotImplemented, - errdefs.IsSystem, + errdefs.IsInternal, } func isPermanentClientError(err error) bool { diff --git a/vendor/github.com/testcontainers/testcontainers-go/docker_auth.go b/vendor/github.com/testcontainers/testcontainers-go/docker_auth.go index af0d415de..58b3ef263 100644 --- a/vendor/github.com/testcontainers/testcontainers-go/docker_auth.go +++ b/vendor/github.com/testcontainers/testcontainers-go/docker_auth.go @@ -21,6 +21,9 @@ import ( // defaultRegistryFn is variable overwritten in tests to check for behaviour with different default values. var defaultRegistryFn = defaultRegistry +// getRegistryCredentials is a variable overwritten in tests to mock the dockercfg.GetRegistryCredentials function. +var getRegistryCredentials = dockercfg.GetRegistryCredentials + // DockerImageAuth returns the auth config for the given Docker image, extracting first its Docker registry. // Finally, it will use the credential helpers to extract the information from the docker config file // for that registry, if it exists. 
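The `getRegistryCredentials` package variable introduced above is the classic function-variable test seam: production code keeps the real implementation (`dockercfg.GetRegistryCredentials`) as the default value, and tests reassign the variable to a stub. A minimal sketch of the pattern, assuming illustrative names (`lookupCredentials` and `resolveAuth` are not part of testcontainers-go):

	package auth

	import "fmt"

	// lookupCredentials holds the real lookup by default; tests may swap it out.
	var lookupCredentials = func(hostname string) (user, pass string, err error) {
		return "", "", fmt.Errorf("no credentials for %s", hostname)
	}

	// resolveAuth calls through the seam, so its behaviour is testable
	// without touching real credential helpers.
	func resolveAuth(hostname string) (string, string, error) {
		return lookupCredentials(hostname)
	}

In a test, assigning `lookupCredentials = func(string) (string, string, error) { return "user", "secret", nil }` exercises the success path deterministically.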
@@ -111,9 +114,28 @@ type credentials struct { var creds = &credentialsCache{entries: map[string]credentials{}} -// Get returns the username and password for the given hostname +// AuthConfig updates the details in authConfig for the given hostname +// as determined by the details in configKey. +func (c *credentialsCache) AuthConfig(hostname, configKey string, authConfig *registry.AuthConfig) error { + u, p, err := creds.get(hostname, configKey) + if err != nil { + return err + } + + if u != "" { + authConfig.Username = u + authConfig.Password = p + } else { + authConfig.IdentityToken = p + } + + return nil +} + +// get returns the username and password for the given hostname // as determined by the details in configPath. -func (c *credentialsCache) Get(hostname, configKey string) (string, string, error) { +// If the username is empty, the password is an identity token. +func (c *credentialsCache) get(hostname, configKey string) (string, string, error) { key := configKey + ":" + hostname c.mtx.RLock() entry, ok := c.entries[key] @@ -124,7 +146,7 @@ func (c *credentialsCache) Get(hostname, configKey string) (string, string, erro } // No entry found, request and cache. - user, password, err := dockercfg.GetRegistryCredentials(hostname) + user, password, err := getRegistryCredentials(hostname) if err != nil { return "", "", fmt.Errorf("getting credentials for %s: %w", hostname, err) } @@ -186,14 +208,10 @@ func getDockerAuthConfigs() (map[string]registry.AuthConfig, error) { switch { case ac.Username == "" && ac.Password == "": // Look up credentials from the credential store. - u, p, err := creds.Get(k, key) - if err != nil { + if err := creds.AuthConfig(k, key, &ac); err != nil { results <- authConfigResult{err: err} return } - - ac.Username = u - ac.Password = p case ac.Auth == "": // Create auth from the username and password encoding. ac.Auth = base64.StdEncoding.EncodeToString([]byte(ac.Username + ":" + ac.Password)) @@ -203,25 +221,19 @@ func getDockerAuthConfigs() (map[string]registry.AuthConfig, error) { }(k, v) } - // in the case where the auth field in the .docker/conf.json is empty, and the user has credential helpers registered - // the auth comes from there + // In the case where the auth field in the .docker/conf.json is empty, and the user has + // credential helpers registered the auth comes from there. for k := range cfg.CredentialHelpers { go func(k string) { defer wg.Done() - u, p, err := creds.Get(k, key) - if err != nil { + var ac registry.AuthConfig + if err := creds.AuthConfig(k, key, &ac); err != nil { results <- authConfigResult{err: err} return } - results <- authConfigResult{ - key: k, - cfg: registry.AuthConfig{ - Username: u, - Password: p, - }, - } + results <- authConfigResult{key: k, cfg: ac} }(k) } diff --git a/vendor/github.com/testcontainers/testcontainers-go/docker_client.go b/vendor/github.com/testcontainers/testcontainers-go/docker_client.go index 04df71291..e9eea1efe 100644 --- a/vendor/github.com/testcontainers/testcontainers-go/docker_client.go +++ b/vendor/github.com/testcontainers/testcontainers-go/docker_client.go @@ -13,6 +13,7 @@ import ( "github.com/testcontainers/testcontainers-go/internal" "github.com/testcontainers/testcontainers-go/internal/core" + "github.com/testcontainers/testcontainers-go/log" ) // DockerClient is a wrapper around the docker client that is used by testcontainers-go. 
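The `credentialsCache.AuthConfig` helper added above encodes the Docker credential-helper convention: a non-empty username means a username/password pair, while an empty username means the returned secret is an identity token. A hedged, self-contained sketch of that mapping rule, mirroring (not replacing) the method in the hunk above:

	package dockerauth

	import "github.com/docker/docker/api/types/registry"

	// toAuthConfig maps a credential-helper result onto registry.AuthConfig.
	// An empty username signals that the secret is an identity token.
	func toAuthConfig(user, secret string) registry.AuthConfig {
		var ac registry.AuthConfig
		if user != "" {
			ac.Username, ac.Password = user, secret
		} else {
			ac.IdentityToken = secret
		}
		return ac
	}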
@@ -73,9 +74,9 @@ func (c *DockerClient) Info(ctx context.Context) (system.Info, error) { } } - Logger.Printf(infoMessage, packagePath, + log.Printf(infoMessage, packagePath, dockerInfo.ServerVersion, - c.Client.ClientVersion(), + c.ClientVersion(), dockerInfo.OperatingSystem, dockerInfo.MemTotal/1024/1024, infoLabels, internal.Version, diff --git a/vendor/github.com/testcontainers/testcontainers-go/docker_mounts.go b/vendor/github.com/testcontainers/testcontainers-go/docker_mounts.go index d8af3fae3..9609d92ca 100644 --- a/vendor/github.com/testcontainers/testcontainers-go/docker_mounts.go +++ b/vendor/github.com/testcontainers/testcontainers-go/docker_mounts.go @@ -1,12 +1,20 @@ package testcontainers -import "github.com/docker/docker/api/types/mount" +import ( + "errors" + "path/filepath" + + "github.com/docker/docker/api/types/mount" + + "github.com/testcontainers/testcontainers-go/log" +) var mountTypeMapping = map[MountType]mount.Type{ MountTypeBind: mount.TypeBind, // Deprecated, it will be removed in a future release MountTypeVolume: mount.TypeVolume, MountTypeTmpfs: mount.TypeTmpfs, MountTypePipe: mount.TypeNamedPipe, + MountTypeImage: mount.TypeImage, } // Deprecated: use Files or HostConfigModifier in the ContainerRequest, or copy files container APIs to make containers portable across Docker environments @@ -28,6 +36,12 @@ type TmpfsMounter interface { GetTmpfsOptions() *mount.TmpfsOptions } +// ImageMounter can optionally be implemented by mount sources +// to support advanced scenarios based on mount.ImageOptions +type ImageMounter interface { + ImageOptions() *mount.ImageOptions +} + // Deprecated: use Files or HostConfigModifier in the ContainerRequest, or copy files container APIs to make containers portable across Docker environments type DockerBindMountSource struct { *mount.BindOptions @@ -81,6 +95,48 @@ func (s DockerTmpfsMountSource) GetTmpfsOptions() *mount.TmpfsOptions { return s.TmpfsOptions } +// DockerImageMountSource is a mount source for an image +type DockerImageMountSource struct { + // imageName is the image name + imageName string + + // subpath is the subpath to mount the image into + subpath string +} + +// NewDockerImageMountSource creates a new DockerImageMountSource +func NewDockerImageMountSource(imageName string, subpath string) DockerImageMountSource { + return DockerImageMountSource{ + imageName: imageName, + subpath: subpath, + } +} + +// Validate validates the source of the mount, ensuring that the subpath is a relative path +func (s DockerImageMountSource) Validate() error { + if !filepath.IsLocal(s.subpath) { + return errors.New("image mount source must be a local path") + } + return nil +} + +// ImageOptions returns the image options for the image mount +func (s DockerImageMountSource) ImageOptions() *mount.ImageOptions { + return &mount.ImageOptions{ + Subpath: s.subpath, + } +} + +// Source returns the image name for the image mount +func (s DockerImageMountSource) Source() string { + return s.imageName +} + +// Type returns the mount type for the image mount +func (s DockerImageMountSource) Type() MountType { + return MountTypeImage +} + // PrepareMounts maps the given []ContainerMount to the corresponding // []mount.Mount for further processing func (m ContainerMounts) PrepareMounts() []mount.Mount { @@ -114,8 +170,10 @@ func mapToDockerMounts(containerMounts ContainerMounts) []mount.Mount { containerMount.VolumeOptions = typedMounter.GetVolumeOptions() case TmpfsMounter: containerMount.TmpfsOptions = typedMounter.GetTmpfsOptions() + case 
ImageMounter: + containerMount.ImageOptions = typedMounter.ImageOptions() case BindMounter: - Logger.Printf("Mount type %s is not supported by Testcontainers for Go", m.Source.Type()) + log.Printf("Mount type %s is not supported by Testcontainers for Go", m.Source.Type()) default: // The provided source type has no custom options } diff --git a/vendor/github.com/testcontainers/testcontainers-go/exec/processor.go b/vendor/github.com/testcontainers/testcontainers-go/exec/processor.go index 2b7958360..36f1db1cf 100644 --- a/vendor/github.com/testcontainers/testcontainers-go/exec/processor.go +++ b/vendor/github.com/testcontainers/testcontainers-go/exec/processor.go @@ -2,7 +2,9 @@ package exec import ( "bytes" + "fmt" "io" + "sync" "github.com/docker/docker/api/types/container" "github.com/docker/docker/pkg/stdcopy" @@ -23,7 +25,6 @@ func NewProcessOptions(cmd []string) *ProcessOptions { return &ProcessOptions{ ExecConfig: container.ExecOptions{ Cmd: cmd, - Detach: false, AttachStdout: true, AttachStderr: true, }, @@ -60,6 +61,43 @@ func WithEnv(env []string) ProcessOption { }) } +// safeBuffer is a goroutine safe buffer. +type safeBuffer struct { + mtx sync.Mutex + buf bytes.Buffer + err error +} + +// Error sets an error for the next read. +func (sb *safeBuffer) Error(err error) { + sb.mtx.Lock() + defer sb.mtx.Unlock() + + sb.err = err +} + +// Write writes p to the buffer. +// It is safe for concurrent use by multiple goroutines. +func (sb *safeBuffer) Write(p []byte) (n int, err error) { + sb.mtx.Lock() + defer sb.mtx.Unlock() + + return sb.buf.Write(p) +} + +// Read reads up to len(p) bytes into p from the buffer. +// It is safe for concurrent use by multiple goroutines. +func (sb *safeBuffer) Read(p []byte) (n int, err error) { + sb.mtx.Lock() + defer sb.mtx.Unlock() + + if sb.err != nil { + return 0, sb.err + } + + return sb.buf.Read(p) +} + // Multiplexed returns a [ProcessOption] that configures the command execution // to combine stdout and stderr into a single stream without Docker's multiplexing headers. 
func Multiplexed() ProcessOption { @@ -73,13 +111,14 @@ func Multiplexed() ProcessOption { done := make(chan struct{}) - var outBuff bytes.Buffer - var errBuff bytes.Buffer + var outBuff safeBuffer + var errBuff safeBuffer go func() { + defer close(done) if _, err := stdcopy.StdCopy(&outBuff, &errBuff, opts.Reader); err != nil { + outBuff.Error(fmt.Errorf("copying output: %w", err)) return } - close(done) }() <-done diff --git a/vendor/github.com/testcontainers/testcontainers-go/file.go b/vendor/github.com/testcontainers/testcontainers-go/file.go index a6743cc9e..9205208cb 100644 --- a/vendor/github.com/testcontainers/testcontainers-go/file.go +++ b/vendor/github.com/testcontainers/testcontainers-go/file.go @@ -9,6 +9,8 @@ import ( "os" "path/filepath" "strings" + + "github.com/testcontainers/testcontainers-go/log" ) func isDir(path string) (bool, error) { @@ -41,7 +43,7 @@ func tarDir(src string, fileMode int64) (*bytes.Buffer, error) { buffer := &bytes.Buffer{} - Logger.Printf(">> creating TAR file from directory: %s\n", src) + log.Printf(">> creating TAR file from directory: %s\n", src) // tar > gzip > buffer zr := gzip.NewWriter(buffer) @@ -59,7 +61,7 @@ func tarDir(src string, fileMode int64) (*bytes.Buffer, error) { // if a symlink, skip file if fi.Mode().Type() == os.ModeSymlink { - Logger.Printf(">> skipping symlink: %s\n", file) + log.Printf(">> skipping symlink: %s\n", file) return nil } diff --git a/vendor/github.com/testcontainers/testcontainers-go/generic.go b/vendor/github.com/testcontainers/testcontainers-go/generic.go index fd13a607d..dc5ee1ccb 100644 --- a/vendor/github.com/testcontainers/testcontainers-go/generic.go +++ b/vendor/github.com/testcontainers/testcontainers-go/generic.go @@ -4,10 +4,12 @@ import ( "context" "errors" "fmt" + "maps" "strings" "sync" "github.com/testcontainers/testcontainers-go/internal/core" + "github.com/testcontainers/testcontainers-go/log" ) var ( @@ -20,7 +22,7 @@ type GenericContainerRequest struct { ContainerRequest // embedded request for provider Started bool // whether to auto-start the container ProviderType ProviderType // which provider to use, Docker if empty - Logger Logging // provide a container specific Logging - use default global logger if empty + Logger log.Logger // provide a container specific Logging - use default global logger if empty Reuse bool // reuse an existing container if it exists or create a new one. a container name mustn't be empty } @@ -52,11 +54,12 @@ func GenericContainer(ctx context.Context, req GenericContainerRequest) (Contain return nil, ErrReuseEmptyName } - logging := req.Logger - if logging == nil { - logging = Logger + logger := req.Logger + if logger == nil { + // Ensure there is always a non-nil logger by default + logger = log.Default() } - provider, err := req.ProviderType.GetProvider(WithLogger(logging)) + provider, err := req.ProviderType.GetProvider(WithLogger(logger)) if err != nil { return nil, fmt.Errorf("get provider: %w", err) } @@ -111,7 +114,36 @@ func GenericLabels() map[string]string { // AddGenericLabels adds the generic labels to target. func AddGenericLabels(target map[string]string) { - for k, v := range GenericLabels() { - target[k] = v + maps.Copy(target, GenericLabels()) +} + +// Run is a convenience function that creates a new container and starts it. +// It calls the GenericContainer function and returns a concrete DockerContainer type. 
+func Run(ctx context.Context, img string, opts ...ContainerCustomizer) (*DockerContainer, error) { + req := ContainerRequest{ + Image: img, + } + + genericContainerReq := GenericContainerRequest{ + ContainerRequest: req, + Started: true, + } + + for _, opt := range opts { + if err := opt.Customize(&genericContainerReq); err != nil { + return nil, fmt.Errorf("customize: %w", err) + } } + + ctr, err := GenericContainer(ctx, genericContainerReq) + var c *DockerContainer + if ctr != nil { + c = ctr.(*DockerContainer) + } + + if err != nil { + return c, fmt.Errorf("generic container: %w", err) + } + + return c, nil } diff --git a/vendor/github.com/testcontainers/testcontainers-go/image.go b/vendor/github.com/testcontainers/testcontainers-go/image.go index f7f2bf026..11154d862 100644 --- a/vendor/github.com/testcontainers/testcontainers-go/image.go +++ b/vendor/github.com/testcontainers/testcontainers-go/image.go @@ -2,17 +2,26 @@ package testcontainers import ( "context" + + "github.com/docker/docker/client" ) -// ImageInfo represents a summary information of an image +// ImageInfo represents summary information of an image type ImageInfo struct { ID string Name string } +type saveImageOptions struct { + dockerSaveOpts []client.ImageSaveOption +} + +type SaveImageOption func(*saveImageOptions) error + // ImageProvider allows manipulating images type ImageProvider interface { ListImages(context.Context) ([]ImageInfo, error) SaveImages(context.Context, string, ...string) error + SaveImagesWithOpts(context.Context, string, []string, ...SaveImageOption) error PullImage(context.Context, string) error } diff --git a/vendor/github.com/testcontainers/testcontainers-go/internal/config/config.go b/vendor/github.com/testcontainers/testcontainers-go/internal/config/config.go index b0bcc24d3..deb8f0a9f 100644 --- a/vendor/github.com/testcontainers/testcontainers-go/internal/config/config.go +++ b/vendor/github.com/testcontainers/testcontainers-go/internal/config/config.go @@ -11,11 +11,11 @@ import ( "github.com/magiconair/properties" ) -const ReaperDefaultImage = "testcontainers/ryuk:0.10.2" +const ReaperDefaultImage = "testcontainers/ryuk:0.13.0" var ( tcConfig Config - tcConfigOnce *sync.Once = new(sync.Once) + tcConfigOnce = new(sync.Once) ) // testcontainersConfig { @@ -68,17 +68,17 @@ type Config struct { // RyukReconnectionTimeout is the time to wait before attempting to reconnect to the Garbage Collector container. // - // Environment variable: TESTCONTAINERS_RYUK_RECONNECTION_TIMEOUT + // Environment variable: RYUK_RECONNECTION_TIMEOUT RyukReconnectionTimeout time.Duration `properties:"ryuk.reconnection.timeout,default=10s"` // RyukConnectionTimeout is the time to wait before timing out when connecting to the Garbage Collector container. // - // Environment variable: TESTCONTAINERS_RYUK_CONNECTION_TIMEOUT + // Environment variable: RYUK_CONNECTION_TIMEOUT RyukConnectionTimeout time.Duration `properties:"ryuk.connection.timeout,default=1m"` // RyukVerbose is a flag to enable or disable verbose logging for the Garbage Collector. // - // Environment variable: TESTCONTAINERS_RYUK_VERBOSE + // Environment variable: RYUK_VERBOSE RyukVerbose bool `properties:"ryuk.verbose,default=false"` // TestcontainersHost is the address of the Testcontainers host. 
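The `Run` helper added to generic.go above starts a container in a single call and can return a non-nil container together with an error, so callers should clean up regardless of the error. A usage sketch under the assumption of a public `nginx:alpine` image (any image works):

	package main

	import (
		"context"
		"fmt"
		"log"

		"github.com/testcontainers/testcontainers-go"
	)

	func runNginx(ctx context.Context) error {
		ctr, err := testcontainers.Run(ctx, "nginx:alpine")
		// Terminate even on error: Run may return a partially created container.
		defer func() {
			if ctr != nil {
				if terr := ctr.Terminate(ctx); terr != nil {
					log.Printf("terminate: %v", terr)
				}
			}
		}()
		if err != nil {
			return fmt.Errorf("run: %w", err)
		}

		host, err := ctr.Host(ctx)
		if err != nil {
			return fmt.Errorf("host: %w", err)
		}
		fmt.Println("container reachable at", host)
		return nil
	}

	func main() {
		if err := runNginx(context.Background()); err != nil {
			log.Fatal(err)
		}
	}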
@@ -126,17 +126,17 @@ func read() Config { config.RyukPrivileged = ryukPrivilegedEnv == "true" } - ryukVerboseEnv := os.Getenv("TESTCONTAINERS_RYUK_VERBOSE") + ryukVerboseEnv := readTestcontainersEnv("RYUK_VERBOSE") if parseBool(ryukVerboseEnv) { config.RyukVerbose = ryukVerboseEnv == "true" } - ryukReconnectionTimeoutEnv := os.Getenv("TESTCONTAINERS_RYUK_RECONNECTION_TIMEOUT") + ryukReconnectionTimeoutEnv := readTestcontainersEnv("RYUK_RECONNECTION_TIMEOUT") if timeout, err := time.ParseDuration(ryukReconnectionTimeoutEnv); err == nil { config.RyukReconnectionTimeout = timeout } - ryukConnectionTimeoutEnv := os.Getenv("TESTCONTAINERS_RYUK_CONNECTION_TIMEOUT") + ryukConnectionTimeoutEnv := readTestcontainersEnv("RYUK_CONNECTION_TIMEOUT") if timeout, err := time.ParseDuration(ryukConnectionTimeoutEnv); err == nil { config.RyukConnectionTimeout = timeout } @@ -168,3 +168,18 @@ func parseBool(input string) bool { _, err := strconv.ParseBool(input) return err == nil } + +// readTestcontainersEnv reads the environment variable with the given name. +// It checks for the environment variable with the given name first, and then +// checks for the environment variable with the given name prefixed with "TESTCONTAINERS_". +func readTestcontainersEnv(envVar string) string { + value := os.Getenv(envVar) + if value != "" { + return value + } + + // TODO: remove this prefix after the next major release + const prefix string = "TESTCONTAINERS_" + + return os.Getenv(prefix + envVar) +} diff --git a/vendor/github.com/testcontainers/testcontainers-go/internal/core/bootstrap.go b/vendor/github.com/testcontainers/testcontainers-go/internal/core/bootstrap.go index cf06dde7e..d249d9be3 100644 --- a/vendor/github.com/testcontainers/testcontainers-go/internal/core/bootstrap.go +++ b/vendor/github.com/testcontainers/testcontainers-go/internal/core/bootstrap.go @@ -2,11 +2,12 @@ package core import ( "crypto/sha256" + "encoding/hex" "fmt" "os" "github.com/google/uuid" - "github.com/shirou/gopsutil/v3/process" + "github.com/shirou/gopsutil/v4/process" ) // sessionID returns a unique session ID for the current test session. Because each Go package @@ -38,7 +39,7 @@ var sessionID string var projectPath string // processID returns a unique ID for the current test process. 
Because each Go package will be run in a separate process, -// we need a way to identify the current test process, in the form of an UUID +// we need a way to identify the current test process, in the form of a UUID var processID string const sessionIDPlaceholder = "testcontainers-go:%d:%d" @@ -50,7 +51,7 @@ func init() { var createTime int64 fallbackCwd, err := os.Getwd() if err != nil { - // very unlinke to fail, but if it does, we will use a temp dir + // very unlikely to fail, but if it does, we will use a temp dir fallbackCwd = os.TempDir() } @@ -83,13 +84,13 @@ func init() { } hasher := sha256.New() - _, err = hasher.Write([]byte(fmt.Sprintf(sessionIDPlaceholder, parentPid, createTime))) + _, err = fmt.Fprintf(hasher, sessionIDPlaceholder, parentPid, createTime) if err != nil { sessionID = uuid.New().String() return } - sessionID = fmt.Sprintf("%x", hasher.Sum(nil)) + sessionID = hex.EncodeToString(hasher.Sum(nil)) } func ProcessID() string { diff --git a/vendor/github.com/testcontainers/testcontainers-go/internal/core/docker_host.go b/vendor/github.com/testcontainers/testcontainers-go/internal/core/docker_host.go index 3088a3742..73ff0a977 100644 --- a/vendor/github.com/testcontainers/testcontainers-go/internal/core/docker_host.go +++ b/vendor/github.com/testcontainers/testcontainers-go/internal/core/docker_host.go @@ -104,7 +104,7 @@ func MustExtractDockerHost(ctx context.Context) string { // // 1. Docker host from the "tc.host" property in the ~/.testcontainers.properties file. // 2. The TESTCONTAINERS_DOCKER_SOCKET_OVERRIDE environment variable. -// 3. Using a Docker client, check if the Info().OperativeSystem is "Docker Desktop" and return the default docker socket path for rootless docker. +// 3. Using a Docker client, check if the Info().OperatingSystem is "Docker Desktop" and return the default docker socket path for rootless docker. // 4. Else, Get the current Docker Host from the existing strategies: see MustExtractDockerHost. // 5. If the socket contains the unix schema, the schema is removed (e.g. unix:///var/run/docker.sock -> /var/run/docker.sock) // 6. 
Else, the default location of the docker socket is used (/var/run/docker.sock) @@ -240,7 +240,7 @@ func isHostNotSet(err error) bool { } // dockerHostFromEnv returns the docker host from the DOCKER_HOST environment variable, if it's not empty -func dockerHostFromEnv(ctx context.Context) (string, error) { +func dockerHostFromEnv(_ context.Context) (string, error) { if dockerHostPath := os.Getenv("DOCKER_HOST"); dockerHostPath != "" { return dockerHostPath, nil } @@ -263,7 +263,7 @@ func dockerHostFromContext(ctx context.Context) (string, error) { } // dockerHostFromProperties returns the docker host from the ~/.testcontainers.properties file, if it's not empty -func dockerHostFromProperties(ctx context.Context) (string, error) { +func dockerHostFromProperties(_ context.Context) (string, error) { cfg := config.Read() socketPath := cfg.Host if socketPath != "" { @@ -285,7 +285,7 @@ func dockerSocketOverridePath() (string, error) { // dockerSocketPath returns the docker socket from the default docker socket path, if it's not empty // and the socket exists -func dockerSocketPath(ctx context.Context) (string, error) { +func dockerSocketPath(_ context.Context) (string, error) { if fileExists(DockerSocketPath) { return DockerSocketPathWithSchema, nil } @@ -294,25 +294,32 @@ func dockerSocketPath(ctx context.Context) (string, error) { } // testcontainersHostFromProperties returns the testcontainers host from the ~/.testcontainers.properties file, if it's not empty -func testcontainersHostFromProperties(ctx context.Context) (string, error) { +func testcontainersHostFromProperties(_ context.Context) (string, error) { cfg := config.Read() testcontainersHost := cfg.TestcontainersHost if testcontainersHost != "" { - parsed, err := parseURL(testcontainersHost) + // Validate the URL format + _, err := parseURL(testcontainersHost) if err != nil { return "", err } - return parsed, nil + // Return the original URL to preserve schema for Docker client + return testcontainersHost, nil } return "", ErrTestcontainersHostNotSetInProperties } +// DockerEnvFile is the file that is created when running inside a container. +// It's a variable to allow testing. +// TODO: Remove this once context rework is done, which eliminates need for the default network creation. +var DockerEnvFile = "/.dockerenv" + // InAContainer returns true if the code is running inside a container // See https://github.com/docker/docker/blob/a9fa38b1edf30b23cae3eade0be48b3d4b1de14b/daemon/initlayer/setup_unix.go#L25 func InAContainer() bool { - return inAContainer("/.dockerenv") + return inAContainer(DockerEnvFile) } func inAContainer(path string) bool { diff --git a/vendor/github.com/testcontainers/testcontainers-go/internal/core/docker_rootless.go b/vendor/github.com/testcontainers/testcontainers-go/internal/core/docker_rootless.go index b8e0f6e17..81083842e 100644 --- a/vendor/github.com/testcontainers/testcontainers-go/internal/core/docker_rootless.go +++ b/vendor/github.com/testcontainers/testcontainers-go/internal/core/docker_rootless.go @@ -3,11 +3,11 @@ package core import ( "context" "errors" - "fmt" "net/url" "os" "path/filepath" "runtime" + "strconv" ) var ( @@ -36,7 +36,7 @@ func IsWindows() bool { // 2. ~/.docker/run/docker.sock file. // 3. ~/.docker/desktop/docker.sock file. // 4. /run/user/${uid}/docker.sock file. -// 5. Else, return ErrRootlessDockerNotFound, wrapping secific errors for each of the above paths. +// 5. Else, return ErrRootlessDockerNotFound, wrapping specific errors for each of the above paths. 
// // It should include the Docker socket schema (unix://) in the returned path. func rootlessDockerSocketPath(_ context.Context) (string, error) { @@ -79,11 +79,9 @@ func fileExists(f string) bool { } func parseURL(s string) (string, error) { - var hostURL *url.URL - if u, err := url.Parse(s); err != nil { + hostURL, err := url.Parse(s) + if err != nil { return "", err - } else { - hostURL = u } switch hostURL.Scheme { @@ -144,7 +142,7 @@ func rootlessSocketPathFromHomeDesktopDir() (string, error) { // rootlessSocketPathFromRunDir returns the path to the rootless Docker socket from the /run/user//docker.sock file. func rootlessSocketPathFromRunDir() (string, error) { uid := os.Getuid() - f := filepath.Join(baseRunDir, "user", fmt.Sprintf("%d", uid), "docker.sock") + f := filepath.Join(baseRunDir, "user", strconv.Itoa(uid), "docker.sock") if fileExists(f) { return f, nil } diff --git a/vendor/github.com/testcontainers/testcontainers-go/internal/core/images.go b/vendor/github.com/testcontainers/testcontainers-go/internal/core/images.go index 2892267e9..f073a907f 100644 --- a/vendor/github.com/testcontainers/testcontainers-go/internal/core/images.go +++ b/vendor/github.com/testcontainers/testcontainers-go/internal/core/images.go @@ -106,7 +106,7 @@ func ExtractRegistry(image string, fallback string) string { return fallback } -// IsURL checks if the string is an URL. +// IsURL checks if the string is a URL. // Extracted from https://github.com/asaskevich/govalidator/blob/f21760c49a8d/validator.go#L104 func IsURL(str string) bool { if str == "" || utf8.RuneCountInString(str) >= maxURLRuneCount || len(str) <= minURLRuneCount || strings.HasPrefix(str, ".") { diff --git a/vendor/github.com/testcontainers/testcontainers-go/internal/core/labels.go b/vendor/github.com/testcontainers/testcontainers-go/internal/core/labels.go index 081492423..198fdae7c 100644 --- a/vendor/github.com/testcontainers/testcontainers-go/internal/core/labels.go +++ b/vendor/github.com/testcontainers/testcontainers-go/internal/core/labels.go @@ -3,6 +3,7 @@ package core import ( "errors" "fmt" + "maps" "strings" "github.com/testcontainers/testcontainers-go/internal" @@ -51,9 +52,7 @@ func DefaultLabels(sessionID string) map[string]string { // AddDefaultLabels adds the default labels for sessionID to target. func AddDefaultLabels(sessionID string, target map[string]string) { - for k, v := range DefaultLabels(sessionID) { - target[k] = v - } + maps.Copy(target, DefaultLabels(sessionID)) } // MergeCustomLabels sets labels from src to dst. 
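Several hunks above (AddGenericLabels in generic.go, AddDefaultLabels in labels.go) replace hand-written copy loops with Go 1.21's `maps.Copy`, which inserts every key/value pair of the source map into the destination, overwriting keys that already exist. A minimal, self-contained illustration (the label values here are made up):

	package main

	import (
		"fmt"
		"maps"
	)

	func main() {
		dst := map[string]string{"org.testcontainers": "true"}
		src := map[string]string{"org.testcontainers.sessionId": "abc123"}

		// Equivalent to: for k, v := range src { dst[k] = v }
		maps.Copy(dst, src)

		fmt.Println(len(dst)) // 2
	}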
diff --git a/vendor/github.com/testcontainers/testcontainers-go/internal/version.go b/vendor/github.com/testcontainers/testcontainers-go/internal/version.go index 0c688d5e3..5f51f3d0a 100644 --- a/vendor/github.com/testcontainers/testcontainers-go/internal/version.go +++ b/vendor/github.com/testcontainers/testcontainers-go/internal/version.go @@ -1,4 +1,4 @@ package internal // Version is the next development version of the application -const Version = "0.34.0" +const Version = "0.39.0" diff --git a/vendor/github.com/testcontainers/testcontainers-go/lifecycle.go b/vendor/github.com/testcontainers/testcontainers-go/lifecycle.go index 57833dafc..7887ebedc 100644 --- a/vendor/github.com/testcontainers/testcontainers-go/lifecycle.go +++ b/vendor/github.com/testcontainers/testcontainers-go/lifecycle.go @@ -5,13 +5,15 @@ import ( "errors" "fmt" "io" + "reflect" "strings" "time" - "github.com/cenkalti/backoff/v4" "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/network" "github.com/docker/go-connections/nat" + + "github.com/testcontainers/testcontainers-go/log" ) // ContainerRequestHook is a hook that will be called before a container is created. @@ -39,6 +41,8 @@ type ContainerHook func(ctx context.Context, ctr Container) error // to modify the container lifecycle. All the container lifecycle hooks except the PreCreates hooks // will be passed to the container once it's created type ContainerLifecycleHooks struct { + PreBuilds []ContainerRequestHook + PostBuilds []ContainerRequestHook PreCreates []ContainerRequestHook PostCreates []ContainerHook PreStarts []ContainerHook @@ -51,62 +55,74 @@ type ContainerLifecycleHooks struct { } // DefaultLoggingHook is a hook that will log the container lifecycle events -var DefaultLoggingHook = func(logger Logging) ContainerLifecycleHooks { +var DefaultLoggingHook = func(logger log.Logger) ContainerLifecycleHooks { shortContainerID := func(c Container) string { return c.GetContainerID()[:12] } return ContainerLifecycleHooks{ + PreBuilds: []ContainerRequestHook{ + func(_ context.Context, req ContainerRequest) error { + logger.Printf("🐳 Building image %s:%s", req.GetRepo(), req.GetTag()) + return nil + }, + }, + PostBuilds: []ContainerRequestHook{ + func(_ context.Context, req ContainerRequest) error { + logger.Printf("✅ Built image %s", req.Image) + return nil + }, + }, PreCreates: []ContainerRequestHook{ - func(ctx context.Context, req ContainerRequest) error { + func(_ context.Context, req ContainerRequest) error { logger.Printf("🐳 Creating container for image %s", req.Image) return nil }, }, PostCreates: []ContainerHook{ - func(ctx context.Context, c Container) error { + func(_ context.Context, c Container) error { logger.Printf("✅ Container created: %s", shortContainerID(c)) return nil }, }, PreStarts: []ContainerHook{ - func(ctx context.Context, c Container) error { + func(_ context.Context, c Container) error { logger.Printf("🐳 Starting container: %s", shortContainerID(c)) return nil }, }, PostStarts: []ContainerHook{ - func(ctx context.Context, c Container) error { + func(_ context.Context, c Container) error { logger.Printf("✅ Container started: %s", shortContainerID(c)) return nil }, }, PostReadies: []ContainerHook{ - func(ctx context.Context, c Container) error { + func(_ context.Context, c Container) error { logger.Printf("🔔 Container is ready: %s", shortContainerID(c)) return nil }, }, PreStops: []ContainerHook{ - func(ctx context.Context, c Container) error { + func(_ context.Context, c Container) error { 
logger.Printf("🐳 Stopping container: %s", shortContainerID(c)) return nil }, }, PostStops: []ContainerHook{ - func(ctx context.Context, c Container) error { + func(_ context.Context, c Container) error { logger.Printf("✅ Container stopped: %s", shortContainerID(c)) return nil }, }, PreTerminates: []ContainerHook{ - func(ctx context.Context, c Container) error { + func(_ context.Context, c Container) error { logger.Printf("🐳 Terminating container: %s", shortContainerID(c)) return nil }, }, PostTerminates: []ContainerHook{ - func(ctx context.Context, c Container) error { + func(_ context.Context, c Container) error { logger.Printf("🚫 Container terminated: %s", shortContainerID(c)) return nil }, @@ -173,10 +189,7 @@ var defaultLogConsumersHook = func(cfg *LogConsumerConfig) ContainerLifecycleHoo } dockerContainer := c.(*DockerContainer) - dockerContainer.consumers = dockerContainer.consumers[:0] - for _, consumer := range cfg.Consumers { - dockerContainer.followOutput(consumer) - } + dockerContainer.resetConsumers(cfg.Consumers) return dockerContainer.startLogProduction(ctx, cfg.Opts...) }, @@ -184,7 +197,7 @@ var defaultLogConsumersHook = func(cfg *LogConsumerConfig) ContainerLifecycleHoo PostStops: []ContainerHook{ // Stop the log production. // See combineContainerHooks for the order of execution. - func(ctx context.Context, c Container) error { + func(_ context.Context, c Container) error { if cfg == nil || len(cfg.Consumers) == 0 { return nil } @@ -196,71 +209,10 @@ var defaultLogConsumersHook = func(cfg *LogConsumerConfig) ContainerLifecycleHoo } } -func checkPortsMapped(exposedAndMappedPorts nat.PortMap, exposedPorts []string) error { - portMap, _, err := nat.ParsePortSpecs(exposedPorts) - if err != nil { - return fmt.Errorf("parse exposed ports: %w", err) - } - - for exposedPort := range portMap { - // having entries in exposedAndMappedPorts, where the key is the exposed port, - // and the value is the mapped port, means that the port has been already mapped. - if _, ok := exposedAndMappedPorts[exposedPort]; ok { - continue - } - - // check if the port is mapped with the protocol (default is TCP) - if strings.Contains(string(exposedPort), "/") { - return fmt.Errorf("port %s is not mapped yet", exposedPort) - } - - // Port didn't have a type, default to tcp and retry. - exposedPort += "/tcp" - if _, ok := exposedAndMappedPorts[exposedPort]; !ok { - return fmt.Errorf("port %s is not mapped yet", exposedPort) - } - } - - return nil -} - // defaultReadinessHook is a hook that will wait for the container to be ready var defaultReadinessHook = func() ContainerLifecycleHooks { return ContainerLifecycleHooks{ PostStarts: []ContainerHook{ - func(ctx context.Context, c Container) error { - // wait until all the exposed ports are mapped: - // it will be ready when all the exposed ports are mapped, - // checking every 50ms, up to 1s, and failing if all the - // exposed ports are not mapped in 5s. 
- dockerContainer := c.(*DockerContainer) - - b := backoff.NewExponentialBackOff() - - b.InitialInterval = 50 * time.Millisecond - b.MaxElapsedTime = 5 * time.Second - b.MaxInterval = time.Duration(float64(time.Second) * backoff.DefaultRandomizationFactor) - - err := backoff.RetryNotify( - func() error { - jsonRaw, err := dockerContainer.inspectRawContainer(ctx) - if err != nil { - return err - } - - return checkPortsMapped(jsonRaw.NetworkSettings.Ports, dockerContainer.exposedPorts) - }, - b, - func(err error, duration time.Duration) { - dockerContainer.logger.Printf("All requested ports were not exposed: %v", err) - }, - ) - if err != nil { - return fmt.Errorf("all exposed ports, %s, were not mapped in 5s: %w", dockerContainer.exposedPorts, err) - } - - return nil - }, // wait for the container to be ready func(ctx context.Context, c Container) error { dockerContainer := c.(*DockerContainer) @@ -284,11 +236,34 @@ var defaultReadinessHook = func() ContainerLifecycleHooks { } } +// buildingHook is a hook that will be called before a container image is built. +func (req ContainerRequest) buildingHook(ctx context.Context) error { + return req.applyLifecycleHooks(func(lifecycleHooks ContainerLifecycleHooks) error { + return lifecycleHooks.Building(ctx)(req) + }) +} + +// builtHook is a hook that will be called after a container image is built. +func (req ContainerRequest) builtHook(ctx context.Context) error { + return req.applyLifecycleHooks(func(lifecycleHooks ContainerLifecycleHooks) error { + return lifecycleHooks.Built(ctx)(req) + }) +} + // creatingHook is a hook that will be called before a container is created. func (req ContainerRequest) creatingHook(ctx context.Context) error { - errs := make([]error, len(req.LifecycleHooks)) - for i, lifecycleHooks := range req.LifecycleHooks { - errs[i] = lifecycleHooks.Creating(ctx)(req) + return req.applyLifecycleHooks(func(lifecycleHooks ContainerLifecycleHooks) error { + return lifecycleHooks.Creating(ctx)(req) + }) +} + +// applyLifecycleHooks calls hook on all LifecycleHooks. +func (req ContainerRequest) applyLifecycleHooks(hook func(lifecycleHooks ContainerLifecycleHooks) error) error { + var errs []error + for _, lifecycleHooks := range req.LifecycleHooks { + if err := hook(lifecycleHooks); err != nil { + errs = append(errs, err) + } } return errors.Join(errs...) @@ -333,7 +308,11 @@ func (c *DockerContainer) printLogs(ctx context.Context, cause error) { b, err := io.ReadAll(reader) if err != nil { - c.logger.Printf("failed reading container logs: %v\n", err) + if len(b) > 0 { + c.logger.Printf("failed reading container logs: %v\npartial container logs (%s):\n%s", err, cause, b) + } else { + c.logger.Printf("failed reading container logs: %v\n", err) + } return } @@ -370,9 +349,11 @@ func (c *DockerContainer) terminatedHook(ctx context.Context) error { // applyLifecycleHooks applies all lifecycle hooks reporting the container logs on error if logError is true. 
 func (c *DockerContainer) applyLifecycleHooks(ctx context.Context, logError bool, hooks func(lifecycleHooks ContainerLifecycleHooks) []ContainerHook) error {
-	errs := make([]error, len(c.lifecycleHooks))
-	for i, lifecycleHooks := range c.lifecycleHooks {
-		errs[i] = containerHookFn(ctx, hooks(lifecycleHooks))(c)
+	var errs []error
+	for _, lifecycleHooks := range c.lifecycleHooks {
+		if err := containerHookFn(ctx, hooks(lifecycleHooks))(c); err != nil {
+			errs = append(errs, err)
+		}
 	}
 
 	if err := errors.Join(errs...); err != nil {
@@ -394,10 +375,26 @@ func (c *DockerContainer) applyLifecycleHooks(ctx context.Context, logError bool
 	return nil
 }
 
+// Building is a hook that will be called before a container image is built.
+func (c ContainerLifecycleHooks) Building(ctx context.Context) func(req ContainerRequest) error {
+	return containerRequestHook(ctx, c.PreBuilds)
+}
+
+// Built is a hook that will be called after a container image is built.
+func (c ContainerLifecycleHooks) Built(ctx context.Context) func(req ContainerRequest) error {
+	return containerRequestHook(ctx, c.PostBuilds)
+}
+
 // Creating is a hook that will be called before a container is created.
 func (c ContainerLifecycleHooks) Creating(ctx context.Context) func(req ContainerRequest) error {
+	return containerRequestHook(ctx, c.PreCreates)
+}
+
+// containerRequestHook returns a function that will iterate over all
+// the hooks and call them one by one until there is an error.
+func containerRequestHook(ctx context.Context, hooks []ContainerRequestHook) func(req ContainerRequest) error {
 	return func(req ContainerRequest) error {
-		for _, hook := range c.PreCreates {
+		for _, hook := range hooks {
 			if err := hook(ctx, req); err != nil {
 				return err
 			}
@@ -411,9 +408,11 @@ func (c ContainerLifecycleHooks) Creating(ctx context.Context) func(req Containe
 // container lifecycle hooks. The created function will iterate over all the hooks and call them one by one.
 func containerHookFn(ctx context.Context, containerHook []ContainerHook) func(container Container) error {
 	return func(ctr Container) error {
-		errs := make([]error, len(containerHook))
-		for i, hook := range containerHook {
-			errs[i] = hook(ctx, ctr)
+		var errs []error
+		for _, hook := range containerHook {
+			if err := hook(ctx, ctr); err != nil {
+				errs = append(errs, err)
+			}
 		}
 
 		return errors.Join(errs...)
@@ -461,6 +460,20 @@ func (c ContainerLifecycleHooks) Terminated(ctx context.Context) func(container
 }
 
 func (p *DockerProvider) preCreateContainerHook(ctx context.Context, req ContainerRequest, dockerInput *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig) error {
+	var mountErrors []error
+	for _, m := range req.Mounts {
+		// validate only the mount sources that implement the Validator interface
+		if v, ok := m.Source.(Validator); ok {
+			if err := v.Validate(); err != nil {
+				mountErrors = append(mountErrors, err)
+			}
+		}
+	}
+
+	if len(mountErrors) > 0 {
+		return errors.Join(mountErrors...)
+ } + // prepare mounts hostConfig.Mounts = mapToDockerMounts(req.Mounts) @@ -488,17 +501,18 @@ func (p *DockerProvider) preCreateContainerHook(ctx context.Context, req Contain } } - if req.ConfigModifier != nil { - req.ConfigModifier(dockerInput) + if req.ConfigModifier == nil { + req.ConfigModifier = defaultConfigModifier(req) } + req.ConfigModifier(dockerInput) if req.HostConfigModifier == nil { req.HostConfigModifier = defaultHostConfigModifier(req) } req.HostConfigModifier(hostConfig) - if req.EnpointSettingsModifier != nil { - req.EnpointSettingsModifier(endpointSettings) + if req.EndpointSettingsModifier != nil { + req.EndpointSettingsModifier(endpointSettings) } networkingConfig.EndpointsConfig = endpointSettings @@ -506,7 +520,7 @@ func (p *DockerProvider) preCreateContainerHook(ctx context.Context, req Contain exposedPorts := req.ExposedPorts // this check must be done after the pre-creation Modifiers are called, so the network mode is already set if len(exposedPorts) == 0 && !hostConfig.NetworkMode.IsContainer() { - image, _, err := p.client.ImageInspectWithRaw(ctx, dockerInput.Image) + image, err := p.client.ImageInspect(ctx, dockerInput.Image) if err != nil { return err } @@ -532,65 +546,50 @@ func (p *DockerProvider) preCreateContainerHook(ctx context.Context, req Contain return nil } -// combineContainerHooks it returns just one ContainerLifecycle hook, as the result of combining -// the default hooks with the user-defined hooks. The function will loop over all the default hooks, -// storing each of the hooks in a slice, and then it will loop over all the user-defined hooks, -// appending or prepending them to the slice of hooks. The order of hooks is the following: -// - for Pre-hooks, always run the default hooks first, then append the user-defined hooks -// - for Post-hooks, always run the user-defined hooks first, then the default hooks +// combineContainerHooks returns a ContainerLifecycle hook as the result +// of combining the default hooks with the user-defined hooks. +// +// The order of hooks is the following: +// - Pre-hooks run the default hooks first then the user-defined hooks +// - Post-hooks run the user-defined hooks first then the default hooks func combineContainerHooks(defaultHooks, userDefinedHooks []ContainerLifecycleHooks) ContainerLifecycleHooks { - preCreates := []ContainerRequestHook{} - postCreates := []ContainerHook{} - preStarts := []ContainerHook{} - postStarts := []ContainerHook{} - postReadies := []ContainerHook{} - preStops := []ContainerHook{} - postStops := []ContainerHook{} - preTerminates := []ContainerHook{} - postTerminates := []ContainerHook{} - + // We use reflection here to ensure that any new hooks are handled. + var hooks ContainerLifecycleHooks + hooksVal := reflect.ValueOf(&hooks).Elem() + hooksType := reflect.TypeOf(hooks) for _, defaultHook := range defaultHooks { - preCreates = append(preCreates, defaultHook.PreCreates...) - preStarts = append(preStarts, defaultHook.PreStarts...) - preStops = append(preStops, defaultHook.PreStops...) - preTerminates = append(preTerminates, defaultHook.PreTerminates...) 
+		defaultVal := reflect.ValueOf(defaultHook)
+		for i := range hooksType.NumField() {
+			if strings.HasPrefix(hooksType.Field(i).Name, "Pre") {
+				field := hooksVal.Field(i)
+				field.Set(reflect.AppendSlice(field, defaultVal.Field(i)))
+			}
+		}
 	}
 
-	// append the user-defined hooks after the default pre-hooks
-	// and because the post hooks are still empty, the user-defined post-hooks
-	// will be the first ones to be executed
+	// Append the user-defined hooks after the default pre-hooks
+	// and because the post hooks are still empty, the user-defined
+	// post-hooks will be the first ones to be executed.
 	for _, userDefinedHook := range userDefinedHooks {
-		preCreates = append(preCreates, userDefinedHook.PreCreates...)
-		postCreates = append(postCreates, userDefinedHook.PostCreates...)
-		preStarts = append(preStarts, userDefinedHook.PreStarts...)
-		postStarts = append(postStarts, userDefinedHook.PostStarts...)
-		postReadies = append(postReadies, userDefinedHook.PostReadies...)
-		preStops = append(preStops, userDefinedHook.PreStops...)
-		postStops = append(postStops, userDefinedHook.PostStops...)
-		preTerminates = append(preTerminates, userDefinedHook.PreTerminates...)
-		postTerminates = append(postTerminates, userDefinedHook.PostTerminates...)
+		userVal := reflect.ValueOf(userDefinedHook)
+		for i := range hooksType.NumField() {
+			field := hooksVal.Field(i)
+			field.Set(reflect.AppendSlice(field, userVal.Field(i)))
+		}
 	}
 
-	// finally, append the default post-hooks
+	// Finally, append the default post-hooks.
 	for _, defaultHook := range defaultHooks {
-		postCreates = append(postCreates, defaultHook.PostCreates...)
-		postStarts = append(postStarts, defaultHook.PostStarts...)
-		postReadies = append(postReadies, defaultHook.PostReadies...)
-		postStops = append(postStops, defaultHook.PostStops...)
-		postTerminates = append(postTerminates, defaultHook.PostTerminates...)
+		defaultVal := reflect.ValueOf(defaultHook)
+		for i := range hooksType.NumField() {
+			if strings.HasPrefix(hooksType.Field(i).Name, "Post") {
+				field := hooksVal.Field(i)
+				field.Set(reflect.AppendSlice(field, defaultVal.Field(i)))
+			}
+		}
 	}
 
-	return ContainerLifecycleHooks{
-		PreCreates:     preCreates,
-		PostCreates:    postCreates,
-		PreStarts:      preStarts,
-		PostStarts:     postStarts,
-		PostReadies:    postReadies,
-		PreStops:       preStops,
-		PostStops:      postStops,
-		PreTerminates:  preTerminates,
-		PostTerminates: postTerminates,
-	}
+	return hooks
 }
 
 func mergePortBindings(configPortMap, exposedPortMap nat.PortMap, exposedPorts []string) nat.PortMap {
@@ -612,6 +611,15 @@ func mergePortBindings(configPortMap, exposedPortMap nat.PortMap, exposedPorts [
 	return exposedPortMap
 }
 
+// defaultConfigModifier provides a default modifier including the deprecated fields
+func defaultConfigModifier(req ContainerRequest) func(config *container.Config) {
+	return func(config *container.Config) {
+		config.Hostname = req.Hostname
+		config.WorkingDir = req.WorkingDir
+		config.User = req.User
+	}
+}
+
 // defaultHostConfigModifier provides a default modifier including the deprecated fields
 func defaultHostConfigModifier(req ContainerRequest) func(hostConfig *container.HostConfig) {
 	return func(hostConfig *container.HostConfig) {
@@ -622,5 +630,7 @@ func defaultHostConfigModifier(req ContainerRequest) func(hostConfig *container.
 		hostConfig.ExtraHosts = req.ExtraHosts
 		hostConfig.NetworkMode = req.NetworkMode
 		hostConfig.Resources = req.Resources
+		hostConfig.Privileged = req.Privileged
+		hostConfig.ShmSize = req.ShmSize
 	}
 }
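The reflection-based combineContainerHooks above is compact but subtle, so a standalone sketch may help. This is not the vendored code: it uses a hypothetical two-field hooks struct and assumes Go 1.22+ for range-over-int, but it reproduces the same merge order (default pre-hooks first, then all user hooks, then default post-hooks):

package main

import (
	"fmt"
	"reflect"
	"strings"
)

// hooks stands in for ContainerLifecycleHooks; only the slice fields matter.
type hooks struct {
	PreCreates  []string
	PostCreates []string
}

// combine mirrors the Pre/Post ordering used by combineContainerHooks.
func combine(defaults, user hooks) hooks {
	var out hooks
	outVal := reflect.ValueOf(&out).Elem()
	typ := reflect.TypeOf(out)

	// appendFields appends every field of src whose name has the given
	// prefix ("" matches all fields) onto the corresponding field of out.
	appendFields := func(src hooks, prefix string) {
		srcVal := reflect.ValueOf(src)
		for i := range typ.NumField() {
			if prefix == "" || strings.HasPrefix(typ.Field(i).Name, prefix) {
				field := outVal.Field(i)
				field.Set(reflect.AppendSlice(field, srcVal.Field(i)))
			}
		}
	}

	appendFields(defaults, "Pre") // default pre-hooks run first
	appendFields(user, "")        // then all user-defined hooks
	appendFields(defaults, "Post") // default post-hooks run last
	return out
}

func main() {
	merged := combine(
		hooks{PreCreates: []string{"default-pre"}, PostCreates: []string{"default-post"}},
		hooks{PreCreates: []string{"user-pre"}, PostCreates: []string{"user-post"}},
	)
	fmt.Println(merged.PreCreates)  // [default-pre user-pre]
	fmt.Println(merged.PostCreates) // [user-post default-post]
}

Because the fields are discovered reflectively, a newly added PreXxx or PostXxx slice on the struct is merged automatically, which is the motivation the diff itself states in its "We use reflection here" comment.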
diff --git a/vendor/github.com/testcontainers/testcontainers-go/log/logger.go b/vendor/github.com/testcontainers/testcontainers-go/log/logger.go
new file mode 100644
index 000000000..d20e90a05
--- /dev/null
+++ b/vendor/github.com/testcontainers/testcontainers-go/log/logger.go
@@ -0,0 +1,73 @@
+package log
+
+import (
+	"log"
+	"os"
+	"strings"
+	"testing"
+)
+
+// Validate our types implement the required interfaces.
+var (
+	_ Logger = (*log.Logger)(nil)
+	_ Logger = (*noopLogger)(nil)
+	_ Logger = (*testLogger)(nil)
+)
+
+// Logger defines the Logger interface.
+type Logger interface {
+	Printf(format string, v ...any)
+}
+
+// defaultLogger is the default Logger instance.
+var defaultLogger Logger = &noopLogger{}
+
+func init() {
+	// Enable the default logger in tests when the verbose flag is set.
+	if testing.Testing() {
+		// Parse manually because testing.Verbose() panics unless flag.Parse() has been called.
+		for _, arg := range os.Args {
+			if strings.EqualFold(arg, "-test.v=true") || strings.EqualFold(arg, "-v") {
+				defaultLogger = log.New(os.Stderr, "", log.LstdFlags)
+			}
+		}
+	}
+}
+
+// Default returns the default Logger instance.
+func Default() Logger {
+	return defaultLogger
+}
+
+// SetDefault sets the default Logger instance.
+func SetDefault(logger Logger) {
+	defaultLogger = logger
+}
+
+func Printf(format string, v ...any) {
+	defaultLogger.Printf(format, v...)
+}
+
+type noopLogger struct{}
+
+// Printf implements Logger.
+func (n noopLogger) Printf(_ string, _ ...any) {
+	// NOOP
+}
+
+// TestLogger returns a Logger implementation for testing.TB
+// This way logs from testcontainers are part of the test output of a test suite or test case.
+func TestLogger(tb testing.TB) Logger {
+	tb.Helper()
+	return testLogger{TB: tb}
+}
+
+type testLogger struct {
+	testing.TB
+}
+
+// Printf implements Logger.
+func (t testLogger) Printf(format string, v ...any) {
+	t.Helper()
+	t.Logf(format, v...)
+}
diff --git a/vendor/github.com/testcontainers/testcontainers-go/logger.go b/vendor/github.com/testcontainers/testcontainers-go/logger.go
deleted file mode 100644
index fca5da539..000000000
--- a/vendor/github.com/testcontainers/testcontainers-go/logger.go
+++ /dev/null
@@ -1,103 +0,0 @@
-package testcontainers
-
-import (
-	"context"
-	"log"
-	"os"
-	"strings"
-	"testing"
-
-	"github.com/docker/docker/client"
-)
-
-// Logger is the default log instance
-var Logger Logging = log.New(os.Stderr, "", log.LstdFlags)
-
-func init() {
-	for _, arg := range os.Args {
-		if strings.EqualFold(arg, "-test.v=true") || strings.EqualFold(arg, "-v") {
-			return
-		}
-	}
-
-	// If we are not running in verbose mode, we configure a noop logger by default.
-	Logger = &noopLogger{}
-}
-
-// Validate our types implement the required interfaces.
-var (
-	_ Logging               = (*log.Logger)(nil)
-	_ ContainerCustomizer   = LoggerOption{}
-	_ GenericProviderOption = LoggerOption{}
-	_ DockerProviderOption  = LoggerOption{}
-)
-
-// Logging defines the Logger interface
-type Logging interface {
-	Printf(format string, v ...interface{})
-}
-
-type noopLogger struct{}
-
-// Printf implements Logging.
-func (n noopLogger) Printf(format string, v ...interface{}) { - // NOOP -} - -// Deprecated: this function will be removed in a future release -// LogDockerServerInfo logs the docker server info using the provided logger and Docker client -func LogDockerServerInfo(ctx context.Context, client client.APIClient, logger Logging) { - // NOOP -} - -// TestLogger returns a Logging implementation for testing.TB -// This way logs from testcontainers are part of the test output of a test suite or test case. -func TestLogger(tb testing.TB) Logging { - tb.Helper() - return testLogger{TB: tb} -} - -// WithLogger returns a generic option that sets the logger to be used. -// -// Consider calling this before other "With functions" as these may generate logs. -// -// This can be given a TestLogger to collect the logs from testcontainers into a -// test case. -func WithLogger(logger Logging) LoggerOption { - return LoggerOption{ - logger: logger, - } -} - -// LoggerOption is a generic option that sets the logger to be used. -// -// It can be used to set the logger for providers and containers. -type LoggerOption struct { - logger Logging -} - -// ApplyGenericTo implements GenericProviderOption. -func (o LoggerOption) ApplyGenericTo(opts *GenericProviderOptions) { - opts.Logger = o.logger -} - -// ApplyDockerTo implements DockerProviderOption. -func (o LoggerOption) ApplyDockerTo(opts *DockerProviderOptions) { - opts.Logger = o.logger -} - -// Customize implements ContainerCustomizer. -func (o LoggerOption) Customize(req *GenericContainerRequest) error { - req.Logger = o.logger - return nil -} - -type testLogger struct { - testing.TB -} - -// Printf implements Logging. -func (t testLogger) Printf(format string, v ...interface{}) { - t.Helper() - t.Logf(format, v...) -} diff --git a/vendor/github.com/testcontainers/testcontainers-go/logger_option.go b/vendor/github.com/testcontainers/testcontainers-go/logger_option.go new file mode 100644 index 000000000..d40dd93aa --- /dev/null +++ b/vendor/github.com/testcontainers/testcontainers-go/logger_option.go @@ -0,0 +1,45 @@ +package testcontainers + +import "github.com/testcontainers/testcontainers-go/log" + +// Validate our types implement the required interfaces. +var ( + _ ContainerCustomizer = LoggerOption{} + _ GenericProviderOption = LoggerOption{} + _ DockerProviderOption = LoggerOption{} +) + +// WithLogger returns a generic option that sets the logger to be used. +// +// Consider calling this before other "With functions" as these may generate logs. +// +// This can be given a TestLogger to collect the logs from testcontainers into a +// test case. +func WithLogger(logger log.Logger) LoggerOption { + return LoggerOption{ + logger: logger, + } +} + +// LoggerOption is a generic option that sets the logger to be used. +// +// It can be used to set the logger for providers and containers. +type LoggerOption struct { + logger log.Logger +} + +// ApplyGenericTo implements GenericProviderOption. +func (o LoggerOption) ApplyGenericTo(opts *GenericProviderOptions) { + opts.Logger = o.logger +} + +// ApplyDockerTo implements DockerProviderOption. +func (o LoggerOption) ApplyDockerTo(opts *DockerProviderOptions) { + opts.Logger = o.logger +} + +// Customize implements ContainerCustomizer. 
+func (o LoggerOption) Customize(req *GenericContainerRequest) error { + req.Logger = o.logger + return nil +} diff --git a/vendor/github.com/testcontainers/testcontainers-go/mkdocs.yml b/vendor/github.com/testcontainers/testcontainers-go/mkdocs.yml index 2d80a5b42..8d6ee9f46 100644 --- a/vendor/github.com/testcontainers/testcontainers-go/mkdocs.yml +++ b/vendor/github.com/testcontainers/testcontainers-go/mkdocs.yml @@ -40,19 +40,6 @@ nav: - Quickstart: quickstart.md - Features: - features/creating_container.md - - features/configuration.md - - features/image_name_substitution.md - - features/files_and_mounts.md - - features/creating_networks.md - - features/networking.md - - features/tls.md - - features/test_session_semantics.md - - features/garbage_collector.md - - features/build_from_dockerfile.md - - features/docker_auth.md - - features/docker_compose.md - - features/follow_logs.md - - features/override_container_command.md - Wait Strategies: - Introduction: features/wait/introduction.md - Exec: features/wait/exec.md @@ -64,9 +51,26 @@ nav: - Log: features/wait/log.md - Multi: features/wait/multi.md - SQL: features/wait/sql.md + - TLS: features/wait/tls.md + - Walk: features/wait/walk.md + - features/files_and_mounts.md + - features/follow_logs.md + - features/garbage_collector.md + - features/build_from_dockerfile.md + - features/override_container_command.md + - features/networking.md + - features/configuration.md + - features/image_name_substitution.md + - features/test_session_semantics.md + - features/docker_auth.md + - features/docker_compose.md + - features/tls.md - Modules: - modules/index.md + - modules/aerospike.md + - modules/arangodb.md - modules/artemis.md + - modules/azure.md - modules/azurite.md - modules/cassandra.md - modules/chroma.md @@ -75,6 +79,9 @@ nav: - modules/consul.md - modules/couchbase.md - modules/databend.md + - modules/dind.md + - modules/dockermcpgateway.md + - modules/dockermodelrunner.md - modules/dolt.md - modules/dynamodb.md - modules/elasticsearch.md @@ -89,18 +96,22 @@ nav: - modules/localstack.md - modules/mariadb.md - modules/meilisearch.md + - modules/memcached.md - modules/milvus.md - modules/minio.md - modules/mockserver.md + - modules/mongodb-atlaslocal.md - modules/mongodb.md - modules/mssql.md - modules/mysql.md - modules/nats.md + - modules/nebulagraph.md - modules/neo4j.md - modules/ollama.md - modules/openfga.md - modules/openldap.md - modules/opensearch.md + - modules/pinecone.md - modules/postgres.md - modules/pulsar.md - modules/qdrant.md @@ -108,7 +119,11 @@ nav: - modules/redis.md - modules/redpanda.md - modules/registry.md + - modules/scylladb.md + - modules/socat.md + - modules/solace.md - modules/surrealdb.md + - modules/toxiproxy.md - modules/valkey.md - modules/vault.md - modules/vearch.md @@ -117,7 +132,6 @@ nav: - Examples: - examples/index.md - examples/nginx.md - - examples/toxiproxy.md - System Requirements: - system_requirements/index.md - system_requirements/docker.md @@ -134,10 +148,9 @@ nav: - system_requirements/using_colima.md - system_requirements/using_podman.md - system_requirements/rancher.md - - Contributing: - - contributing.md - - contributing_docs.md + - Dependabot: dependabot.md + - Contributing: contributing.md - Getting help: getting_help.md edit_uri: edit/main/docs/ extra: - latest_version: v0.34.0 + latest_version: v0.39.0 diff --git a/vendor/github.com/testcontainers/testcontainers-go/modules/mongodb/cli.go b/vendor/github.com/testcontainers/testcontainers-go/modules/mongodb/cli.go new file mode 100644 
index 000000000..f990bf17c
--- /dev/null
+++ b/vendor/github.com/testcontainers/testcontainers-go/modules/mongodb/cli.go
@@ -0,0 +1,32 @@
+package mongodb
+
+import "fmt"
+
+// mongoCli is a CLI wrapper used to interact with MongoDB. If username and password
+// are provided, it will use the credentials to authenticate.
+type mongoCli struct {
+	mongoshBaseCmd string
+	mongoBaseCmd   string
+}
+
+func newMongoCli(username string, password string) mongoCli {
+	authArgs := ""
+	if username != "" && password != "" {
+		authArgs = fmt.Sprintf("--username %s --password %s", username, password)
+	}
+
+	return mongoCli{
+		mongoshBaseCmd: fmt.Sprintf("mongosh %s --quiet", authArgs),
+		mongoBaseCmd:   fmt.Sprintf("mongo %s --quiet", authArgs),
+	}
+}
+
+func (m mongoCli) eval(command string, args ...any) []string {
+	command = "\"" + fmt.Sprintf(command, args...) + "\""
+
+	return []string{
+		"sh",
+		"-c",
+		m.mongoshBaseCmd + " --eval " + command + " || " + m.mongoBaseCmd + " --eval " + command,
+	}
+}
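Before the mongodb.go changes that consume this helper, a sketch of what eval renders may be useful. The test below is hypothetical (it would need to live in this package, since newMongoCli and eval are unexported), but the expected string follows directly from the code above:

package mongodb

import "testing"

// TestEvalCommandShape is illustrative only: eval produces a single sh -c
// command that tries mongosh first and falls back to the legacy mongo shell.
func TestEvalCommandShape(t *testing.T) {
	cli := newMongoCli("admin", "secret")
	cmd := cli.eval("rs.status().ok")

	want := `mongosh --username admin --password secret --quiet --eval "rs.status().ok" || ` +
		`mongo --username admin --password secret --quiet --eval "rs.status().ok"`
	if len(cmd) != 3 || cmd[0] != "sh" || cmd[1] != "-c" || cmd[2] != want {
		t.Fatalf("unexpected command: %#v", cmd)
	}
}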
diff --git a/vendor/github.com/testcontainers/testcontainers-go/modules/mongodb/mongodb.go b/vendor/github.com/testcontainers/testcontainers-go/modules/mongodb/mongodb.go
index 3f73e5dc7..844c525e1 100644
--- a/vendor/github.com/testcontainers/testcontainers-go/modules/mongodb/mongodb.go
+++ b/vendor/github.com/testcontainers/testcontainers-go/modules/mongodb/mongodb.go
@@ -1,19 +1,34 @@
 package mongodb
 
 import (
+	"bytes"
 	"context"
+	_ "embed"
+	"errors"
 	"fmt"
+	"net/url"
 	"time"
 
 	"github.com/testcontainers/testcontainers-go"
 	"github.com/testcontainers/testcontainers-go/wait"
 )
 
+//go:embed mount/entrypoint-tc.sh
+var entrypointContent []byte
+
+const (
+	defaultPort         = "27017/tcp"
+	entrypointPath      = "/tmp/entrypoint-tc.sh"
+	keyFilePath         = "/tmp/mongo_keyfile"
+	replicaSetOptEnvKey = "testcontainers.mongodb.replicaset_name"
+)
+
 // MongoDBContainer represents the MongoDB container type used in the module
 type MongoDBContainer struct {
 	testcontainers.Container
-	username string
-	password string
+	username   string
+	password   string
+	replicaSet string
 }
 
 // Deprecated: use Run instead
@@ -26,10 +41,10 @@ func RunContainer(ctx context.Context, opts ...testcontainers.ContainerCustomize
 func Run(ctx context.Context, img string, opts ...testcontainers.ContainerCustomizer) (*MongoDBContainer, error) {
 	req := testcontainers.ContainerRequest{
 		Image:        img,
-		ExposedPorts: []string{"27017/tcp"},
+		ExposedPorts: []string{defaultPort},
 		WaitingFor: wait.ForAll(
 			wait.ForLog("Waiting for connections"),
-			wait.ForListeningPort("27017/tcp"),
+			wait.ForListeningPort(defaultPort),
 		),
 		Env: map[string]string{},
 	}
@@ -47,13 +62,20 @@ func Run(ctx context.Context, img string, opts ...testcontainers.ContainerCustom
 	username := req.Env["MONGO_INITDB_ROOT_USERNAME"]
 	password := req.Env["MONGO_INITDB_ROOT_PASSWORD"]
 	if username != "" && password == "" || username == "" && password != "" {
-		return nil, fmt.Errorf("if you specify username or password, you must provide both of them")
+		return nil, errors.New("if you specify username or password, you must provide both of them")
+	}
+
+	replicaSet := req.Env[replicaSetOptEnvKey]
+	if replicaSet != "" {
+		if err := configureRequestForReplicaset(username, password, replicaSet, &genericContainerReq); err != nil {
+			return nil, err
+		}
 	}
 
 	container, err := testcontainers.GenericContainer(ctx, genericContainerReq)
 	var c *MongoDBContainer
 	if container != nil {
-		c = &MongoDBContainer{Container: container, username: username, password: password}
+		c = &MongoDBContainer{Container: container, username: username,
+			password: password, replicaSet: replicaSet}
 	}
 
 	if err != nil {
@@ -85,28 +107,10 @@ func WithPassword(password string) testcontainers.CustomizeRequestOption {
 	}
 }
 
-// WithReplicaSet configures the container to run a single-node MongoDB replica set named "rs".
-// It will wait until the replica set is ready.
+// WithReplicaSet sets the replica set name for a single-node MongoDB replica set.
 func WithReplicaSet(replSetName string) testcontainers.CustomizeRequestOption {
 	return func(req *testcontainers.GenericContainerRequest) error {
-		req.Cmd = append(req.Cmd, "--replSet", replSetName)
-		req.WaitingFor = wait.ForAll(
-			req.WaitingFor,
-			wait.ForExec(eval("rs.status().ok")),
-		).WithDeadline(60 * time.Second)
-		req.LifecycleHooks = append(req.LifecycleHooks, testcontainers.ContainerLifecycleHooks{
-			PostStarts: []testcontainers.ContainerHook{
-				func(ctx context.Context, c testcontainers.Container) error {
-					ip, err := c.ContainerIP(ctx)
-					if err != nil {
-						return fmt.Errorf("container ip: %w", err)
-					}
-
-					cmd := eval("rs.initiate({ _id: '%s', members: [ { _id: 0, host: '%s:27017' } ] })", replSetName, ip)
-					return wait.ForExec(cmd).WaitUntilReady(ctx, c)
-				},
-			},
-		})
+		req.Env[replicaSetOptEnvKey] = replSetName
 
 		return nil
 	}
@@ -115,28 +119,102 @@ func WithReplicaSet(replSetName string) testcontainers.CustomizeRequestOption {
 // ConnectionString returns the connection string for the MongoDB container.
 // If you provide a username and a password, the connection string will also include them.
 func (c *MongoDBContainer) ConnectionString(ctx context.Context) (string, error) {
-	host, err := c.Host(ctx)
+	endpoint, err := c.PortEndpoint(ctx, defaultPort, "")
 	if err != nil {
 		return "", err
 	}
 
-	port, err := c.MappedPort(ctx, "27017/tcp")
-	if err != nil {
-		return "", err
+	u := url.URL{
+		Scheme: "mongodb",
+		Host:   endpoint,
+		Path:   "/",
 	}
 
 	if c.username != "" && c.password != "" {
-		return fmt.Sprintf("mongodb://%s:%s@%s:%s", c.username, c.password, host, port.Port()), nil
+		u.User = url.UserPassword(c.username, c.password)
+	}
+
+	if c.replicaSet != "" {
+		q := url.Values{}
+		q.Add("replicaSet", c.replicaSet)
+		u.RawQuery = q.Encode()
+	}
+
+	return u.String(), nil
+}
+
+func setupEntrypointForAuth(req *testcontainers.GenericContainerRequest) {
+	req.Files = append(
+		req.Files, testcontainers.ContainerFile{
+			Reader:            bytes.NewReader(entrypointContent),
+			ContainerFilePath: entrypointPath,
+			FileMode:          0o755,
+		},
+	)
+	req.Entrypoint = []string{entrypointPath}
+	req.Env["MONGO_KEYFILE"] = keyFilePath
+}
+
+func configureRequestForReplicaset(
+	username string,
+	password string,
+	replicaSet string,
+	genericContainerReq *testcontainers.GenericContainerRequest,
+) error {
+	if username == "" || password == "" {
+		return noAuthReplicaSet(replicaSet)(genericContainerReq)
 	}
-	return c.Endpoint(ctx, "mongodb")
+
+	return withAuthReplicaset(replicaSet, username, password)(genericContainerReq)
+}
+
+func noAuthReplicaSet(replSetName string) testcontainers.CustomizeRequestOption {
+	return func(req *testcontainers.GenericContainerRequest) error {
+		cli := newMongoCli("", "")
+		req.Cmd = append(req.Cmd, "--replSet", replSetName)
+		initiateReplicaSet(req, cli, replSetName)
+
+		return nil
+	}
+}
+
+func initiateReplicaSet(req *testcontainers.GenericContainerRequest, cli mongoCli, replSetName string) {
+	req.WaitingFor = wait.ForAll(
+		req.WaitingFor,
+		wait.ForExec(cli.eval("rs.status().ok")),
+	).WithDeadline(60 * time.Second)
+
+	req.LifecycleHooks = append(
+		req.LifecycleHooks,
testcontainers.ContainerLifecycleHooks{ + PostStarts: []testcontainers.ContainerHook{ + func(ctx context.Context, c testcontainers.Container) error { + ip, err := c.ContainerIP(ctx) + if err != nil { + return fmt.Errorf("container ip: %w", err) + } + + cmd := cli.eval( + "rs.initiate({ _id: '%s', members: [ { _id: 0, host: '%s:27017' } ] })", + replSetName, + ip, + ) + return wait.ForExec(cmd).WaitUntilReady(ctx, c) + }, + }, + }, + ) } -// eval builds an mongosh|mongo eval command. -func eval(command string, args ...any) []string { - command = "\"" + fmt.Sprintf(command, args...) + "\"" +func withAuthReplicaset( + replSetName string, + username string, + password string, +) testcontainers.CustomizeRequestOption { + return func(req *testcontainers.GenericContainerRequest) error { + setupEntrypointForAuth(req) + cli := newMongoCli(username, password) + req.Cmd = append(req.Cmd, "--replSet", replSetName, "--keyFile", keyFilePath) + initiateReplicaSet(req, cli, replSetName) - return []string{ - "sh", - "-c", - // In previous versions, the binary "mongosh" was named "mongo". - "mongosh --quiet --eval " + command + " || mongo --quiet --eval " + command, + return nil } } diff --git a/vendor/github.com/testcontainers/testcontainers-go/modules/mongodb/mount/entrypoint-tc.sh b/vendor/github.com/testcontainers/testcontainers-go/modules/mongodb/mount/entrypoint-tc.sh new file mode 100644 index 000000000..1561415aa --- /dev/null +++ b/vendor/github.com/testcontainers/testcontainers-go/modules/mongodb/mount/entrypoint-tc.sh @@ -0,0 +1,32 @@ +#!/bin/bash + +set -Eeuo pipefail + +# detect mongo user and group +function get_user_group() { + user_group=$(cut -d: -f1,5 /etc/passwd | grep mongo) + echo "${user_group}" +} + +# detect the entrypoint +function get_entrypoint() { + entrypoint=$(find /usr/local/bin -name 'docker-entrypoint.*') + if [[ "${entrypoint}" == *.py ]]; then + entrypoint="python3 ${entrypoint}" + else + entrypoint="exec ${entrypoint}" + fi + echo "${entrypoint}" +} + +ENTRYPOINT=$(get_entrypoint) +MONGO_USER_GROUP=$(get_user_group) + +# Create the keyfile +openssl rand -base64 756 > "${MONGO_KEYFILE}" + +# Set the permissions and ownership of the keyfile +chown "${MONGO_USER_GROUP}" "${MONGO_KEYFILE}" +chmod 400 "${MONGO_KEYFILE}" + +${ENTRYPOINT} "$@" diff --git a/vendor/github.com/testcontainers/testcontainers-go/mounts.go b/vendor/github.com/testcontainers/testcontainers-go/mounts.go index a68e468b3..2e1d2c7e6 100644 --- a/vendor/github.com/testcontainers/testcontainers-go/mounts.go +++ b/vendor/github.com/testcontainers/testcontainers-go/mounts.go @@ -1,12 +1,16 @@ package testcontainers -import "errors" +import ( + "errors" + "path/filepath" +) const ( MountTypeBind MountType = iota // Deprecated: Use MountTypeVolume instead MountTypeVolume MountTypeTmpfs MountTypePipe + MountTypeImage ) var ( @@ -18,6 +22,7 @@ var ( _ ContainerMountSource = (*GenericBindMountSource)(nil) // Deprecated: use Files or HostConfigModifier in the ContainerRequest, or copy files container APIs to make containers portable across Docker environments _ ContainerMountSource = (*GenericVolumeMountSource)(nil) _ ContainerMountSource = (*GenericTmpfsMountSource)(nil) + _ ContainerMountSource = (*GenericImageMountSource)(nil) ) type ( @@ -110,6 +115,15 @@ func VolumeMount(volumeName string, mountTarget ContainerMountTarget) ContainerM } } +// ImageMount returns a new ContainerMount with a GenericImageMountSource as source +// This is a convenience method to cover typical use cases. 
+func ImageMount(imageName string, subpath string, mountTarget ContainerMountTarget) ContainerMount { + return ContainerMount{ + Source: NewGenericImageMountSource(imageName, subpath), + Target: mountTarget, + } +} + // Mounts returns a ContainerMounts to support a more fluent API func Mounts(mounts ...ContainerMount) ContainerMounts { return mounts @@ -124,3 +138,38 @@ type ContainerMount struct { // ReadOnly determines if the mount should be read-only ReadOnly bool } + +// GenericImageMountSource implements ContainerMountSource and represents an image mount +type GenericImageMountSource struct { + // imageName refers to the name of the image to be mounted + // the same image might be mounted to multiple locations within a single container + imageName string + // subpath is the path within the image to be mounted + subpath string +} + +// NewGenericImageMountSource creates a new GenericImageMountSource +func NewGenericImageMountSource(imageName string, subpath string) GenericImageMountSource { + return GenericImageMountSource{ + imageName: imageName, + subpath: subpath, + } +} + +// Source returns the name of the image to be mounted +func (s GenericImageMountSource) Source() string { + return s.imageName +} + +// Type returns the type of the mount +func (GenericImageMountSource) Type() MountType { + return MountTypeImage +} + +// Validate validates the source of the mount +func (s GenericImageMountSource) Validate() error { + if !filepath.IsLocal(s.subpath) { + return errors.New("image mount source must be a local path") + } + return nil +} diff --git a/vendor/github.com/testcontainers/testcontainers-go/network.go b/vendor/github.com/testcontainers/testcontainers-go/network.go index 9544bee12..e0cc83f51 100644 --- a/vendor/github.com/testcontainers/testcontainers-go/network.go +++ b/vendor/github.com/testcontainers/testcontainers-go/network.go @@ -4,6 +4,8 @@ import ( "context" "github.com/docker/docker/api/types/network" + + "github.com/testcontainers/testcontainers-go/internal/core" ) // NetworkProvider allows the creation of networks on an arbitrary system @@ -23,12 +25,12 @@ type DefaultNetwork string // Deprecated: will be removed in the future. func (n DefaultNetwork) ApplyGenericTo(opts *GenericProviderOptions) { - opts.DefaultNetwork = string(n) + opts.defaultNetwork = string(n) } // Deprecated: will be removed in the future. func (n DefaultNetwork) ApplyDockerTo(opts *DockerProviderOptions) { - opts.DefaultNetwork = string(n) + opts.defaultNetwork = string(n) } // Deprecated: will be removed in the future @@ -47,3 +49,12 @@ type NetworkRequest struct { ReaperImage string // Deprecated: use WithImageName ContainerOption instead. Alternative reaper registry ReaperOptions []ContainerOption // Deprecated: the reaper is configured at the properties level, for an entire test session } + +// sessionID returns the session ID for the network request. 
+func (r NetworkRequest) sessionID() string { + if sessionID := r.Labels[core.LabelSessionID]; sessionID != "" { + return sessionID + } + + return core.SessionID() +} diff --git a/vendor/github.com/testcontainers/testcontainers-go/options.go b/vendor/github.com/testcontainers/testcontainers-go/options.go index 2849b1566..a930c5410 100644 --- a/vendor/github.com/testcontainers/testcontainers-go/options.go +++ b/vendor/github.com/testcontainers/testcontainers-go/options.go @@ -2,7 +2,9 @@ package testcontainers import ( "context" + "errors" "fmt" + "maps" "net/url" "time" @@ -41,6 +43,15 @@ func CustomizeRequest(src GenericContainerRequest) CustomizeRequestOption { } } +// WithDockerfile allows to build a container from a Dockerfile +func WithDockerfile(df FromDockerfile) CustomizeRequestOption { + return func(req *GenericContainerRequest) error { + req.FromDockerfile = df + + return nil + } +} + // WithConfigModifier allows to override the default container config func WithConfigModifier(modifier func(config *container.Config)) CustomizeRequestOption { return func(req *GenericContainerRequest) error { @@ -53,7 +64,7 @@ func WithConfigModifier(modifier func(config *container.Config)) CustomizeReques // WithEndpointSettingsModifier allows to override the default endpoint settings func WithEndpointSettingsModifier(modifier func(settings map[string]*network.EndpointSettings)) CustomizeRequestOption { return func(req *GenericContainerRequest) error { - req.EnpointSettingsModifier = modifier + req.EndpointSettingsModifier = modifier return nil } @@ -67,9 +78,7 @@ func WithEnv(envs map[string]string) CustomizeRequestOption { req.Env = map[string]string{} } - for key, val := range envs { - req.Env[key] = val - } + maps.Copy(req.Env, envs) return nil } @@ -96,7 +105,38 @@ func WithHostPortAccess(ports ...int) CustomizeRequestOption { } } -// Deprecated: the modules API forces passing the image as part of the signature of the Run function. +// WithName will set the name of the container. +func WithName(containerName string) CustomizeRequestOption { + return func(req *GenericContainerRequest) error { + if containerName == "" { + return errors.New("container name must be provided") + } + req.Name = containerName + return nil + } +} + +// WithNoStart will prevent the container from being started after creation. +func WithNoStart() CustomizeRequestOption { + return func(req *GenericContainerRequest) error { + req.Started = false + return nil + } +} + +// WithReuseByName will mark a container to be reused if it exists or create a new one if it doesn't. +// A container name must be provided to identify the container to be reused. +func WithReuseByName(containerName string) CustomizeRequestOption { + return func(req *GenericContainerRequest) error { + if err := WithName(containerName)(req); err != nil { + return err + } + + req.Reuse = true + return nil + } +} + // WithImage sets the image for a container func WithImage(image string) CustomizeRequestOption { return func(req *GenericContainerRequest) error { @@ -186,7 +226,7 @@ func (p prependHubRegistry) Description() string { // - if the prefix is empty, the image is returned as is. // - if the image is a non-hub image (e.g. where another registry is set), the image is returned as is. // - if the image is a Docker Hub image where the hub registry is explicitly part of the name -// (i.e. anything with a docker.io or registry.hub.docker.com host part), the image is returned as is. +// (i.e. 
anything with a registry.hub.docker.com host part), the image is returned as is. func (p prependHubRegistry) Substitute(image string) (string, error) { registry := core.ExtractRegistry(image, "") @@ -233,6 +273,17 @@ func WithLogConsumers(consumer ...LogConsumer) CustomizeRequestOption { } } +// WithLogConsumerConfig sets the log consumer config for a container. +// Beware that this option completely replaces the existing log consumer config, +// including the log consumers and the log production options, +// so it should be used with care. +func WithLogConsumerConfig(config *LogConsumerConfig) CustomizeRequestOption { + return func(req *GenericContainerRequest) error { + req.LogConsumerCfg = config + return nil + } +} + // Executable represents an executable command to be sent to a container, including options, // as part of the different lifecycle hooks. type Executable interface { @@ -259,11 +310,11 @@ type RawCommand struct { cmds []string } -func NewRawCommand(cmds []string) RawCommand { +func NewRawCommand(cmds []string, opts ...tcexec.ProcessOption) RawCommand { return RawCommand{ cmds: cmds, ExecOptions: ExecOptions{ - opts: []tcexec.ProcessOption{}, + opts: opts, }, } } @@ -321,12 +372,17 @@ func WithAfterReadyCommand(execs ...Executable) CustomizeRequestOption { } } -// WithWaitStrategy sets the wait strategy for a container, using 60 seconds as deadline +// WithWaitStrategy replaces the wait strategy for a container, using 60 seconds as deadline func WithWaitStrategy(strategies ...wait.Strategy) CustomizeRequestOption { return WithWaitStrategyAndDeadline(60*time.Second, strategies...) } -// WithWaitStrategyAndDeadline sets the wait strategy for a container, including deadline +// WithAdditionalWaitStrategy appends the wait strategy for a container, using 60 seconds as deadline +func WithAdditionalWaitStrategy(strategies ...wait.Strategy) CustomizeRequestOption { + return WithAdditionalWaitStrategyAndDeadline(60*time.Second, strategies...) +} + +// WithWaitStrategyAndDeadline replaces the wait strategy for a container, including deadline func WithWaitStrategyAndDeadline(deadline time.Duration, strategies ...wait.Strategy) CustomizeRequestOption { return func(req *GenericContainerRequest) error { req.WaitingFor = wait.ForAll(strategies...).WithDeadline(deadline) @@ -334,3 +390,159 @@ func WithWaitStrategyAndDeadline(deadline time.Duration, strategies ...wait.Stra return nil } } + +// WithAdditionalWaitStrategyAndDeadline appends the wait strategy for a container, including deadline +func WithAdditionalWaitStrategyAndDeadline(deadline time.Duration, strategies ...wait.Strategy) CustomizeRequestOption { + return func(req *GenericContainerRequest) error { + if req.WaitingFor == nil { + req.WaitingFor = wait.ForAll(strategies...).WithDeadline(deadline) + return nil + } + + wss := make([]wait.Strategy, 0, len(strategies)+1) + wss = append(wss, req.WaitingFor) + wss = append(wss, strategies...) + + req.WaitingFor = wait.ForAll(wss...).WithDeadline(deadline) + + return nil + } +} + +// WithImageMount mounts an image to a container, passing the source image name, +// the relative subpath to mount in that image, and the mount point in the target container. +// This option validates that the subpath is a relative path, raising an error otherwise. 
+func WithImageMount(source string, subpath string, target ContainerMountTarget) CustomizeRequestOption { + return func(req *GenericContainerRequest) error { + src := NewDockerImageMountSource(source, subpath) + + if err := src.Validate(); err != nil { + return fmt.Errorf("validate image mount source: %w", err) + } + + req.Mounts = append(req.Mounts, ContainerMount{ + Source: src, + Target: target, + }) + return nil + } +} + +// WithAlwaysPull will pull the image before starting the container +func WithAlwaysPull() CustomizeRequestOption { + return func(req *GenericContainerRequest) error { + req.AlwaysPullImage = true + return nil + } +} + +// WithImagePlatform sets the platform for a container +func WithImagePlatform(platform string) CustomizeRequestOption { + return func(req *GenericContainerRequest) error { + req.ImagePlatform = platform + return nil + } +} + +// WithEntrypoint completely replaces the entrypoint of a container +func WithEntrypoint(entrypoint ...string) CustomizeRequestOption { + return func(req *GenericContainerRequest) error { + req.Entrypoint = entrypoint + return nil + } +} + +// WithEntrypointArgs appends the entrypoint arguments to the entrypoint of a container +func WithEntrypointArgs(entrypointArgs ...string) CustomizeRequestOption { + return func(req *GenericContainerRequest) error { + req.Entrypoint = append(req.Entrypoint, entrypointArgs...) + return nil + } +} + +// WithExposedPorts appends the ports to the exposed ports for a container +func WithExposedPorts(ports ...string) CustomizeRequestOption { + return func(req *GenericContainerRequest) error { + req.ExposedPorts = append(req.ExposedPorts, ports...) + return nil + } +} + +// WithCmd completely replaces the command for a container +func WithCmd(cmd ...string) CustomizeRequestOption { + return func(req *GenericContainerRequest) error { + req.Cmd = cmd + return nil + } +} + +// WithCmdArgs appends the command arguments to the command for a container +func WithCmdArgs(cmdArgs ...string) CustomizeRequestOption { + return func(req *GenericContainerRequest) error { + req.Cmd = append(req.Cmd, cmdArgs...) + return nil + } +} + +// WithLabels appends the labels to the labels for a container +func WithLabels(labels map[string]string) CustomizeRequestOption { + return func(req *GenericContainerRequest) error { + if req.Labels == nil { + req.Labels = make(map[string]string) + } + maps.Copy(req.Labels, labels) + return nil + } +} + +// WithLifecycleHooks completely replaces the lifecycle hooks for a container +func WithLifecycleHooks(hooks ...ContainerLifecycleHooks) CustomizeRequestOption { + return func(req *GenericContainerRequest) error { + req.LifecycleHooks = hooks + return nil + } +} + +// WithAdditionalLifecycleHooks appends lifecycle hooks to the existing ones for a container +func WithAdditionalLifecycleHooks(hooks ...ContainerLifecycleHooks) CustomizeRequestOption { + return func(req *GenericContainerRequest) error { + req.LifecycleHooks = append(req.LifecycleHooks, hooks...) + return nil + } +} + +// WithMounts appends the mounts to the mounts for a container +func WithMounts(mounts ...ContainerMount) CustomizeRequestOption { + return func(req *GenericContainerRequest) error { + req.Mounts = append(req.Mounts, mounts...) 
+ return nil + } +} + +// WithTmpfs appends the tmpfs mounts to the tmpfs mounts for a container +func WithTmpfs(tmpfs map[string]string) CustomizeRequestOption { + return func(req *GenericContainerRequest) error { + if req.Tmpfs == nil { + req.Tmpfs = make(map[string]string) + } + maps.Copy(req.Tmpfs, tmpfs) + return nil + } +} + +// WithFiles appends the files to the files for a container +func WithFiles(files ...ContainerFile) CustomizeRequestOption { + return func(req *GenericContainerRequest) error { + req.Files = append(req.Files, files...) + return nil + } +} + +// WithProvider sets the provider type for a container +func WithProvider(provider ProviderType) CustomizeRequestOption { + return func(req *GenericContainerRequest) error { + req.ProviderType = provider + + return nil + } +} diff --git a/vendor/github.com/testcontainers/testcontainers-go/parallel.go b/vendor/github.com/testcontainers/testcontainers-go/parallel.go index 0349023ba..a75d011f9 100644 --- a/vendor/github.com/testcontainers/testcontainers-go/parallel.go +++ b/vendor/github.com/testcontainers/testcontainers-go/parallel.go @@ -61,10 +61,7 @@ func ParallelContainers(ctx context.Context, reqs ParallelContainerRequest, opt opt.WorkersCount = defaultWorkersCount } - tasksChanSize := opt.WorkersCount - if tasksChanSize > len(reqs) { - tasksChanSize = len(reqs) - } + tasksChanSize := min(opt.WorkersCount, len(reqs)) tasksChan := make(chan GenericContainerRequest, tasksChanSize) resultsChan := make(chan parallelContainersResult, tasksChanSize) @@ -74,7 +71,7 @@ func ParallelContainers(ctx context.Context, reqs ParallelContainerRequest, opt wg.Add(tasksChanSize) // run workers - for i := 0; i < tasksChanSize; i++ { + for range tasksChanSize { go parallelContainersRunner(ctx, tasksChan, resultsChan, &wg) } diff --git a/vendor/github.com/testcontainers/testcontainers-go/port_forwarding.go b/vendor/github.com/testcontainers/testcontainers-go/port_forwarding.go index 88f14f2d7..107bd42d1 100644 --- a/vendor/github.com/testcontainers/testcontainers-go/port_forwarding.go +++ b/vendor/github.com/testcontainers/testcontainers-go/port_forwarding.go @@ -6,6 +6,8 @@ import ( "fmt" "io" "net" + "slices" + "sync" "time" "github.com/docker/docker/api/types/container" @@ -18,7 +20,7 @@ import ( const ( // hubSshdImage { - sshdImage string = "testcontainers/sshd:1.2.0" + sshdImage string = "testcontainers/sshd:1.3.0" // } // HostInternal is the internal hostname used to reach the host from the container, @@ -38,9 +40,9 @@ var sshPassword = uuid.NewString() // 1. Create a new SSHD container. // 2. Expose the host ports to the container after the container is ready. // 3. Close the SSH sessions before killing the container. -func exposeHostPorts(ctx context.Context, req *ContainerRequest, ports ...int) (sshdConnectHook ContainerLifecycleHooks, err error) { //nolint:nonamedreturns // Required for error check. +func exposeHostPorts(ctx context.Context, req *ContainerRequest, ports ...int) (sshdConnectHook ContainerLifecycleHooks, err error) { if len(ports) == 0 { - return sshdConnectHook, fmt.Errorf("no ports to expose") + return sshdConnectHook, errors.New("no ports to expose") } // Use the first network of the container to connect to the SSHD container. 
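For orientation on the hunks that follow: host port access is requested through ContainerRequest.HostAccessPorts and served by the SSHD side-container this diff reworks. A minimal usage sketch, in which the image, port, and command are illustrative assumptions rather than values from the diff:

package main

import (
	"context"
	"fmt"

	"github.com/testcontainers/testcontainers-go"
)

func main() {
	ctx := context.Background()

	// Assumes something on the host is already listening on port 8080.
	ctr, err := testcontainers.GenericContainer(ctx, testcontainers.GenericContainerRequest{
		ContainerRequest: testcontainers.ContainerRequest{
			Image:           "alpine:3.20",
			Cmd:             []string{"sleep", "infinity"},
			HostAccessPorts: []int{8080}, // triggers the SSHD tunnel below
		},
		Started: true,
	})
	if err != nil {
		panic(err)
	}
	defer func() { _ = ctr.Terminate(ctx) }()

	// Inside the container, the host service is reachable via HostInternal.
	code, _, err := ctr.Exec(ctx, []string{"wget", "-qO-",
		fmt.Sprintf("http://%s:8080", testcontainers.HostInternal)})
	fmt.Println(code, err)
}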
@@ -99,14 +101,30 @@ func exposeHostPorts(ctx context.Context, req *ContainerRequest, ports ...int) ( return sshdConnectHook, fmt.Errorf("new sshd container: %w", err) } - // IP in the first network of the container - sshdIP, err := sshdContainer.ContainerIP(context.Background()) + // IP in the first network of the container. + inspect, err := sshdContainer.Inspect(ctx) if err != nil { - return sshdConnectHook, fmt.Errorf("get sshd container IP: %w", err) + return sshdConnectHook, fmt.Errorf("inspect sshd container: %w", err) + } + + // TODO: remove once we have docker context support via #2810 + sshdIP := inspect.NetworkSettings.IPAddress + if sshdIP == "" { + single := len(inspect.NetworkSettings.Networks) == 1 + for name, network := range inspect.NetworkSettings.Networks { + if name == sshdFirstNetwork || single { + sshdIP = network.IPAddress + break + } + } + } + + if sshdIP == "" { + return sshdConnectHook, errors.New("sshd container IP not found") } if req.HostConfigModifier == nil { - req.HostConfigModifier = func(hostConfig *container.HostConfig) {} + req.HostConfigModifier = func(_ *container.HostConfig) {} } // do not override the original HostConfigModifier @@ -118,13 +136,7 @@ func exposeHostPorts(ctx context.Context, req *ContainerRequest, ports ...int) ( modes := []container.NetworkMode{container.NetworkMode(sshdFirstNetwork), "none", "host"} // if the container is not in one of the modes, attach it to the first network of the SSHD container - found := false - for _, mode := range modes { - if hostConfig.NetworkMode == mode { - found = true - break - } - } + found := slices.Contains(modes, hostConfig.NetworkMode) if !found { req.Networks = append(req.Networks, sshdFirstNetwork) } @@ -151,11 +163,11 @@ func exposeHostPorts(ctx context.Context, req *ContainerRequest, ports ...int) ( // for each exposed port from the host. sshdConnectHook = ContainerLifecycleHooks{ PostReadies: []ContainerHook{ - func(ctx context.Context, c Container) error { + func(ctx context.Context, _ Container) error { return sshdContainer.exposeHostPort(ctx, req.HostAccessPorts...) }, }, - PreStops: stopHooks, + PostStops: stopHooks, PreTerminates: stopHooks, } @@ -166,11 +178,10 @@ func exposeHostPorts(ctx context.Context, req *ContainerRequest, ports ...int) ( func newSshdContainer(ctx context.Context, opts ...ContainerCustomizer) (*sshdContainer, error) { req := GenericContainerRequest{ ContainerRequest: ContainerRequest{ - Image: sshdImage, - HostAccessPorts: []int{}, // empty list because it does not need any port - ExposedPorts: []string{sshPort}, - Env: map[string]string{"PASSWORD": sshPassword}, - WaitingFor: wait.ForListeningPort(sshPort), + Image: sshdImage, + ExposedPorts: []string{sshPort}, + Env: map[string]string{"PASSWORD": sshPassword}, + WaitingFor: wait.ForListeningPort(sshPort), }, Started: true, } @@ -191,183 +202,221 @@ func newSshdContainer(ctx context.Context, opts ...ContainerCustomizer) (*sshdCo return sshd, fmt.Errorf("generic container: %w", err) } - sshClientConfig, err := configureSSHConfig(ctx, sshd) - if err != nil { - // return the container and the error to the caller to handle it + if err = sshd.clientConfig(ctx); err != nil { + // Return the container and the error to the caller to handle it. return sshd, err } - sshd.sshConfig = sshClientConfig - return sshd, nil } // sshdContainer represents the SSHD container type used for the port forwarding container. -// It's an internal type that extends the DockerContainer type, to add the SSH tunneling capabilities. 
+// It's an internal type that extends the DockerContainer type, to add the SSH tunnelling capabilities. type sshdContainer struct { Container port string sshConfig *ssh.ClientConfig - portForwarders []PortForwarder + portForwarders []*portForwarder } // Terminate stops the container and closes the SSH session -func (sshdC *sshdContainer) Terminate(ctx context.Context) error { - sshdC.closePorts(ctx) - - return sshdC.Container.Terminate(ctx) +func (sshdC *sshdContainer) Terminate(ctx context.Context, opts ...TerminateOption) error { + return errors.Join( + sshdC.closePorts(), + sshdC.Container.Terminate(ctx, opts...), + ) } // Stop stops the container and closes the SSH session func (sshdC *sshdContainer) Stop(ctx context.Context, timeout *time.Duration) error { - sshdC.closePorts(ctx) - - return sshdC.Container.Stop(ctx, timeout) + return errors.Join( + sshdC.closePorts(), + sshdC.Container.Stop(ctx, timeout), + ) } // closePorts closes all port forwarders. -func (sshdC *sshdContainer) closePorts(ctx context.Context) { +func (sshdC *sshdContainer) closePorts() error { + var errs []error for _, pfw := range sshdC.portForwarders { - pfw.Close(ctx) + if err := pfw.Close(); err != nil { + errs = append(errs, err) + } } sshdC.portForwarders = nil // Ensure the port forwarders are not used after closing. + return errors.Join(errs...) } -func configureSSHConfig(ctx context.Context, sshdC *sshdContainer) (*ssh.ClientConfig, error) { +// clientConfig sets up the SSHD client configuration. +func (sshdC *sshdContainer) clientConfig(ctx context.Context) error { mappedPort, err := sshdC.MappedPort(ctx, sshPort) if err != nil { - return nil, fmt.Errorf("mapped port: %w", err) + return fmt.Errorf("mapped port: %w", err) } - sshdC.port = mappedPort.Port() - sshConfig := ssh.ClientConfig{ + sshdC.port = mappedPort.Port() + sshdC.sshConfig = &ssh.ClientConfig{ User: user, HostKeyCallback: ssh.InsecureIgnoreHostKey(), Auth: []ssh.AuthMethod{ssh.Password(sshPassword)}, - Timeout: 30 * time.Second, } - return &sshConfig, nil + return nil } -func (sshdC *sshdContainer) exposeHostPort(ctx context.Context, ports ...int) error { +// exposeHostPort exposes the host ports to the container. +func (sshdC *sshdContainer) exposeHostPort(ctx context.Context, ports ...int) (err error) { + defer func() { + if err != nil { + err = errors.Join(err, sshdC.closePorts()) + } + }() for _, port := range ports { - pw := NewPortForwarder(fmt.Sprintf("localhost:%s", sshdC.port), sshdC.sshConfig, port, port) - sshdC.portForwarders = append(sshdC.portForwarders, *pw) - - go pw.Forward(ctx) //nolint:errcheck // Nothing we can usefully do with the error - } - - var err error + pf, err := newPortForwarder(ctx, "localhost:"+sshdC.port, sshdC.sshConfig, port) + if err != nil { + return fmt.Errorf("new port forwarder: %w", err) + } - // continue when all port forwarders have created the connection - for _, pfw := range sshdC.portForwarders { - err = errors.Join(err, <-pfw.connectionCreated) + sshdC.portForwarders = append(sshdC.portForwarders, pf) } - return err + return nil } -type PortForwarder struct { - sshDAddr string - sshConfig *ssh.ClientConfig - remotePort int - localPort int - connectionCreated chan error // used to signal that the connection has been created, so the caller can proceed - terminateChan chan struct{} // used to signal that the connection has been terminated +// portForwarder forwards a port from the container to the host. 
+type portForwarder struct { + client *ssh.Client + listener net.Listener + dialTimeout time.Duration + localAddr string + ctx context.Context + cancel context.CancelFunc + + // closeMtx protects the close operation + closeMtx sync.Mutex + closeErr error } -func NewPortForwarder(sshDAddr string, sshConfig *ssh.ClientConfig, remotePort, localPort int) *PortForwarder { - return &PortForwarder{ - sshDAddr: sshDAddr, - sshConfig: sshConfig, - remotePort: remotePort, - localPort: localPort, - connectionCreated: make(chan error), - terminateChan: make(chan struct{}), +// newPortForwarder creates a new running portForwarder for the given port. +// The context is only used for the initial SSH connection. +func newPortForwarder(ctx context.Context, sshDAddr string, sshConfig *ssh.ClientConfig, port int) (pf *portForwarder, err error) { + var d net.Dialer + conn, err := d.DialContext(ctx, "tcp", sshDAddr) + if err != nil { + return nil, fmt.Errorf("ssh dial: %w", err) } -} -func (pf *PortForwarder) Close(ctx context.Context) { - close(pf.terminateChan) - close(pf.connectionCreated) -} + // Ensure the connection is closed in case of error. + defer func() { + if err != nil { + err = errors.Join(err, conn.Close()) + } + }() -func (pf *PortForwarder) Forward(ctx context.Context) error { - client, err := ssh.Dial("tcp", pf.sshDAddr, pf.sshConfig) + c, chans, reqs, err := ssh.NewClientConn(conn, sshDAddr, sshConfig) if err != nil { - err = fmt.Errorf("error dialing ssh server: %w", err) - pf.connectionCreated <- err - return err + return nil, fmt.Errorf("ssh new client conn: %w", err) } - defer client.Close() - listener, err := client.Listen("tcp", fmt.Sprintf("localhost:%d", pf.remotePort)) + client := ssh.NewClient(c, chans, reqs) + + listener, err := client.Listen("tcp", fmt.Sprintf("localhost:%d", port)) if err != nil { - err = fmt.Errorf("error listening on remote port: %w", err) - pf.connectionCreated <- err - return err + return nil, fmt.Errorf("listening on remote port %d: %w", port, err) + } + + ctx, cancel := context.WithCancel(context.Background()) + + pf = &portForwarder{ + client: client, + listener: listener, + localAddr: fmt.Sprintf("localhost:%d", port), + ctx: ctx, + cancel: cancel, + dialTimeout: time.Second * 2, } - defer listener.Close() - // signal that the connection has been created - pf.connectionCreated <- nil + go pf.run() + + return pf, nil +} + +// Close closes the port forwarder. +func (pf *portForwarder) Close() error { + pf.closeMtx.Lock() + defer pf.closeMtx.Unlock() - // check if the context or the terminateChan has been closed select { - case <-ctx.Done(): - if err := listener.Close(); err != nil { - return fmt.Errorf("error closing listener: %w", err) - } - if err := client.Close(); err != nil { - return fmt.Errorf("error closing client: %w", err) - } - return nil - case <-pf.terminateChan: - if err := listener.Close(); err != nil { - return fmt.Errorf("error closing listener: %w", err) - } - if err := client.Close(); err != nil { - return fmt.Errorf("error closing client: %w", err) - } - return nil + case <-pf.ctx.Done(): + // Already closed. + return pf.closeErr default: } + var errs []error + if err := pf.listener.Close(); err != nil { + errs = append(errs, fmt.Errorf("close listener: %w", err)) + } + if err := pf.client.Close(); err != nil { + errs = append(errs, fmt.Errorf("close client: %w", err)) + } + + pf.closeErr = errors.Join(errs...) + pf.cancel() + + return pf.closeErr +} + +// run forwards the port from the remote connection to the local connection. 
+func (pf *portForwarder) run() { for { - remote, err := listener.Accept() + remote, err := pf.listener.Accept() if err != nil { - return fmt.Errorf("error accepting connection: %w", err) + if errors.Is(err, io.EOF) { + // The listener has been closed. + return + } + + // Ignore errors as they are transient and we want requests to + // continue to be accepted. + continue } - go pf.runTunnel(ctx, remote) + go pf.tunnel(remote) } } -// runTunnel runs a tunnel between two connections; as soon as one connection -// reaches EOF or reports an error, both connections are closed and this -// function returns. -func (pf *PortForwarder) runTunnel(ctx context.Context, remote net.Conn) { +// tunnel runs a tunnel between two connections; as soon as the forwarder +// context is cancelled or one connection copies returns, irrespective of +// the error, both connections are closed. +func (pf *portForwarder) tunnel(remote net.Conn) { + defer remote.Close() + + ctx, cancel := context.WithTimeout(pf.ctx, pf.dialTimeout) + defer cancel() + var dialer net.Dialer - local, err := dialer.DialContext(ctx, "tcp", fmt.Sprintf("localhost:%d", pf.localPort)) + local, err := dialer.DialContext(ctx, "tcp", pf.localAddr) if err != nil { - remote.Close() + // Nothing we can do with the error. return } defer local.Close() - defer remote.Close() - done := make(chan struct{}, 2) + ctx, cancel = context.WithCancel(pf.ctx) go func() { - io.Copy(local, remote) //nolint:errcheck // Nothing we can usefully do with the error - done <- struct{}{} + defer cancel() + io.Copy(local, remote) //nolint:errcheck // Nothing useful we can do with the error. }() go func() { - io.Copy(remote, local) //nolint:errcheck // Nothing we can usefully do with the error - done <- struct{}{} + defer cancel() + io.Copy(remote, local) //nolint:errcheck // Nothing useful we can do with the error. }() - <-done + // Wait for the context to be done before returning which triggers + // both connections to close. This is done to prevent the copies + // blocking forever on unused connections. 
+ <-ctx.Done() } diff --git a/vendor/github.com/testcontainers/testcontainers-go/provider.go b/vendor/github.com/testcontainers/testcontainers-go/provider.go index b5e5ffa99..d2347b7f3 100644 --- a/vendor/github.com/testcontainers/testcontainers-go/provider.go +++ b/vendor/github.com/testcontainers/testcontainers-go/provider.go @@ -9,6 +9,7 @@ import ( "github.com/testcontainers/testcontainers-go/internal/config" "github.com/testcontainers/testcontainers-go/internal/core" + "github.com/testcontainers/testcontainers-go/log" ) // possible provider types @@ -24,8 +25,8 @@ type ( // GenericProviderOptions defines options applicable to all providers GenericProviderOptions struct { - Logger Logging - DefaultNetwork string + Logger log.Logger + defaultNetwork string } // GenericProviderOption defines a common interface to modify GenericProviderOptions @@ -96,7 +97,7 @@ type ContainerProvider interface { // GetProvider provides the provider implementation for a certain type func (t ProviderType) GetProvider(opts ...GenericProviderOption) (GenericProvider, error) { opt := &GenericProviderOptions{ - Logger: Logger, + Logger: log.Default(), } for _, o := range opts { @@ -131,7 +132,7 @@ func (t ProviderType) GetProvider(opts ...GenericProviderOption) (GenericProvide func NewDockerProvider(provOpts ...DockerProviderOption) (*DockerProvider, error) { o := &DockerProviderOptions{ GenericProviderOptions: &GenericProviderOptions{ - Logger: Logger, + Logger: log.Default(), }, } diff --git a/vendor/github.com/testcontainers/testcontainers-go/reaper.go b/vendor/github.com/testcontainers/testcontainers-go/reaper.go index 8f2bde8ab..4e46f0e38 100644 --- a/vendor/github.com/testcontainers/testcontainers-go/reaper.go +++ b/vendor/github.com/testcontainers/testcontainers-go/reaper.go @@ -14,14 +14,15 @@ import ( "time" "github.com/cenkalti/backoff/v4" + "github.com/containerd/errdefs" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/errdefs" "github.com/docker/go-connections/nat" "github.com/testcontainers/testcontainers-go/internal/config" "github.com/testcontainers/testcontainers-go/internal/core" + "github.com/testcontainers/testcontainers-go/log" "github.com/testcontainers/testcontainers-go/wait" ) @@ -69,7 +70,7 @@ type ReaperProvider interface { // // The caller must call Connect at least once on the returned Reaper and use the returned // result otherwise the reaper will be kept open until the process exits. -func NewReaper(ctx context.Context, sessionID string, provider ReaperProvider, reaperImageName string) (*Reaper, error) { +func NewReaper(ctx context.Context, sessionID string, provider ReaperProvider, _ string) (*Reaper, error) { reaper, err := spawner.reaper(ctx, sessionID, provider) if err != nil { return nil, fmt.Errorf("reaper: %w", err) @@ -83,7 +84,7 @@ func NewReaper(ctx context.Context, sessionID string, provider ReaperProvider, r func reaperContainerNameFromSessionID(sessionID string) string { // The session id is 64 characters, so we will not hit the limit of 128 // characters for container names. - return fmt.Sprintf("reaper_%s", sessionID) + return "reaper_" + sessionID } // reaperSpawner is a singleton that manages the reaper container. @@ -92,7 +93,7 @@ type reaperSpawner struct { mtx sync.Mutex } -// port returns the port that a new reaper should listens on. +// port returns the port that a new reaper should listen on. 
func (r *reaperSpawner) port() nat.Port { if port := os.Getenv("RYUK_PORT"); port != "" { natPort, err := nat.NewPort("tcp", port) @@ -161,6 +162,13 @@ func (r *reaperSpawner) lookupContainer(ctx context.Context, sessionID string) ( } defer dockerClient.Close() + provider, err := NewDockerProvider() + if err != nil { + return nil, fmt.Errorf("new provider: %w", err) + } + + provider.SetClient(dockerClient) + opts := container.ListOptions{ All: true, Filters: filters.NewArgs( @@ -184,11 +192,10 @@ func (r *reaperSpawner) lookupContainer(ctx context.Context, sessionID string) ( } if len(resp) > 1 { - return nil, fmt.Errorf("multiple reaper containers found for session ID %s", sessionID) + return nil, fmt.Errorf("found %d reaper containers for session ID %q", len(resp), sessionID) } - container := resp[0] - r, err := containerFromDockerResponse(ctx, container) + r, err := provider.ContainerFromType(ctx, resp[0]) if err != nil { return nil, fmt.Errorf("from docker: %w", err) } @@ -217,7 +224,7 @@ func (r *reaperSpawner) isRunning(ctx context.Context, ctr Container) error { if !state.Running { // Use NotFound error to indicate the container is not running // and should be recreated. - return errdefs.NotFound(fmt.Errorf("container state: %s", state.Status)) + return errdefs.ErrNotFound.WithMessage("container state: " + state.Status) } return nil @@ -271,7 +278,7 @@ func (r *reaperSpawner) reaper(ctx context.Context, sessionID string, provider R // If connect is true, the reaper will be connected to the reaper container. // It must be called with the lock held. func (r *reaperSpawner) retryLocked(ctx context.Context, sessionID string, provider ReaperProvider) func() (*Reaper, error) { - return func() (reaper *Reaper, err error) { //nolint:nonamedreturns // Needed for deferred error check. + return func() (reaper *Reaper, err error) { reaper, err = r.reuseOrCreate(ctx, sessionID, provider) // Ensure that the reaper is terminated if an error occurred. defer func() { @@ -339,7 +346,7 @@ func (r *reaperSpawner) reuseOrCreate(ctx context.Context, sessionID string, pro // fromContainer constructs a Reaper from an already running reaper DockerContainer. func (r *reaperSpawner) fromContainer(ctx context.Context, sessionID string, provider ReaperProvider, dockerContainer *DockerContainer) (*Reaper, error) { - Logger.Printf("⏳ Waiting for Reaper %q to be ready", dockerContainer.ID[:8]) + log.Printf("⏳ Waiting for Reaper %q to be ready", dockerContainer.ID[:8]) // Reusing an existing container so we determine the port from the container's exposed ports. if err := wait.ForExposedPort(). @@ -354,7 +361,7 @@ func (r *reaperSpawner) fromContainer(ctx context.Context, sessionID string, pro return nil, fmt.Errorf("port endpoint: %w", err) } - Logger.Printf("🔥 Reaper obtained from Docker for this test session %s", dockerContainer.ID[:8]) + log.Printf("🔥 Reaper obtained from Docker for this test session %s", dockerContainer.ID[:8]) return &Reaper{ Provider: provider, @@ -366,7 +373,7 @@ func (r *reaperSpawner) fromContainer(ctx context.Context, sessionID string, pro // newReaper creates a connected Reaper with a sessionID to identify containers // and a provider to use. -func (r *reaperSpawner) newReaper(ctx context.Context, sessionID string, provider ReaperProvider) (reaper *Reaper, err error) { //nolint:nonamedreturns // Needed for deferred error check. 
+func (r *reaperSpawner) newReaper(ctx context.Context, sessionID string, provider ReaperProvider) (reaper *Reaper, err error) { dockerHostMount := core.MustExtractDockerSocket(ctx) port := r.port() @@ -375,13 +382,13 @@ func (r *reaperSpawner) newReaper(ctx context.Context, sessionID string, provide Image: config.ReaperDefaultImage, ExposedPorts: []string{string(port)}, Labels: core.DefaultLabels(sessionID), - Privileged: tcConfig.RyukPrivileged, WaitingFor: wait.ForListeningPort(port), Name: reaperContainerNameFromSessionID(sessionID), HostConfigModifier: func(hc *container.HostConfig) { hc.AutoRemove = true hc.Binds = []string{dockerHostMount + ":/var/run/docker.sock"} hc.NetworkMode = Bridge + hc.Privileged = tcConfig.RyukPrivileged }, Env: map[string]string{}, } @@ -402,7 +409,12 @@ func (r *reaperSpawner) newReaper(ctx context.Context, sessionID string, provide // Attach reaper container to a requested network if it is specified if p, ok := provider.(*DockerProvider); ok { - req.Networks = append(req.Networks, p.DefaultNetwork) + defaultNetwork, err := p.ensureDefaultNetwork(ctx) + if err != nil { + return nil, fmt.Errorf("ensure default network: %w", err) + } + + req.Networks = append(req.Networks, defaultNetwork) } c, err := provider.RunContainer(ctx, req) @@ -522,7 +534,7 @@ func (r *Reaper) connect(ctx context.Context) (chan bool, error) { go func() { defer conn.Close() if err := r.handshake(conn); err != nil { - Logger.Printf("Reaper handshake failed: %s", err) + log.Printf("Reaper handshake failed: %s", err) } <-terminationSignal }() @@ -561,3 +573,8 @@ func (r *Reaper) handshake(conn net.Conn) error { func (r *Reaper) Labels() map[string]string { return GenericLabels() } + +// isReaperImage returns true if the image name is the reaper image. +func isReaperImage(name string) bool { + return strings.HasSuffix(name, config.ReaperDefaultImage) +} diff --git a/vendor/github.com/testcontainers/testcontainers-go/requirements.txt b/vendor/github.com/testcontainers/testcontainers-go/requirements.txt index 83689b0f8..19aa2e5b7 100644 --- a/vendor/github.com/testcontainers/testcontainers-go/requirements.txt +++ b/vendor/github.com/testcontainers/testcontainers-go/requirements.txt @@ -1,5 +1,5 @@ mkdocs==1.5.3 mkdocs-codeinclude-plugin==0.2.1 -mkdocs-include-markdown-plugin==6.0.4 +mkdocs-include-markdown-plugin==7.1.6 mkdocs-material==9.5.18 -mkdocs-markdownextradata-plugin==0.2.5 +mkdocs-markdownextradata-plugin==0.2.6 diff --git a/vendor/github.com/testcontainers/testcontainers-go/runtime.txt b/vendor/github.com/testcontainers/testcontainers-go/runtime.txt index cc1923a40..24ee5b1be 100644 --- a/vendor/github.com/testcontainers/testcontainers-go/runtime.txt +++ b/vendor/github.com/testcontainers/testcontainers-go/runtime.txt @@ -1 +1 @@ -3.8 +3.13 diff --git a/vendor/github.com/testcontainers/testcontainers-go/sonar-project.properties b/vendor/github.com/testcontainers/testcontainers-go/sonar-project.properties deleted file mode 100644 index 67ef15fcd..000000000 --- a/vendor/github.com/testcontainers/testcontainers-go/sonar-project.properties +++ /dev/null @@ -1,21 +0,0 @@ -# This file is autogenerated by the 'modulegen' tool. -# Github organization linked to sonarcloud -sonar.organization=testcontainers - -# Project key from sonarcloud dashboard for Github Action, otherwise pick a project key you like -sonar.projectKey=testcontainers_testcontainers-go - -sonar.projectName=testcontainers-go - -sonar.projectVersion=v0.34.0 - -sonar.sources=. 
- -sonar.exclusions=**/*_test.go,**/vendor/**,**/testdata/** - -sonar.tests=. -sonar.test.inclusions=**/*_test.go -sonar.test.exclusions=**/vendor/** - -sonar.go.coverage.reportPaths=**/coverage.out -sonar.go.tests.reportPaths=TEST-unit.xml,examples/nginx/TEST-unit.xml,examples/toxiproxy/TEST-unit.xml,modulegen/TEST-unit.xml,modules/artemis/TEST-unit.xml,modules/azurite/TEST-unit.xml,modules/cassandra/TEST-unit.xml,modules/chroma/TEST-unit.xml,modules/clickhouse/TEST-unit.xml,modules/cockroachdb/TEST-unit.xml,modules/compose/TEST-unit.xml,modules/consul/TEST-unit.xml,modules/couchbase/TEST-unit.xml,modules/databend/TEST-unit.xml,modules/dolt/TEST-unit.xml,modules/dynamodb/TEST-unit.xml,modules/elasticsearch/TEST-unit.xml,modules/etcd/TEST-unit.xml,modules/gcloud/TEST-unit.xml,modules/grafana-lgtm/TEST-unit.xml,modules/inbucket/TEST-unit.xml,modules/influxdb/TEST-unit.xml,modules/k3s/TEST-unit.xml,modules/k6/TEST-unit.xml,modules/kafka/TEST-unit.xml,modules/localstack/TEST-unit.xml,modules/mariadb/TEST-unit.xml,modules/meilisearch/TEST-unit.xml,modules/milvus/TEST-unit.xml,modules/minio/TEST-unit.xml,modules/mockserver/TEST-unit.xml,modules/mongodb/TEST-unit.xml,modules/mssql/TEST-unit.xml,modules/mysql/TEST-unit.xml,modules/nats/TEST-unit.xml,modules/neo4j/TEST-unit.xml,modules/ollama/TEST-unit.xml,modules/openfga/TEST-unit.xml,modules/openldap/TEST-unit.xml,modules/opensearch/TEST-unit.xml,modules/postgres/TEST-unit.xml,modules/pulsar/TEST-unit.xml,modules/qdrant/TEST-unit.xml,modules/rabbitmq/TEST-unit.xml,modules/redis/TEST-unit.xml,modules/redpanda/TEST-unit.xml,modules/registry/TEST-unit.xml,modules/surrealdb/TEST-unit.xml,modules/valkey/TEST-unit.xml,modules/vault/TEST-unit.xml,modules/vearch/TEST-unit.xml,modules/weaviate/TEST-unit.xml,modules/yugabytedb/TEST-unit.xml diff --git a/vendor/github.com/testcontainers/testcontainers-go/testcontainers.go b/vendor/github.com/testcontainers/testcontainers-go/testcontainers.go index 7ae4a40c1..77ba722c7 100644 --- a/vendor/github.com/testcontainers/testcontainers-go/testcontainers.go +++ b/vendor/github.com/testcontainers/testcontainers-go/testcontainers.go @@ -18,7 +18,7 @@ func ExtractDockerSocket() string { // // 1. Docker host from the "tc.host" property in the ~/.testcontainers.properties file. // 2. The TESTCONTAINERS_DOCKER_SOCKET_OVERRIDE environment variable. -// 3. Using a Docker client, check if the Info().OperativeSystem is "Docker Desktop" and return the default docker socket path for rootless docker. +// 3. Using a Docker client, check if the Info().OperatingSystem is "Docker Desktop" and return the default docker socket path for rootless docker. // 4. Else, Get the current Docker Host from the existing strategies: see MustExtractDockerHost. // 5. If the socket contains the unix schema, the schema is removed (e.g. unix:///var/run/docker.sock -> /var/run/docker.sock) // 6. 
Else, the default location of the docker socket is used (/var/run/docker.sock)
diff --git a/vendor/github.com/testcontainers/testcontainers-go/testing.go b/vendor/github.com/testcontainers/testcontainers-go/testing.go
index 0601d9fa8..704af99c3 100644
--- a/vendor/github.com/testcontainers/testcontainers-go/testing.go
+++ b/vendor/github.com/testcontainers/testcontainers-go/testing.go
@@ -3,10 +3,11 @@ package testcontainers
 import (
 	"context"
 	"fmt"
+	"io"
 	"regexp"
 	"testing"
 
-	"github.com/docker/docker/errdefs"
+	"github.com/containerd/errdefs"
 	"github.com/stretchr/testify/require"
 )
 
@@ -20,14 +21,20 @@ var errAlreadyInProgress = regexp.MustCompile(`removal of container .* is alread
 // In this way tests that depend on Testcontainers won't run if the provider is provisioned correctly.
 func SkipIfProviderIsNotHealthy(t *testing.T) {
 	t.Helper()
+	defer func() {
+		if r := recover(); r != nil {
+			t.Skipf("Recovered from panic: %v. Docker is not running. Testcontainers can't perform its work without it", r)
+		}
+	}()
+
 	ctx := context.Background()
 	provider, err := ProviderDocker.GetProvider()
 	if err != nil {
-		t.Skipf("Docker is not running. TestContainers can't perform is work without it: %s", err)
+		t.Skipf("Docker is not running. Testcontainers can't perform its work without it: %s", err)
 	}
 	err = provider.Health(ctx)
 	if err != nil {
-		t.Skipf("Docker is not running. TestContainers can't perform is work without it: %s", err)
+		t.Skipf("Docker is not running. Testcontainers can't perform its work without it: %s", err)
 	}
 }
 
@@ -36,20 +43,31 @@ func SkipIfProviderIsNotHealthy(t *testing.T) {
 func SkipIfDockerDesktop(t *testing.T, ctx context.Context) {
 	t.Helper()
 	cli, err := NewDockerClientWithOpts(ctx)
-	if err != nil {
-		t.Fatalf("failed to create docker client: %s", err)
-	}
+	require.NoErrorf(t, err, "failed to create docker client: %s", err)
 
 	info, err := cli.Info(ctx)
-	if err != nil {
-		t.Fatalf("failed to get docker info: %s", err)
-	}
+	require.NoErrorf(t, err, "failed to get docker info: %s", err)
 
 	if info.OperatingSystem == "Docker Desktop" {
 		t.Skip("Skipping test that requires host network access when running in Docker Desktop")
 	}
 }
 
+// SkipIfNotDockerDesktop is a utility function capable of skipping tests
+// if tests are not run using Docker Desktop.
+func SkipIfNotDockerDesktop(t *testing.T, ctx context.Context) {
+	t.Helper()
+	cli, err := NewDockerClientWithOpts(ctx)
+	require.NoErrorf(t, err, "failed to create docker client: %s", err)
+
+	info, err := cli.Info(ctx)
+	require.NoErrorf(t, err, "failed to get docker info: %s", err)
+
+	if info.OperatingSystem != "Docker Desktop" {
+		t.Skip("Skipping test that needs Docker Desktop")
+	}
+}
+
 // exampleLogConsumer {
 
 // StdoutLogConsumer is a LogConsumer that prints the log to stdout
@@ -69,7 +87,7 @@ func (lc *StdoutLogConsumer) Accept(l Log) {
 // of [GenericContainer](...) or a modules Run(...) in a test to ensure the
 // container is stopped when the function ends.
 //
-// before any error check. If container is nil, its a no-op.
+// before any error check. If container is nil, it's a no-op.
 func CleanupContainer(tb testing.TB, ctr Container, options ...TerminateOption) {
 	tb.Helper()
 
@@ -81,12 +99,14 @@ func CleanupContainer(tb testing.TB, ctr Container, options ...TerminateOption)
 // CleanupNetwork is a helper function that schedules the network to be
 // removed when the test ends.
 // This should be the first call after NewNetwork(...) in a test before
-// any error check. If network is nil, its a no-op.
+// any error check. If network is nil, it's a no-op.
 func CleanupNetwork(tb testing.TB, network Network) {
 	tb.Helper()
 
 	tb.Cleanup(func() {
-		noErrorOrIgnored(tb, network.Remove(context.Background()))
+		if !isNil(network) {
+			noErrorOrIgnored(tb, network.Remove(context.Background()))
+		}
 	})
 }
 
@@ -127,15 +147,19 @@ func isCleanupSafe(err error) bool {
 		return true
 	}
 
-	switch x := err.(type) { //nolint:errorlint // We need to check for interfaces.
-	case errdefs.ErrNotFound:
+	// First try with containerd's errdefs
+	switch {
+	case errdefs.IsNotFound(err):
 		return true
-	case errdefs.ErrConflict:
+	case errdefs.IsConflict(err):
 		// Terminating a container that is already terminating.
 		if errAlreadyInProgress.MatchString(err.Error()) {
 			return true
 		}
 		return false
+	}
+
+	switch x := err.(type) { //nolint:errorlint // We need to check for interfaces.
 	case causer:
 		return isCleanupSafe(x.Cause())
 	case wrapErr:
@@ -151,3 +175,18 @@ func isCleanupSafe(err error) bool {
 		return false
 	}
 }
+
+// RequireContainerExec is a helper function that executes a command in a container.
+// It ensures that there is no error during the execution and finally returns the
+// output of the execution.
+func RequireContainerExec(ctx context.Context, t *testing.T, container Container, cmd []string) string {
+	t.Helper()
+
+	code, out, err := container.Exec(ctx, cmd)
+	require.NoError(t, err)
+	require.Zero(t, code)
+
+	checkBytes, err := io.ReadAll(out)
+	require.NoError(t, err)
+	return string(checkBytes)
+}
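
The new helpers above are typically combined in a test roughly as follows. This is a minimal sketch, not part of the patch: the container setup is elided and runEchoContainer is a hypothetical helper.

    func TestEcho(t *testing.T) {
    	// Skips rather than fails when no Docker provider is available,
    	// including the case where client setup panics.
    	testcontainers.SkipIfProviderIsNotHealthy(t)

    	ctx := context.Background()
    	ctr := runEchoContainer(ctx, t) // hypothetical helper returning a running Container
    	testcontainers.CleanupContainer(t, ctr)

    	// Fails the test on an exec error or a non-zero exit code,
    	// then returns the command output.
    	out := testcontainers.RequireContainerExec(ctx, t, ctr, []string{"echo", "hello"})
    	_ = out
    }
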
diff --git a/vendor/github.com/testcontainers/testcontainers-go/validator.go b/vendor/github.com/testcontainers/testcontainers-go/validator.go
new file mode 100644
index 000000000..a888586e8
--- /dev/null
+++ b/vendor/github.com/testcontainers/testcontainers-go/validator.go
@@ -0,0 +1,7 @@
+package testcontainers
+
+// Validator is an interface that can be implemented by types that need to validate their state.
+type Validator interface {
+	// Validate validates the state of the type.
+	Validate() error
+}
diff --git a/vendor/github.com/testcontainers/testcontainers-go/wait/all.go b/vendor/github.com/testcontainers/testcontainers-go/wait/all.go
index fb097fb5e..9bf4cbe8b 100644
--- a/vendor/github.com/testcontainers/testcontainers-go/wait/all.go
+++ b/vendor/github.com/testcontainers/testcontainers-go/wait/all.go
@@ -2,7 +2,8 @@ package wait
 
 import (
 	"context"
-	"fmt"
+	"errors"
+	"reflect"
 	"time"
 )
 
@@ -58,10 +59,17 @@ func (ms *MultiStrategy) WaitUntilReady(ctx context.Context, target StrategyTarg
 	}
 
 	if len(ms.Strategies) == 0 {
-		return fmt.Errorf("no wait strategy supplied")
+		return errors.New("no wait strategy supplied")
 	}
 
 	for _, strategy := range ms.Strategies {
+		if strategy == nil || reflect.ValueOf(strategy).IsNil() {
+			// A module could append strategies after part of the container
+			// initialization and use wait.ForAll on a strategy that has not
+			// been initialized yet. In this case, we just skip the nil strategy.
+			continue
+		}
+
 		strategyCtx := ctx
 
 		// Set default Timeout when strategy implements StrategyTimeout
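
The nil-skip added to MultiStrategy above tolerates partially built strategy lists. A short sketch of the pattern, with the port and the optional entry chosen for illustration:

    var optional wait.Strategy // may stay nil if a module skips optional setup

    strategy := wait.ForAll(
    	wait.ForListeningPort("5432/tcp"),
    	optional, // a nil entry is now skipped instead of being invoked
    )
    // strategy.WaitUntilReady(ctx, target) proceeds with the non-nil entries.
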
diff --git a/vendor/github.com/testcontainers/testcontainers-go/wait/exec.go b/vendor/github.com/testcontainers/testcontainers-go/wait/exec.go
index 2e341dd3e..72987c31a 100644
--- a/vendor/github.com/testcontainers/testcontainers-go/wait/exec.go
+++ b/vendor/github.com/testcontainers/testcontainers-go/wait/exec.go
@@ -30,7 +30,7 @@ func NewExecStrategy(cmd []string) *ExecStrategy {
 	return &ExecStrategy{
 		cmd:             cmd,
 		ExitCodeMatcher: defaultExitCodeMatcher,
-		ResponseMatcher: func(body io.Reader) bool { return true },
+		ResponseMatcher: func(_ io.Reader) bool { return true },
 		PollInterval:    defaultPollInterval(),
 	}
 }
diff --git a/vendor/github.com/testcontainers/testcontainers-go/wait/exit.go b/vendor/github.com/testcontainers/testcontainers-go/wait/exit.go
index be12b8ad4..670c8e2ce 100644
--- a/vendor/github.com/testcontainers/testcontainers-go/wait/exit.go
+++ b/vendor/github.com/testcontainers/testcontainers-go/wait/exit.go
@@ -76,9 +76,8 @@ func (ws *ExitStrategy) WaitUntilReady(ctx context.Context, target StrategyTarge
 	if err != nil {
 		if !strings.Contains(err.Error(), "No such container") {
 			return err
-		} else {
-			return nil
 		}
+		return nil
 	}
 	if state.Running {
 		time.Sleep(ws.PollInterval)
diff --git a/vendor/github.com/testcontainers/testcontainers-go/wait/file.go b/vendor/github.com/testcontainers/testcontainers-go/wait/file.go
index d9cab7a6e..4f6d38c7c 100644
--- a/vendor/github.com/testcontainers/testcontainers-go/wait/file.go
+++ b/vendor/github.com/testcontainers/testcontainers-go/wait/file.go
@@ -6,7 +6,7 @@ import (
 	"io"
 	"time"
 
-	"github.com/docker/docker/errdefs"
+	"github.com/containerd/errdefs"
 )
 
 var (
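
The hunks above (testing.go, file.go, and reaper.go earlier) replace github.com/docker/docker/errdefs type assertions with github.com/containerd/errdefs predicates. A condensed sketch of the predicate style, simplified relative to isCleanupSafe:

    import "github.com/containerd/errdefs"

    // removalIgnorable reports whether a cleanup error can be ignored:
    // not-found means the container is already gone, and conflict may
    // mean a removal is already in progress.
    func removalIgnorable(err error) bool {
    	return err == nil || errdefs.IsNotFound(err) || errdefs.IsConflict(err)
    }
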
diff --git a/vendor/github.com/testcontainers/testcontainers-go/wait/host_port.go b/vendor/github.com/testcontainers/testcontainers-go/wait/host_port.go
index 9360517a0..8d97ccbb1 100644
--- a/vendor/github.com/testcontainers/testcontainers-go/wait/host_port.go
+++ b/vendor/github.com/testcontainers/testcontainers-go/wait/host_port.go
@@ -4,13 +4,13 @@ import (
 	"context"
 	"errors"
 	"fmt"
-	"log"
 	"net"
 	"os"
-	"strconv"
 	"time"
 
 	"github.com/docker/go-connections/nat"
+
+	"github.com/testcontainers/testcontainers-go/log"
 )
 
 const (
@@ -41,6 +41,11 @@ type HostPortStrategy struct {
 	// a shell is not available in the container or when the container doesn't bind
 	// the port internally until additional conditions are met.
 	skipInternalCheck bool
+
+	// skipExternalCheck is a flag to skip the external check, which, if used together
+	// with skipInternalCheck, makes the strategy wait only for the port mapping to
+	// complete, without accessing the port itself.
+	skipExternalCheck bool
 }
 
 // NewHostPortStrategy constructs a default host port strategy that waits for the given
@@ -69,6 +74,12 @@ func ForExposedPort() *HostPortStrategy {
 	return NewHostPortStrategy("")
 }
 
+// ForMappedPort returns a host port strategy that waits for the given port
+// to be mapped without accessing the port itself.
+func ForMappedPort(port nat.Port) *HostPortStrategy {
+	return NewHostPortStrategy(port).SkipInternalCheck().SkipExternalCheck()
+}
+
 // SkipInternalCheck changes the host port strategy to skip the internal check,
 // which is useful when a shell is not available in the container or when the
 // container doesn't bind the port internally until additional conditions are met.
@@ -78,6 +89,15 @@ func (hp *HostPortStrategy) SkipInternalCheck() *HostPortStrategy {
 	return hp
 }
 
+// SkipExternalCheck changes the host port strategy to skip the external check,
+// which, if used together with SkipInternalCheck, makes the strategy wait only
+// for the port mapping to complete, without accessing the port itself.
+func (hp *HostPortStrategy) SkipExternalCheck() *HostPortStrategy {
+	hp.skipExternalCheck = true
+
+	return hp
+}
+
 // WithStartupTimeout can be used to change the default startup timeout
 func (hp *HostPortStrategy) WithStartupTimeout(startupTimeout time.Duration) *HostPortStrategy {
 	hp.timeout = &startupTimeout
@@ -94,6 +114,25 @@ func (hp *HostPortStrategy) Timeout() *time.Duration {
 	return hp.timeout
 }
 
+// detectInternalPort returns the lowest internal port that is currently bound.
+// If no internal port is found, it returns the zero nat.Port value which
+// can be checked against an empty string.
+func (hp *HostPortStrategy) detectInternalPort(ctx context.Context, target StrategyTarget) (nat.Port, error) {
+	var internalPort nat.Port
+	inspect, err := target.Inspect(ctx)
+	if err != nil {
+		return internalPort, fmt.Errorf("inspect: %w", err)
+	}
+
+	for port := range inspect.NetworkSettings.Ports {
+		if internalPort == "" || port.Int() < internalPort.Int() {
+			internalPort = port
+		}
+	}
+
+	return internalPort, nil
+}
+
 // WaitUntilReady implements Strategy.WaitUntilReady
 func (hp *HostPortStrategy) WaitUntilReady(ctx context.Context, target StrategyTarget) error {
 	timeout := defaultStartupTimeout()
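
The new ForMappedPort strategy composes the two skip flags, so waiting ends as soon as Docker reports the mapping. A hedged usage sketch, with the image and port chosen for illustration:

    req := testcontainers.ContainerRequest{
    	Image:        "redis:7",
    	ExposedPorts: []string{"6379/tcp"},
    	// Wait only until the port mapping exists; probe the port
    	// neither internally nor externally.
    	WaitingFor: wait.ForMappedPort("6379/tcp"),
    }
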
@@ -104,34 +143,37 @@ func (hp *HostPortStrategy) WaitUntilReady(ctx context.Context, target StrategyT
 	ctx, cancel := context.WithTimeout(ctx, timeout)
 	defer cancel()
 
-	ipAddress, err := target.Host(ctx)
-	if err != nil {
-		return err
-	}
-
 	waitInterval := hp.PollInterval
 
 	internalPort := hp.Port
+	i := 0
 	if internalPort == "" {
-		inspect, err := target.Inspect(ctx)
+		var err error
+		// Port is not specified, so we need to detect it.
+		internalPort, err = hp.detectInternalPort(ctx, target)
 		if err != nil {
-			return err
+			return fmt.Errorf("detect internal port: %w", err)
 		}
 
-		for port := range inspect.NetworkSettings.Ports {
-			if internalPort == "" || port.Int() < internalPort.Int() {
-				internalPort = port
+		for internalPort == "" {
+			select {
+			case <-ctx.Done():
+				return fmt.Errorf("detect internal port: retries: %d, last err: %w, ctx err: %w", i, err, ctx.Err())
+			case <-time.After(waitInterval):
+				if err := checkTarget(ctx, target); err != nil {
+					return fmt.Errorf("detect internal port: check target: retries: %d, last err: %w", i, err)
+				}
+
+				internalPort, err = hp.detectInternalPort(ctx, target)
+				if err != nil {
+					return fmt.Errorf("detect internal port: %w", err)
+				}
 			}
 		}
 	}
 
-	if internalPort == "" {
-		return fmt.Errorf("no port to wait for")
-	}
-
-	var port nat.Port
-	port, err = target.MappedPort(ctx, internalPort)
-	i := 0
+	port, err := target.MappedPort(ctx, internalPort)
+	i = 0
 	for port == "" {
 		i++
@@ -141,7 +183,7 @@ func (hp *HostPortStrategy) WaitUntilReady(ctx context.Context, target StrategyT
 			return fmt.Errorf("mapped port: retries: %d, port: %q, last err: %w, ctx err: %w", i, port, err, ctx.Err())
 		case <-time.After(waitInterval):
 			if err := checkTarget(ctx, target); err != nil {
-				return fmt.Errorf("check target: retries: %d, port: %q, last err: %w", i, port, err)
+				return fmt.Errorf("mapped port: check target: retries: %d, port: %q, last err: %w", i, port, err)
 			}
 			port, err = target.MappedPort(ctx, internalPort)
 			if err != nil {
@@ -150,8 +192,15 @@ func (hp *HostPortStrategy) WaitUntilReady(ctx context.Context, target StrategyT
 		}
 	}
 
-	if err := externalCheck(ctx, ipAddress, port, target, waitInterval); err != nil {
-		return fmt.Errorf("external check: %w", err)
+	if !hp.skipExternalCheck {
+		ipAddress, err := target.Host(ctx)
+		if err != nil {
+			return fmt.Errorf("host: %w", err)
+		}
+
+		if err := externalCheck(ctx, ipAddress, port, target, waitInterval); err != nil {
+			return fmt.Errorf("external check: %w", err)
+		}
 	}
 
 	if hp.skipInternalCheck {
@@ -161,10 +210,10 @@ func (hp *HostPortStrategy) WaitUntilReady(ctx context.Context, target StrategyT
 	if err = internalCheck(ctx, internalPort, target); err != nil {
 		switch {
 		case errors.Is(err, errShellNotExecutable):
-			log.Println("Shell not executable in container, only external port validated")
+			log.Printf("Shell not executable in container, only external port validated")
 			return nil
 		case errors.Is(err, errShellNotFound):
-			log.Println("Shell not found in container")
+			log.Printf("Shell not found in container")
 			return nil
 		default:
 			return fmt.Errorf("internal check: %w", err)
@@ -176,11 +225,9 @@ func (hp *HostPortStrategy) WaitUntilReady(ctx context.Context, target StrategyT
 
 func externalCheck(ctx context.Context, ipAddress string, port nat.Port, target StrategyTarget, waitInterval time.Duration) error {
 	proto := port.Proto()
-	portNumber := port.Int()
-	portString := strconv.Itoa(portNumber)
 
 	dialer := net.Dialer{}
-	address := net.JoinHostPort(ipAddress, portString)
+	address := net.JoinHostPort(ipAddress, port.Port())
 	for i := 0; ; i++ {
 		if err := checkTarget(ctx, target); err != nil {
 			return fmt.Errorf("check target: retries: %d address: %s: %w", i, address, err)
@@ -219,7 +266,7 @@ func internalCheck(ctx context.Context, internalPort nat.Port, target StrategyTa
 		return fmt.Errorf("%w, host port waiting failed", err)
 	}
 
-	// Docker has a issue which override exit code 127 to 126 due to:
+	// Docker has an issue which overrides exit code 127 to 126 due to:
// https://github.com/moby/moby/issues/45795 // Handle both to ensure compatibility with Docker and Podman for now. switch exitCode { diff --git a/vendor/github.com/testcontainers/testcontainers-go/wait/http.go b/vendor/github.com/testcontainers/testcontainers-go/wait/http.go index 11452ecbf..32dc8778b 100644 --- a/vendor/github.com/testcontainers/testcontainers-go/wait/http.go +++ b/vendor/github.com/testcontainers/testcontainers-go/wait/http.go @@ -50,13 +50,13 @@ func NewHTTPStrategy(path string) *HTTPStrategy { Port: "", Path: path, StatusCodeMatcher: defaultStatusCodeMatcher, - ResponseMatcher: func(body io.Reader) bool { return true }, + ResponseMatcher: func(_ io.Reader) bool { return true }, UseTLS: false, TLSConfig: nil, Method: http.MethodGet, Body: nil, Headers: map[string]string{}, - ResponseHeadersMatcher: func(headers http.Header) bool { return true }, + ResponseHeadersMatcher: func(_ http.Header) bool { return true }, PollInterval: defaultPollInterval(), UserInfo: nil, } @@ -208,7 +208,7 @@ func (ws *HTTPStrategy) WaitUntilReady(ctx context.Context, target StrategyTarge } if lowestPort == "" { - return errors.New("No exposed tcp ports or mapped ports - cannot wait for status") + return errors.New("no exposed tcp ports or mapped ports - cannot wait for status") } mappedPort, _ = nat.NewPort(lowestPort.Proto(), hostPort) @@ -229,7 +229,7 @@ func (ws *HTTPStrategy) WaitUntilReady(ctx context.Context, target StrategyTarge } if mappedPort.Proto() != "tcp" { - return errors.New("Cannot use HTTP client on non-TCP ports") + return errors.New("cannot use HTTP client on non-TCP ports") } } diff --git a/vendor/github.com/testcontainers/testcontainers-go/wait/log.go b/vendor/github.com/testcontainers/testcontainers-go/wait/log.go index 530077f90..41c96e3eb 100644 --- a/vendor/github.com/testcontainers/testcontainers-go/wait/log.go +++ b/vendor/github.com/testcontainers/testcontainers-go/wait/log.go @@ -1,10 +1,12 @@ package wait import ( + "bytes" "context" + "errors" + "fmt" "io" "regexp" - "strings" "time" ) @@ -14,6 +16,21 @@ var ( _ StrategyTimeout = (*LogStrategy)(nil) ) +// PermanentError is a special error that will stop the wait and return an error. +type PermanentError struct { + err error +} + +// Error implements the error interface. +func (e *PermanentError) Error() string { + return e.err.Error() +} + +// NewPermanentError creates a new PermanentError. +func NewPermanentError(err error) *PermanentError { + return &PermanentError{err: err} +} + // LogStrategy will wait until a given log entry shows up in the docker logs type LogStrategy struct { // all Strategies should have a startupTimeout to avoid waiting infinitely @@ -24,6 +41,18 @@ type LogStrategy struct { IsRegexp bool Occurrence int PollInterval time.Duration + + // check is the function that will be called to check if the log entry is present. + check func([]byte) error + + // submatchCallback is a callback that will be called with the sub matches of the regexp. + submatchCallback func(pattern string, matches [][][]byte) error + + // re is the optional compiled regexp. + re *regexp.Regexp + + // log byte slice version of [LogStrategy.Log] used for count checks. 
+ log []byte } // NewLogStrategy constructs with polling interval of 100 milliseconds and startup timeout of 60 seconds by default @@ -46,6 +75,18 @@ func (ws *LogStrategy) AsRegexp() *LogStrategy { return ws } +// Submatch configures a function that will be called with the result of +// [regexp.Regexp.FindAllSubmatch], allowing the caller to process the results. +// If the callback returns nil, the strategy will be considered successful. +// Returning a [PermanentError] will stop the wait and return an error, otherwise +// it will retry until the timeout is reached. +// [LogStrategy.Occurrence] is ignored if this option is set. +func (ws *LogStrategy) Submatch(callback func(pattern string, matches [][][]byte) error) *LogStrategy { + ws.submatchCallback = callback + + return ws +} + // WithStartupTimeout can be used to change the default startup timeout func (ws *LogStrategy) WithStartupTimeout(timeout time.Duration) *LogStrategy { ws.timeout = &timeout @@ -89,57 +130,85 @@ func (ws *LogStrategy) WaitUntilReady(ctx context.Context, target StrategyTarget timeout = *ws.timeout } + switch { + case ws.submatchCallback != nil: + ws.re = regexp.MustCompile(ws.Log) + ws.check = ws.checkSubmatch + case ws.IsRegexp: + ws.re = regexp.MustCompile(ws.Log) + ws.check = ws.checkRegexp + default: + ws.log = []byte(ws.Log) + ws.check = ws.checkCount + } + ctx, cancel := context.WithTimeout(ctx, timeout) defer cancel() - length := 0 - -LOOP: + var lastLen int + var lastError error for { select { case <-ctx.Done(): - return ctx.Err() + return errors.Join(lastError, ctx.Err()) default: checkErr := checkTarget(ctx, target) reader, err := target.Logs(ctx) if err != nil { + // TODO: fix as this will wait for timeout if the logs are not available. time.Sleep(ws.PollInterval) continue } b, err := io.ReadAll(reader) if err != nil { + // TODO: fix as this will wait for timeout if the logs are not readable. time.Sleep(ws.PollInterval) continue } - logs := string(b) - - switch { - case length == len(logs) && checkErr != nil: + if lastLen == len(b) && checkErr != nil { + // Log length hasn't changed so we're not making progress. return checkErr - case checkLogsFn(ws, b): - break LOOP - default: - length = len(logs) + } + + if err := ws.check(b); err != nil { + var errPermanent *PermanentError + if errors.As(err, &errPermanent) { + return err + } + + lastError = err + lastLen = len(b) time.Sleep(ws.PollInterval) continue } + + return nil } } +} + +// checkCount checks if the log entry is present in the logs using a string count. +func (ws *LogStrategy) checkCount(b []byte) error { + if count := bytes.Count(b, ws.log); count < ws.Occurrence { + return fmt.Errorf("%q matched %d times, expected %d", ws.Log, count, ws.Occurrence) + } return nil } -func checkLogsFn(ws *LogStrategy, b []byte) bool { - if ws.IsRegexp { - re := regexp.MustCompile(ws.Log) - occurrences := re.FindAll(b, -1) - - return len(occurrences) >= ws.Occurrence +// checkRegexp checks if the log entry is present in the logs using a regexp count. +func (ws *LogStrategy) checkRegexp(b []byte) error { + if matches := ws.re.FindAll(b, -1); len(matches) < ws.Occurrence { + return fmt.Errorf("`%s` matched %d times, expected %d", ws.Log, len(matches), ws.Occurrence) } - logs := string(b) - return strings.Count(logs, ws.Log) >= ws.Occurrence + return nil +} + +// checkSubmatch checks if the log entry is present in the logs using a regexp sub match callback. 
+func (ws *LogStrategy) checkSubmatch(b []byte) error { + return ws.submatchCallback(ws.Log, ws.re.FindAllSubmatch(b, -1)) } diff --git a/vendor/github.com/testcontainers/testcontainers-go/wait/nop.go b/vendor/github.com/testcontainers/testcontainers-go/wait/nop.go index 4206eefc1..c47d83d18 100644 --- a/vendor/github.com/testcontainers/testcontainers-go/wait/nop.go +++ b/vendor/github.com/testcontainers/testcontainers-go/wait/nop.go @@ -5,7 +5,7 @@ import ( "io" "time" - "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" "github.com/docker/go-connections/nat" "github.com/testcontainers/testcontainers-go/exec" @@ -44,14 +44,14 @@ func (ws *NopStrategy) WaitUntilReady(ctx context.Context, target StrategyTarget type NopStrategyTarget struct { ReaderCloser io.ReadCloser - ContainerState types.ContainerState + ContainerState container.State } func (st NopStrategyTarget) Host(_ context.Context) (string, error) { return "", nil } -func (st NopStrategyTarget) Inspect(_ context.Context) (*types.ContainerJSON, error) { +func (st NopStrategyTarget) Inspect(_ context.Context) (*container.InspectResponse, error) { return nil, nil } @@ -72,7 +72,7 @@ func (st NopStrategyTarget) Exec(_ context.Context, _ []string, _ ...exec.Proces return 0, nil, nil } -func (st NopStrategyTarget) State(_ context.Context) (*types.ContainerState, error) { +func (st NopStrategyTarget) State(_ context.Context) (*container.State, error) { return &st.ContainerState, nil } diff --git a/vendor/github.com/testcontainers/testcontainers-go/wait/sql.go b/vendor/github.com/testcontainers/testcontainers-go/wait/sql.go index b766f60fe..1d09edafe 100644 --- a/vendor/github.com/testcontainers/testcontainers-go/wait/sql.go +++ b/vendor/github.com/testcontainers/testcontainers-go/wait/sql.go @@ -10,25 +10,25 @@ import ( ) var ( - _ Strategy = (*waitForSql)(nil) - _ StrategyTimeout = (*waitForSql)(nil) + _ Strategy = (*waitForSQL)(nil) + _ StrategyTimeout = (*waitForSQL)(nil) ) -const defaultForSqlQuery = "SELECT 1" +const defaultForSQLQuery = "SELECT 1" // ForSQL constructs a new waitForSql strategy for the given driver -func ForSQL(port nat.Port, driver string, url func(host string, port nat.Port) string) *waitForSql { - return &waitForSql{ +func ForSQL(port nat.Port, driver string, url func(host string, port nat.Port) string) *waitForSQL { + return &waitForSQL{ Port: port, URL: url, Driver: driver, startupTimeout: defaultStartupTimeout(), PollInterval: defaultPollInterval(), - query: defaultForSqlQuery, + query: defaultForSQLQuery, } } -type waitForSql struct { +type waitForSQL struct { timeout *time.Duration URL func(host string, port nat.Port) string @@ -40,31 +40,31 @@ type waitForSql struct { } // WithStartupTimeout can be used to change the default startup timeout -func (w *waitForSql) WithStartupTimeout(timeout time.Duration) *waitForSql { +func (w *waitForSQL) WithStartupTimeout(timeout time.Duration) *waitForSQL { w.timeout = &timeout return w } // WithPollInterval can be used to override the default polling interval of 100 milliseconds -func (w *waitForSql) WithPollInterval(pollInterval time.Duration) *waitForSql { +func (w *waitForSQL) WithPollInterval(pollInterval time.Duration) *waitForSQL { w.PollInterval = pollInterval return w } // WithQuery can be used to override the default query used in the strategy. 
-func (w *waitForSql) WithQuery(query string) *waitForSql {
+func (w *waitForSQL) WithQuery(query string) *waitForSQL {
 	w.query = query
 	return w
 }
 
-func (w *waitForSql) Timeout() *time.Duration {
+func (w *waitForSQL) Timeout() *time.Duration {
 	return w.timeout
 }
 
 // WaitUntilReady repeatedly tries to run "SELECT 1" or user defined query on the given port using sql and driver.
 //
 // If it doesn't succeed until the timeout value which defaults to 60 seconds, it will return an error.
-func (w *waitForSql) WaitUntilReady(ctx context.Context, target StrategyTarget) error {
+func (w *waitForSQL) WaitUntilReady(ctx context.Context, target StrategyTarget) error {
 	timeout := defaultStartupTimeout()
 	if w.timeout != nil {
 		timeout = *w.timeout
diff --git a/vendor/github.com/testcontainers/testcontainers-go/wait/tls.go b/vendor/github.com/testcontainers/testcontainers-go/wait/tls.go
new file mode 100644
index 000000000..ab904b271
--- /dev/null
+++ b/vendor/github.com/testcontainers/testcontainers-go/wait/tls.go
@@ -0,0 +1,167 @@
+package wait
+
+import (
+	"context"
+	"crypto/tls"
+	"crypto/x509"
+	"fmt"
+	"io"
+	"time"
+)
+
+// Validate we implement interface.
+var _ Strategy = (*TLSStrategy)(nil)
+
+// TLSStrategy is a strategy for handling TLS.
+type TLSStrategy struct {
+	// General Settings.
+	timeout      *time.Duration
+	pollInterval time.Duration
+
+	// Custom Settings.
+	certFiles *x509KeyPair
+	rootFiles []string
+
+	// State.
+	tlsConfig *tls.Config
+}
+
+// x509KeyPair is a pair of certificate and key files.
+type x509KeyPair struct {
+	certPEMFile string
+	keyPEMFile  string
+}
+
+// ForTLSCert returns a TLSStrategy that will add a Certificate to the [tls.Config]
+// constructed from the PEM formatted certificate and key file pair in the container.
+func ForTLSCert(certPEMFile, keyPEMFile string) *TLSStrategy {
+	return &TLSStrategy{
+		certFiles: &x509KeyPair{
+			certPEMFile: certPEMFile,
+			keyPEMFile:  keyPEMFile,
+		},
+		tlsConfig:    &tls.Config{},
+		pollInterval: defaultPollInterval(),
+	}
+}
+
+// ForTLSRootCAs returns a TLSStrategy that sets the root CAs for the [tls.Config]
+// using the given PEM formatted files from the container.
+func ForTLSRootCAs(pemFiles ...string) *TLSStrategy {
+	return &TLSStrategy{
+		rootFiles:    pemFiles,
+		tlsConfig:    &tls.Config{},
+		pollInterval: defaultPollInterval(),
+	}
+}
+
+// WithRootCAs sets the root CAs for the [tls.Config] using the given files from
+// the container.
+func (ws *TLSStrategy) WithRootCAs(files ...string) *TLSStrategy {
+	ws.rootFiles = files
+	return ws
+}
+
+// WithCert sets the [tls.Config] Certificates using the given files from the container.
+func (ws *TLSStrategy) WithCert(certPEMFile, keyPEMFile string) *TLSStrategy {
+	ws.certFiles = &x509KeyPair{
+		certPEMFile: certPEMFile,
+		keyPEMFile:  keyPEMFile,
+	}
+	return ws
+}
+
+// WithServerName sets the server name for the [tls.Config].
+func (ws *TLSStrategy) WithServerName(serverName string) *TLSStrategy {
+	ws.tlsConfig.ServerName = serverName
+	return ws
+}
+
+// WithStartupTimeout can be used to change the default startup timeout.
+func (ws *TLSStrategy) WithStartupTimeout(startupTimeout time.Duration) *TLSStrategy {
+	ws.timeout = &startupTimeout
+	return ws
+}
+
+// WithPollInterval can be used to override the default polling interval of 100 milliseconds.
+func (ws *TLSStrategy) WithPollInterval(pollInterval time.Duration) *TLSStrategy {
+	ws.pollInterval = pollInterval
+	return ws
+}
+
+// TLSConfig returns the TLS config once the strategy is ready.
+// If the strategy is nil, it returns nil. +func (ws *TLSStrategy) TLSConfig() *tls.Config { + if ws == nil { + return nil + } + + return ws.tlsConfig +} + +// WaitUntilReady implements the [Strategy] interface. +// It waits for the CA, client cert and key files to be available in the container and +// uses them to setup the TLS config. +func (ws *TLSStrategy) WaitUntilReady(ctx context.Context, target StrategyTarget) error { + size := len(ws.rootFiles) + if ws.certFiles != nil { + size += 2 + } + strategies := make([]Strategy, 0, size) + for _, file := range ws.rootFiles { + strategies = append(strategies, + ForFile(file).WithMatcher(func(r io.Reader) error { + buf, err := io.ReadAll(r) + if err != nil { + return fmt.Errorf("read CA cert file %q: %w", file, err) + } + + if ws.tlsConfig.RootCAs == nil { + ws.tlsConfig.RootCAs = x509.NewCertPool() + } + + if !ws.tlsConfig.RootCAs.AppendCertsFromPEM(buf) { + return fmt.Errorf("invalid CA cert file %q", file) + } + + return nil + }).WithPollInterval(ws.pollInterval), + ) + } + + if ws.certFiles != nil { + var certPEMBlock []byte + strategies = append(strategies, + ForFile(ws.certFiles.certPEMFile).WithMatcher(func(r io.Reader) error { + var err error + if certPEMBlock, err = io.ReadAll(r); err != nil { + return fmt.Errorf("read certificate cert %q: %w", ws.certFiles.certPEMFile, err) + } + + return nil + }).WithPollInterval(ws.pollInterval), + ForFile(ws.certFiles.keyPEMFile).WithMatcher(func(r io.Reader) error { + keyPEMBlock, err := io.ReadAll(r) + if err != nil { + return fmt.Errorf("read certificate key %q: %w", ws.certFiles.keyPEMFile, err) + } + + cert, err := tls.X509KeyPair(certPEMBlock, keyPEMBlock) + if err != nil { + return fmt.Errorf("x509 key pair %q %q: %w", ws.certFiles.certPEMFile, ws.certFiles.keyPEMFile, err) + } + + ws.tlsConfig.Certificates = []tls.Certificate{cert} + + return nil + }).WithPollInterval(ws.pollInterval), + ) + } + + strategy := ForAll(strategies...) 
+ if ws.timeout != nil { + strategy.WithStartupTimeout(*ws.timeout) + } + + return strategy.WaitUntilReady(ctx, target) +} diff --git a/vendor/github.com/testcontainers/testcontainers-go/wait/wait.go b/vendor/github.com/testcontainers/testcontainers-go/wait/wait.go index 7211d49b2..ca5a7dbf2 100644 --- a/vendor/github.com/testcontainers/testcontainers-go/wait/wait.go +++ b/vendor/github.com/testcontainers/testcontainers-go/wait/wait.go @@ -7,7 +7,7 @@ import ( "io" "time" - "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" "github.com/docker/go-connections/nat" "github.com/testcontainers/testcontainers-go/exec" @@ -25,12 +25,12 @@ type StrategyTimeout interface { type StrategyTarget interface { Host(context.Context) (string, error) - Inspect(context.Context) (*types.ContainerJSON, error) + Inspect(context.Context) (*container.InspectResponse, error) Ports(ctx context.Context) (nat.PortMap, error) // Deprecated: use Inspect instead MappedPort(context.Context, nat.Port) (nat.Port, error) Logs(context.Context) (io.ReadCloser, error) Exec(context.Context, []string, ...exec.ProcessOption) (int, io.Reader, error) - State(context.Context) (*types.ContainerState, error) + State(context.Context) (*container.State, error) CopyFileFromContainer(ctx context.Context, filePath string) (io.ReadCloser, error) } @@ -43,7 +43,7 @@ func checkTarget(ctx context.Context, target StrategyTarget) error { return checkState(state) } -func checkState(state *types.ContainerState) error { +func checkState(state *container.State) error { switch { case state.Running: return nil diff --git a/vendor/github.com/testcontainers/testcontainers-go/wait/walk.go b/vendor/github.com/testcontainers/testcontainers-go/wait/walk.go new file mode 100644 index 000000000..98f5755e1 --- /dev/null +++ b/vendor/github.com/testcontainers/testcontainers-go/wait/walk.go @@ -0,0 +1,81 @@ +package wait + +import ( + "errors" + "slices" +) + +var ( + // ErrVisitStop is used as a return value from [VisitFunc] to stop the walk. + // It is not returned as an error by any function. + ErrVisitStop = errors.New("stop the walk") + + // Deprecated: use [ErrVisitStop] instead. + VisitStop = ErrVisitStop + + // ErrVisitRemove is used as a return value from [VisitFunc] to have the current node removed. + // It is not returned as an error by any function. + ErrVisitRemove = errors.New("remove this strategy") + + // Deprecated: use [ErrVisitRemove] instead. + VisitRemove = ErrVisitRemove +) + +// VisitFunc is a function that visits a strategy node. +// If it returns [ErrVisitStop], the walk stops. +// If it returns [ErrVisitRemove], the current node is removed. +type VisitFunc func(root Strategy) error + +// Walk walks the strategies tree and calls the visit function for each node. +func Walk(root *Strategy, visit VisitFunc) error { + if root == nil { + return errors.New("root strategy is nil") + } + + if err := walk(root, visit); err != nil { + if errors.Is(err, ErrVisitRemove) || errors.Is(err, ErrVisitStop) { + return nil + } + return err + } + + return nil +} + +// walk walks the strategies tree and calls the visit function for each node. +// It returns an error if the visit function returns an error. +func walk(root *Strategy, visit VisitFunc) error { + if *root == nil { + // No strategy. + return nil + } + + // Allow the visit function to customize the behaviour of the walk before visiting the children. 
+ if err := visit(*root); err != nil { + if errors.Is(err, ErrVisitRemove) { + *root = nil + } + + return err + } + + if s, ok := (*root).(*MultiStrategy); ok { + var i int + for range s.Strategies { + if err := walk(&s.Strategies[i], visit); err != nil { + if errors.Is(err, ErrVisitRemove) { + s.Strategies = slices.Delete(s.Strategies, i, i+1) + if errors.Is(err, VisitStop) { + return VisitStop + } + continue + } + + return err + } + i++ + } + } + + return nil +} diff --git a/vendor/github.com/yusufpapurcu/wmi/wmi.go b/vendor/github.com/yusufpapurcu/wmi/wmi.go index 26c3581c9..03f386ed5 100644 --- a/vendor/github.com/yusufpapurcu/wmi/wmi.go +++ b/vendor/github.com/yusufpapurcu/wmi/wmi.go @@ -456,6 +456,18 @@ func (c *Client) loadEntity(dst interface{}, src *ole.IDispatch) (errFieldMismat Reason: "not a Float32", } } + case float64: + switch f.Kind() { + case reflect.Float32, reflect.Float64: + f.SetFloat(val) + default: + return &ErrFieldMismatch{ + StructType: of.Type(), + FieldName: n, + Reason: "not a Float64", + } + } + default: if f.Kind() == reflect.Slice { switch f.Type().Elem().Kind() { diff --git a/vendor/modules.txt b/vendor/modules.txt index 32d4dabc8..bf872bce4 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -46,7 +46,7 @@ cloud.google.com/go/storage/experimental cloud.google.com/go/storage/internal cloud.google.com/go/storage/internal/apiv2 cloud.google.com/go/storage/internal/apiv2/storagepb -# dario.cat/mergo v1.0.0 +# dario.cat/mergo v1.0.2 ## explicit; go 1.13 dario.cat/mergo # github.com/Azure/azure-sdk-for-go/sdk/azcore v1.19.1 @@ -322,9 +322,6 @@ github.com/docker/docker/api/types/time github.com/docker/docker/api/types/versions github.com/docker/docker/api/types/volume github.com/docker/docker/client -github.com/docker/docker/errdefs -github.com/docker/docker/pkg/archive -github.com/docker/docker/pkg/idtools github.com/docker/docker/pkg/jsonmessage github.com/docker/docker/pkg/stdcopy # github.com/docker/go-connections v0.6.0 @@ -338,6 +335,12 @@ github.com/docker/go-units # github.com/dustin/go-humanize v1.0.1 ## explicit; go 1.16 github.com/dustin/go-humanize +# github.com/ebitengine/purego v0.8.4 +## explicit; go 1.18 +github.com/ebitengine/purego +github.com/ebitengine/purego/internal/cgo +github.com/ebitengine/purego/internal/fakecgo +github.com/ebitengine/purego/internal/strings # github.com/envoyproxy/go-control-plane/envoy v1.32.4 ## explicit; go 1.22 github.com/envoyproxy/go-control-plane/envoy/admin/v3 @@ -540,8 +543,6 @@ github.com/moby/go-archive/tarheader ## explicit; go 1.19 github.com/moby/patternmatcher github.com/moby/patternmatcher/ignorefile -# github.com/moby/sys/atomicwriter v0.1.0 -## explicit; go 1.18 # github.com/moby/sys/sequential v0.6.0 ## explicit; go 1.17 github.com/moby/sys/sequential @@ -626,17 +627,14 @@ github.com/rs/xid # github.com/sagikazarmark/locafero v0.11.0 ## explicit; go 1.23.0 github.com/sagikazarmark/locafero -# github.com/shirou/gopsutil/v3 v3.23.12 -## explicit; go 1.15 -github.com/shirou/gopsutil/v3/common -github.com/shirou/gopsutil/v3/cpu -github.com/shirou/gopsutil/v3/internal/common -github.com/shirou/gopsutil/v3/mem -github.com/shirou/gopsutil/v3/net -github.com/shirou/gopsutil/v3/process -# github.com/shoenig/go-m1cpu v0.1.6 -## explicit; go 1.20 -github.com/shoenig/go-m1cpu +# github.com/shirou/gopsutil/v4 v4.25.6 +## explicit; go 1.23 +github.com/shirou/gopsutil/v4/common +github.com/shirou/gopsutil/v4/cpu +github.com/shirou/gopsutil/v4/internal/common +github.com/shirou/gopsutil/v4/mem 
+github.com/shirou/gopsutil/v4/net +github.com/shirou/gopsutil/v4/process # github.com/sirupsen/logrus v1.9.3 ## explicit; go 1.13 github.com/sirupsen/logrus @@ -686,20 +684,21 @@ github.com/stretchr/testify/require # github.com/subosito/gotenv v1.6.0 ## explicit; go 1.18 github.com/subosito/gotenv -# github.com/testcontainers/testcontainers-go v0.34.0 -## explicit; go 1.22 +# github.com/testcontainers/testcontainers-go v0.39.0 +## explicit; go 1.24.0 github.com/testcontainers/testcontainers-go github.com/testcontainers/testcontainers-go/exec github.com/testcontainers/testcontainers-go/internal github.com/testcontainers/testcontainers-go/internal/config github.com/testcontainers/testcontainers-go/internal/core github.com/testcontainers/testcontainers-go/internal/core/network +github.com/testcontainers/testcontainers-go/log github.com/testcontainers/testcontainers-go/wait # github.com/testcontainers/testcontainers-go/modules/minio v0.34.0 ## explicit; go 1.22 github.com/testcontainers/testcontainers-go/modules/minio -# github.com/testcontainers/testcontainers-go/modules/mongodb v0.34.0 -## explicit; go 1.22 +# github.com/testcontainers/testcontainers-go/modules/mongodb v0.39.0 +## explicit; go 1.24.0 github.com/testcontainers/testcontainers-go/modules/mongodb # github.com/tinylib/msgp v1.4.0 ## explicit; go 1.22 @@ -722,7 +721,7 @@ github.com/xdg-go/stringprep # github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78 ## explicit; go 1.17 github.com/youmark/pkcs8 -# github.com/yusufpapurcu/wmi v1.2.3 +# github.com/yusufpapurcu/wmi v1.2.4 ## explicit; go 1.16 github.com/yusufpapurcu/wmi # go.mongodb.org/mongo-driver v1.17.4 From 0f6c7c93fa9f780258b905755cfe2d94d5ceb80c Mon Sep 17 00:00:00 2001 From: Boris Ilijic Date: Thu, 23 Oct 2025 19:49:28 +0200 Subject: [PATCH 75/95] Add config --set option for time.Duration --- pbm/config/config.go | 5 +- pbm/config/config_test.go | 180 ++++++++++++++++++++++++++++++++++++++ pbm/storage/gcs/gcs.go | 3 + 3 files changed, 187 insertions(+), 1 deletion(-) diff --git a/pbm/config/config.go b/pbm/config/config.go index 7ad45d694..0256bd0dc 100644 --- a/pbm/config/config.go +++ b/pbm/config/config.go @@ -644,7 +644,10 @@ func SetConfigVar(ctx context.Context, m connect.Client, key, val string) error case reflect.Int, reflect.Int32: v, err = strconv.ParseInt(val, 10, 32) case reflect.Int64: - v, err = strconv.ParseInt(val, 10, 64) + v, err = time.ParseDuration(val) + if err != nil { + v, err = strconv.ParseInt(val, 10, 64) + } case reflect.Float32: v, err = strconv.ParseFloat(val, 32) case reflect.Float64: diff --git a/pbm/config/config_test.go b/pbm/config/config_test.go index 2fd23a386..5edd11122 100644 --- a/pbm/config/config_test.go +++ b/pbm/config/config_test.go @@ -1,10 +1,21 @@ package config import ( + "context" + "fmt" + "log" + "os" "testing" + "time" "github.com/google/go-cmp/cmp" + "github.com/testcontainers/testcontainers-go" + "github.com/testcontainers/testcontainers-go/modules/mongodb" + "go.mongodb.org/mongo-driver/mongo" + "go.mongodb.org/mongo-driver/mongo/options" + "go.mongodb.org/mongo-driver/mongo/readpref" + "github.com/percona/percona-backup-mongodb/pbm/connect" "github.com/percona/percona-backup-mongodb/pbm/storage" "github.com/percona/percona-backup-mongodb/pbm/storage/azure" "github.com/percona/percona-backup-mongodb/pbm/storage/fs" @@ -220,6 +231,175 @@ func TestCastError(t *testing.T) { }) } +var connClient connect.Client + +func TestMain(m *testing.M) { + ctx := context.Background() + mongodbContainer, err := 
mongodb.Run(ctx, "perconalab/percona-server-mongodb:8.0.4-multi", + mongodb.WithReplicaSet("rs1")) + if err != nil { + log.Fatalf("error while creating mongo test container: %v", err) + } + connStr, err := mongodbContainer.ConnectionString(ctx) + if err != nil { + log.Fatalf("conn string error: %v", err) + } + connStr += "&directConnection=true" + mClient, err := mongo.Connect(ctx, options.Client().ApplyURI(connStr)) + if err != nil { + log.Fatalf("mongo client connect error: %v", err) + } + err = mClient.Ping(ctx, readpref.Primary()) + if err != nil { + log.Fatalf("conn string: %s, ping: %v", connStr, err) + } + + connClient = connect.UnsafeClient(mClient) + + code := m.Run() + + err = mClient.Disconnect(ctx) + if err != nil { + log.Fatalf("mongo client disconnect error: %v", err) + } + if err := testcontainers.TerminateContainer(mongodbContainer); err != nil { + log.Fatalf("failed to terminate container: %s", err) + } + + os.Exit(code) +} + +func TestConfig(t *testing.T) { + ctx := context.Background() + + t.Run("gcs config", func(t *testing.T) { + wantCfg := &Config{ + Storage: StorageConf{ + Type: storage.GCS, + GCS: &gcs.Config{ + Bucket: "b1", + Prefix: "p1", + Credentials: gcs.Credentials{ + ClientEmail: "ce1", + PrivateKey: "pk1", + }, + ChunkSize: 100, + MaxObjSizeGB: floatPtr(1.1), + Retryer: &gcs.Retryer{ + BackoffInitial: 11 * time.Minute, + BackoffMax: 111 * time.Minute, + BackoffMultiplier: 11.1, + }, + }, + }, + } + err := SetConfig(ctx, connClient, &Config{Storage: StorageConf{Type: storage.GCS}}) + if err != nil { + t.Fatal("set config:", err) + } + + err = SetConfigVar(ctx, connClient, + "storage.gcs.bucket", + wantCfg.Storage.GCS.Bucket, + ) + if err != nil { + t.Fatal("set config var", err) + } + + err = SetConfigVar(ctx, connClient, + "storage.gcs.prefix", + wantCfg.Storage.GCS.Prefix, + ) + if err != nil { + t.Fatal("set config var", err) + } + + err = SetConfigVar(ctx, connClient, + "storage.gcs.credentials.clientEmail", + wantCfg.Storage.GCS.Credentials.ClientEmail, + ) + if err != nil { + t.Fatal("set config var", err) + } + + err = SetConfigVar(ctx, connClient, + "storage.gcs.credentials.privateKey", + wantCfg.Storage.GCS.Credentials.PrivateKey, + ) + if err != nil { + t.Fatal("set config var", err) + } + + err = SetConfigVar(ctx, connClient, + "storage.gcs.chunkSize", + fmt.Sprintf("%d", wantCfg.Storage.GCS.ChunkSize), + ) + if err != nil { + t.Fatal("set config var", err) + } + + err = SetConfigVar(ctx, connClient, + "storage.gcs.maxObjSizeGB", + fmt.Sprintf("%f", *wantCfg.Storage.GCS.MaxObjSizeGB), + ) + if err != nil { + t.Fatal("set config var", err) + } + + err = SetConfigVar(ctx, connClient, + "storage.gcs.retryer.backoffInitial", + wantCfg.Storage.GCS.Retryer.BackoffInitial.String(), + ) + if err != nil { + t.Fatal("set config var", err) + } + + err = SetConfigVar(ctx, connClient, + "storage.gcs.retryer.backoffMax", + wantCfg.Storage.GCS.Retryer.BackoffMax.String(), + ) + if err != nil { + t.Fatal("set config var", err) + } + err = SetConfigVar(ctx, connClient, + "storage.gcs.retryer.backoffMultiplier", + fmt.Sprintf("%f", wantCfg.Storage.GCS.Retryer.BackoffMultiplier), + ) + if err != nil { + t.Fatal("set config var", err) + } + + gotCfg, err := GetConfig(ctx, connClient) + if err != nil { + t.Fatal("get config:", err) + } + + if !gotCfg.Storage.Equal(&wantCfg.Storage) { + t.Fatalf("wrong config after using set config var, diff=%s", + cmp.Diff(*wantCfg.Storage.GCS, *gotCfg.Storage.GCS)) + } + }) +} + +func Test(t *testing.T) { + testCases := []struct { + 
+		desc string
+	}{
+		{
+			desc: "",
+		},
+	}
+	for _, tC := range testCases {
+		t.Run(tC.desc, func(t *testing.T) {
+		})
+	}
+}
+
 func boolPtr(b bool) *bool {
 	return &b
 }
+
+func floatPtr(f float64) *float64 {
+	return &f
+}
diff --git a/pbm/storage/gcs/gcs.go b/pbm/storage/gcs/gcs.go
index 69eba550a..4e27f817d 100644
--- a/pbm/storage/gcs/gcs.go
+++ b/pbm/storage/gcs/gcs.go
@@ -121,6 +121,9 @@ func (cfg *Config) Equal(other *Config) bool {
 	if !reflect.DeepEqual(cfg.Credentials, other.Credentials) {
 		return false
 	}
+	if !reflect.DeepEqual(cfg.Retryer, other.Retryer) {
+		return false
+	}
 
 	return true
 }
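
With the time.ParseDuration fallback from this patch, int64-backed config fields accept both duration strings and raw integers. A short sketch against the fixture above, assuming ctx and connClient are in scope as in TestMain:

    // Duration string, handled by time.ParseDuration.
    if err := config.SetConfigVar(ctx, connClient, "storage.gcs.retryer.backoffInitial", "11m"); err != nil {
    	t.Fatal("set duration from string:", err)
    }

    // A raw integer still works via the strconv.ParseInt fallback
    // (nanoseconds for time.Duration-backed fields).
    if err := config.SetConfigVar(ctx, connClient, "storage.gcs.retryer.backoffMax", "660000000000"); err != nil {
    	t.Fatal("set duration from int64:", err)
    }
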
+	// https://pkg.go.dev/cloud.google.com/go/storage#Writer
+	ChunkSize    int      `bson:"chunkSize,omitempty" json:"chunkSize,omitempty" yaml:"chunkSize,omitempty"`
+	MaxObjSizeGB *float64 `bson:"maxObjSizeGB,omitempty" json:"maxObjSizeGB,omitempty" yaml:"maxObjSizeGB,omitempty"`
+
+	Retryer *Retryer `bson:"retryer,omitempty" json:"retryer,omitempty" yaml:"retryer,omitempty"`
+}
+
+type Credentials struct {
+	// JSON credentials (service account)
+	ClientEmail string `bson:"clientEmail" json:"clientEmail,omitempty" yaml:"clientEmail,omitempty"`
+	PrivateKey  string `bson:"privateKey" json:"privateKey,omitempty" yaml:"privateKey,omitempty"`
+
+	// HMAC credentials for XML API (S3 compatibility)
+	HMACAccessKey string `bson:"hmacAccessKey" json:"hmacAccessKey,omitempty" yaml:"hmacAccessKey,omitempty"`
+	HMACSecret    string `bson:"hmacSecret" json:"hmacSecret,omitempty" yaml:"hmacSecret,omitempty"`
+}
+
+type Retryer struct {
+	// BackoffInitial is the initial value of the retry period.
+	// https://pkg.go.dev/github.com/googleapis/gax-go/v2@v2.12.3#Backoff.Initial
+	BackoffInitial time.Duration `bson:"backoffInitial" json:"backoffInitial" yaml:"backoffInitial"`
+
+	// BackoffMax is the maximum value of the retry period.
+	// https://pkg.go.dev/github.com/googleapis/gax-go/v2@v2.12.3#Backoff.Max
+	BackoffMax time.Duration `bson:"backoffMax" json:"backoffMax" yaml:"backoffMax"`
+
+	// BackoffMultiplier is the factor by which the retry period increases.
+	// https://pkg.go.dev/github.com/googleapis/gax-go/v2@v2.12.3#Backoff.Multiplier
+	// Ignored for MinIO (only Initial and Max used)
+	BackoffMultiplier float64 `bson:"backoffMultiplier" json:"backoffMultiplier" yaml:"backoffMultiplier"`
+
+	// MaxAttempts configures the maximum number of tries.
+	// E.g. if it's set to 5, the op will be attempted up to 5 times total (initial call + 4 retries).
+	// https://pkg.go.dev/cloud.google.com/go/storage#WithMaxAttempts
+	MaxAttempts int `bson:"maxAttempts" json:"maxAttempts" yaml:"maxAttempts"`
+}
+
+func (cfg *Config) Clone() *Config {
+	if cfg == nil {
+		return nil
+	}
+
+	rv := *cfg
+	if cfg.MaxObjSizeGB != nil {
+		v := *cfg.MaxObjSizeGB
+		rv.MaxObjSizeGB = &v
+	}
+	if cfg.Retryer != nil {
+		v := *cfg.Retryer
+		rv.Retryer = &v
+	}
+
+	return &rv
+}
+
+func (cfg *Config) Equal(other *Config) bool {
+	if cfg == nil || other == nil {
+		return cfg == other
+	}
+
+	if cfg.Bucket != other.Bucket {
+		return false
+	}
+	if cfg.Prefix != other.Prefix {
+		return false
+	}
+	if cfg.ChunkSize != other.ChunkSize {
+		return false
+	}
+	if !reflect.DeepEqual(cfg.MaxObjSizeGB, other.MaxObjSizeGB) {
+		return false
+	}
+	if !reflect.DeepEqual(cfg.Credentials, other.Credentials) {
+		return false
+	}
+	if !reflect.DeepEqual(cfg.Retryer, other.Retryer) {
+		return false
+	}
+
+	return true
+}
+
+// IsSameStorage identifies the same instance of the GCS storage.
+func (cfg *Config) IsSameStorage(other *Config) bool {
+	if cfg == nil || other == nil {
+		return cfg == other
+	}
+
+	if cfg.Bucket != other.Bucket {
+		return false
+	}
+	if cfg.Prefix != other.Prefix {
+		return false
+	}
+
+	return true
+}
+
+func (cfg *Config) GetMaxObjSizeGB() float64 {
+	if cfg.MaxObjSizeGB != nil && *cfg.MaxObjSizeGB > 0 {
+		return *cfg.MaxObjSizeGB
+	}
+	return defaultMaxObjSizeGB
+}
diff --git a/pbm/storage/gcs/gcs.go b/pbm/storage/gcs/gcs.go
index e4122d16a..07c82b33d 100644
--- a/pbm/storage/gcs/gcs.go
+++ b/pbm/storage/gcs/gcs.go
@@ -3,9 +3,7 @@ package gcs
 import (
 	"io"
 	"path"
-	"reflect"
 	"strings"
-	"time"
 
 	"github.com/percona/percona-backup-mongodb/pbm/errors"
 	"github.com/percona/percona-backup-mongodb/pbm/log"
@@ -17,49 +15,6 @@ const (
 	defaultMaxObjSizeGB = 5018 // 4.9 TB
 )
 
-type Config struct {
-	Bucket      string      `bson:"bucket" json:"bucket" yaml:"bucket"`
-	Prefix      string      `bson:"prefix" json:"prefix" yaml:"prefix"`
-	Credentials Credentials `bson:"credentials" json:"-" yaml:"credentials"`
-
-	// The maximum number of bytes that the Writer will attempt to send in a single request.
-	// https://pkg.go.dev/cloud.google.com/go/storage#Writer
-	ChunkSize    int      `bson:"chunkSize,omitempty" json:"chunkSize,omitempty" yaml:"chunkSize,omitempty"`
-	MaxObjSizeGB *float64 `bson:"maxObjSizeGB,omitempty" json:"maxObjSizeGB,omitempty" yaml:"maxObjSizeGB,omitempty"`
-
-	Retryer *Retryer `bson:"retryer,omitempty" json:"retryer,omitempty" yaml:"retryer,omitempty"`
-}
-
-type Credentials struct {
-	// JSON credentials (service account)
-	ClientEmail string `bson:"clientEmail" json:"clientEmail,omitempty" yaml:"clientEmail,omitempty"`
-	PrivateKey  string `bson:"privateKey" json:"privateKey,omitempty" yaml:"privateKey,omitempty"`
-
-	// HMAC credentials for XML API (S3 compatibility)
-	HMACAccessKey string `bson:"hmacAccessKey" json:"hmacAccessKey,omitempty" yaml:"hmacAccessKey,omitempty"`
-	HMACSecret    string `bson:"hmacSecret" json:"hmacSecret,omitempty" yaml:"hmacSecret,omitempty"`
-}
-
-type Retryer struct {
-	// BackoffInitial is the initial value of the retry period.
-	// https://pkg.go.dev/github.com/googleapis/gax-go/v2@v2.12.3#Backoff.Initial
-	BackoffInitial time.Duration `bson:"backoffInitial" json:"backoffInitial" yaml:"backoffInitial"`
-
-	// BackoffMax is the maximum value of the retry period.
-	// https://pkg.go.dev/github.com/googleapis/gax-go/v2@v2.12.3#Backoff.Max
-	BackoffMax time.Duration `bson:"backoffMax" json:"backoffMax" yaml:"backoffMax"`
-
-	// BackoffMultiplier is the factor by which the retry period increases.
-	// https://pkg.go.dev/github.com/googleapis/gax-go/v2@v2.12.3#Backoff.Multiplier
-	// Ignored for MinIO (only Initial and Max used)
-	BackoffMultiplier float64 `bson:"backoffMultiplier" json:"backoffMultiplier" yaml:"backoffMultiplier"`
-
-	// MaxAttempts configures the maximum number of tries.
-	// E.g. if it's set to 5, the op will be attempted up to 5 times total (initial call + 4 retries).
- // https://pkg.go.dev/cloud.google.com/go/storage#WithMaxAttempts - MaxAttempts int `bson:"maxAttempts" json:"maxAttempts" yaml:"maxAttempts"` -} - type ServiceAccountCredentials struct { Type string `json:"type"` PrivateKey string `json:"private_key"` @@ -88,74 +43,6 @@ type GCS struct { d *Download } -func (cfg *Config) Clone() *Config { - if cfg == nil { - return nil - } - - rv := *cfg - if cfg.MaxObjSizeGB != nil { - v := *cfg.MaxObjSizeGB - rv.MaxObjSizeGB = &v - } - if cfg.Retryer != nil { - v := *cfg.Retryer - rv.Retryer = &v - } - - return &rv -} - -func (cfg *Config) Equal(other *Config) bool { - if cfg == nil || other == nil { - return cfg == other - } - - if cfg.Bucket != other.Bucket { - return false - } - if cfg.Prefix != other.Prefix { - return false - } - if cfg.ChunkSize != other.ChunkSize { - return false - } - if !reflect.DeepEqual(cfg.MaxObjSizeGB, other.MaxObjSizeGB) { - return false - } - if !reflect.DeepEqual(cfg.Credentials, other.Credentials) { - return false - } - if !reflect.DeepEqual(cfg.Retryer, other.Retryer) { - return false - } - - return true -} - -// IsSameStorage identifies the same instance of the GCS storage. -func (cfg *Config) IsSameStorage(other *Config) bool { - if cfg == nil || other == nil { - return cfg == other - } - - if cfg.Bucket != other.Bucket { - return false - } - if cfg.Prefix != other.Prefix { - return false - } - - return true -} - -func (cfg *Config) GetMaxObjSizeGB() float64 { - if cfg.MaxObjSizeGB != nil && *cfg.MaxObjSizeGB > 0 { - return *cfg.MaxObjSizeGB - } - return defaultMaxObjSizeGB -} - func New(opts *Config, node string, l log.LogEvent) (storage.Storage, error) { g := &GCS{ opts: opts, From fd9543fec51c731e6435bf3099466f8a25abe930 Mon Sep 17 00:00:00 2001 From: Boris Ilijic Date: Fri, 24 Oct 2025 11:31:46 +0200 Subject: [PATCH 78/95] Rename opts -> cfg when Config is referenced --- pbm/storage/gcs/download.go | 2 +- pbm/storage/gcs/gcs.go | 30 ++++++++++---------- pbm/storage/gcs/google_client.go | 40 +++++++++++++------------- pbm/storage/gcs/hmac_client.go | 48 ++++++++++++++++---------------- 4 files changed, 60 insertions(+), 60 deletions(-) diff --git a/pbm/storage/gcs/download.go b/pbm/storage/gcs/download.go index 26b2ef840..1e948472a 100644 --- a/pbm/storage/gcs/download.go +++ b/pbm/storage/gcs/download.go @@ -112,7 +112,7 @@ func (g *GCS) sourceReader(fname string, arenas []*storage.Arena, cc, downloadCh } case err := <-pr.Errc: - exitErr = errors.Wrapf(err, "SourceReader: download '%s/%s'", g.opts.Bucket, fname) + exitErr = errors.Wrapf(err, "SourceReader: download '%s/%s'", g.cfg.Bucket, fname) return } } diff --git a/pbm/storage/gcs/gcs.go b/pbm/storage/gcs/gcs.go index 07c82b33d..ea2deb3b9 100644 --- a/pbm/storage/gcs/gcs.go +++ b/pbm/storage/gcs/gcs.go @@ -36,27 +36,27 @@ type gcsClient interface { } type GCS struct { - opts *Config - log log.LogEvent + cfg *Config + log log.LogEvent client gcsClient d *Download } -func New(opts *Config, node string, l log.LogEvent) (storage.Storage, error) { +func New(cfg *Config, node string, l log.LogEvent) (storage.Storage, error) { g := &GCS{ - opts: opts, - log: l, + cfg: cfg, + log: l, } - if g.opts.Credentials.HMACAccessKey != "" && g.opts.Credentials.HMACSecret != "" { - hc, err := newHMACClient(g.opts, g.log) + if g.cfg.Credentials.HMACAccessKey != "" && g.cfg.Credentials.HMACSecret != "" { + hc, err := newHMACClient(g.cfg, g.log) if err != nil { return nil, errors.Wrap(err, "new hmac client") } g.client = hc } else { - gc, err := newGoogleClient(g.opts, g.log) + 
gc, err := newGoogleClient(g.cfg, g.log) if err != nil { return nil, errors.Wrap(err, "new google client") } @@ -69,7 +69,7 @@ func New(opts *Config, node string, l log.LogEvent) (storage.Storage, error) { cc: 1, } - return storage.NewSplitMergeMW(g, opts.GetMaxObjSizeGB()), nil + return storage.NewSplitMergeMW(g, cfg.GetMaxObjSizeGB()), nil } func NewWithDownloader( @@ -83,18 +83,18 @@ func NewWithDownloader( } g := &GCS{ - opts: opts, - log: l, + cfg: opts, + log: l, } - if g.opts.Credentials.HMACAccessKey != "" && g.opts.Credentials.HMACSecret != "" { - hc, err := newHMACClient(g.opts, g.log) + if g.cfg.Credentials.HMACAccessKey != "" && g.cfg.Credentials.HMACSecret != "" { + hc, err := newHMACClient(g.cfg, g.log) if err != nil { return nil, errors.Wrap(err, "new hmac client") } g.client = hc } else { - gc, err := newGoogleClient(g.opts, g.log) + gc, err := newGoogleClient(g.cfg, g.log) if err != nil { return nil, errors.Wrap(err, "new google client") } @@ -132,7 +132,7 @@ func (g *GCS) FileStat(name string) (storage.FileInfo, error) { } func (g *GCS) List(prefix, suffix string) ([]storage.FileInfo, error) { - prfx := path.Join(g.opts.Prefix, prefix) + prfx := path.Join(g.cfg.Prefix, prefix) if prfx != "" && !strings.HasSuffix(prfx, "/") { prfx += "/" diff --git a/pbm/storage/gcs/google_client.go b/pbm/storage/gcs/google_client.go index ebe1aeea9..fbcd436c8 100644 --- a/pbm/storage/gcs/google_client.go +++ b/pbm/storage/gcs/google_client.go @@ -21,28 +21,28 @@ import ( type googleClient struct { bucketHandle *storagegcs.BucketHandle - opts *Config + cfg *Config log log.LogEvent } -func newGoogleClient(opts *Config, l log.LogEvent) (*googleClient, error) { +func newGoogleClient(cfg *Config, l log.LogEvent) (*googleClient, error) { ctx := context.Background() - if opts.Credentials.PrivateKey == "" || opts.Credentials.ClientEmail == "" { + if cfg.Credentials.PrivateKey == "" || cfg.Credentials.ClientEmail == "" { return nil, errors.New("clientEmail and privateKey are required for GCS credentials") } creds, err := json.Marshal(ServiceAccountCredentials{ Type: "service_account", - PrivateKey: opts.Credentials.PrivateKey, - ClientEmail: opts.Credentials.ClientEmail, + PrivateKey: cfg.Credentials.PrivateKey, + ClientEmail: cfg.Credentials.ClientEmail, AuthURI: "https://accounts.google.com/o/oauth2/auth", TokenURI: "https://oauth2.googleapis.com/token", UniverseDomain: "googleapis.com", AuthProviderCertURL: "https://www.googleapis.com/oauth2/v1/certs", ClientCertURL: fmt.Sprintf( "https://www.googleapis.com/robot/v1/metadata/x509/%s", - opts.Credentials.ClientEmail, + cfg.Credentials.ClientEmail, ), }) if err != nil { @@ -54,23 +54,23 @@ func newGoogleClient(opts *Config, l log.LogEvent) (*googleClient, error) { return nil, errors.Wrap(err, "new GCS client") } - bh := cli.Bucket(opts.Bucket) + bh := cli.Bucket(cfg.Bucket) - if opts.Retryer != nil { + if cfg.Retryer != nil { bh = bh.Retryer( storagegcs.WithBackoff(gax.Backoff{ - Initial: opts.Retryer.BackoffInitial, - Max: opts.Retryer.BackoffMax, - Multiplier: opts.Retryer.BackoffMultiplier, + Initial: cfg.Retryer.BackoffInitial, + Max: cfg.Retryer.BackoffMax, + Multiplier: cfg.Retryer.BackoffMultiplier, }), - storagegcs.WithMaxAttempts(opts.Retryer.MaxAttempts), + storagegcs.WithMaxAttempts(cfg.Retryer.MaxAttempts), storagegcs.WithPolicy(storagegcs.RetryAlways), ) } return &googleClient{ bucketHandle: bh, - opts: opts, + cfg: cfg, log: l, }, nil } @@ -90,7 +90,7 @@ func (g googleClient) save(name string, data io.Reader, options 
...storage.Optio 10<<20, // default: 10 MiB align, 10_000, - int64(g.opts.ChunkSize), + int64(g.cfg.ChunkSize), ) if rem := partSize % align; rem != 0 { @@ -105,7 +105,7 @@ func (g googleClient) save(name string, data io.Reader, options ...storage.Optio } ctx := context.Background() - w := g.bucketHandle.Object(path.Join(g.opts.Prefix, name)).NewWriter(ctx) + w := g.bucketHandle.Object(path.Join(g.cfg.Prefix, name)).NewWriter(ctx) w.ChunkSize = int(partSize) if g.log != nil && opts.UseLogger { w.ProgressFunc = func(written int64) { @@ -133,7 +133,7 @@ func (g googleClient) save(name string, data io.Reader, options ...storage.Optio func (g googleClient) fileStat(name string) (storage.FileInfo, error) { ctx := context.Background() - attrs, err := g.bucketHandle.Object(path.Join(g.opts.Prefix, name)).Attrs(ctx) + attrs, err := g.bucketHandle.Object(path.Join(g.cfg.Prefix, name)).Attrs(ctx) if err != nil { if errors.Is(err, storagegcs.ErrObjectNotExist) { return storage.FileInfo{}, storage.ErrNotExist @@ -196,7 +196,7 @@ func (g googleClient) list(prefix, suffix string) ([]storage.FileInfo, error) { func (g googleClient) delete(name string) error { ctx := context.Background() - err := g.bucketHandle.Object(path.Join(g.opts.Prefix, name)).Delete(ctx) + err := g.bucketHandle.Object(path.Join(g.cfg.Prefix, name)).Delete(ctx) if err != nil { if errors.Is(err, storagegcs.ErrObjectNotExist) { return storage.ErrNotExist @@ -210,8 +210,8 @@ func (g googleClient) delete(name string) error { func (g googleClient) copy(src, dst string) error { ctx := context.Background() - srcObj := g.bucketHandle.Object(path.Join(g.opts.Prefix, src)) - dstObj := g.bucketHandle.Object(path.Join(g.opts.Prefix, dst)) + srcObj := g.bucketHandle.Object(path.Join(g.cfg.Prefix, src)) + dstObj := g.bucketHandle.Object(path.Join(g.cfg.Prefix, dst)) _, err := g.fileStat(src) if err == storage.ErrNotExist { @@ -226,7 +226,7 @@ func (g googleClient) getPartialObject(name string, buf *storage.Arena, start, l ctx, cancel := context.WithTimeout(context.Background(), time.Second*60) defer cancel() - obj := g.bucketHandle.Object(path.Join(g.opts.Prefix, name)) + obj := g.bucketHandle.Object(path.Join(g.cfg.Prefix, name)) reader, err := obj.NewRangeReader(ctx, start, length) if err != nil { if errors.Is(err, storagegcs.ErrObjectNotExist) || isRangeNotSatisfiable(err) { diff --git a/pbm/storage/gcs/hmac_client.go b/pbm/storage/gcs/hmac_client.go index fae5c1ce6..9a3c1bc5d 100644 --- a/pbm/storage/gcs/hmac_client.go +++ b/pbm/storage/gcs/hmac_client.go @@ -21,26 +21,26 @@ import ( type hmacClient struct { client *minio.Client - opts *Config + cfg *Config log log.LogEvent } -func newHMACClient(opts *Config, l log.LogEvent) (*hmacClient, error) { - if opts.Credentials.HMACAccessKey == "" || opts.Credentials.HMACSecret == "" { +func newHMACClient(cfg *Config, l log.LogEvent) (*hmacClient, error) { + if cfg.Credentials.HMACAccessKey == "" || cfg.Credentials.HMACSecret == "" { return nil, errors.New("HMACAccessKey and HMACSecret are required for HMAC GCS credentials") } - if opts.Retryer != nil { - if opts.Retryer.BackoffInitial > 0 { - minio.DefaultRetryUnit = opts.Retryer.BackoffInitial + if cfg.Retryer != nil { + if cfg.Retryer.BackoffInitial > 0 { + minio.DefaultRetryUnit = cfg.Retryer.BackoffInitial } - if opts.Retryer.BackoffMax > 0 { - minio.DefaultRetryCap = opts.Retryer.BackoffMax + if cfg.Retryer.BackoffMax > 0 { + minio.DefaultRetryCap = cfg.Retryer.BackoffMax } } minioClient, err := minio.New(gcsEndpointURL, &minio.Options{ - Creds: 
credentials.NewStaticV2(opts.Credentials.HMACAccessKey, opts.Credentials.HMACSecret, ""), + Creds: credentials.NewStaticV2(cfg.Credentials.HMACAccessKey, cfg.Credentials.HMACSecret, ""), }) if err != nil { return nil, errors.Wrap(err, "create minio client for GCS HMAC") @@ -48,7 +48,7 @@ func newHMACClient(opts *Config, l log.LogEvent) (*hmacClient, error) { return &hmacClient{ client: minioClient, - opts: opts, + cfg: cfg, log: l, }, nil } @@ -66,7 +66,7 @@ func (h hmacClient) save(name string, data io.Reader, options ...storage.Option) 10<<20, // default: 10 MiB 5<<20, // min GCS XML part size 10_000, - int64(h.opts.ChunkSize), + int64(h.cfg.ChunkSize), ) if h.log != nil && opts.UseLogger { @@ -85,8 +85,8 @@ func (h hmacClient) save(name string, data io.Reader, options ...storage.Option) } putInfo, err := h.client.PutObject( context.Background(), - h.opts.Bucket, - path.Join(h.opts.Prefix, name), + h.cfg.Bucket, + path.Join(h.cfg.Prefix, name), dataWithCRC, -1, putOpts, @@ -105,9 +105,9 @@ func (h hmacClient) save(name string, data io.Reader, options ...storage.Option) } func (h hmacClient) fileStat(name string) (storage.FileInfo, error) { - objectName := path.Join(h.opts.Prefix, name) + objectName := path.Join(h.cfg.Prefix, name) - object, err := h.client.StatObject(context.Background(), h.opts.Bucket, objectName, minio.StatObjectOptions{}) + object, err := h.client.StatObject(context.Background(), h.cfg.Bucket, objectName, minio.StatObjectOptions{}) if err != nil { respErr := minio.ToErrorResponse(err) if respErr.Code == "NoSuchKey" || respErr.Code == "NotFound" { @@ -134,7 +134,7 @@ func (h hmacClient) list(prefix, suffix string) ([]storage.FileInfo, error) { var files []storage.FileInfo - for obj := range h.client.ListObjects(ctx, h.opts.Bucket, minio.ListObjectsOptions{ + for obj := range h.client.ListObjects(ctx, h.cfg.Bucket, minio.ListObjectsOptions{ Prefix: prefix, Recursive: true, }) { @@ -162,9 +162,9 @@ func (h hmacClient) list(prefix, suffix string) ([]storage.FileInfo, error) { func (h hmacClient) delete(name string) error { ctx := context.Background() - objectName := path.Join(h.opts.Prefix, name) + objectName := path.Join(h.cfg.Prefix, name) - err := h.client.RemoveObject(ctx, h.opts.Bucket, objectName, minio.RemoveObjectOptions{}) + err := h.client.RemoveObject(ctx, h.cfg.Bucket, objectName, minio.RemoveObjectOptions{}) if err != nil { respErr := minio.ToErrorResponse(err) if respErr.Code == "NoSuchKey" || respErr.Code == "NotFound" { @@ -182,12 +182,12 @@ func (h hmacClient) copy(src, dst string) error { _, err := h.client.CopyObject(ctx, minio.CopyDestOptions{ - Bucket: h.opts.Bucket, - Object: path.Join(h.opts.Prefix, dst), + Bucket: h.cfg.Bucket, + Object: path.Join(h.cfg.Prefix, dst), }, minio.CopySrcOptions{ - Bucket: h.opts.Bucket, - Object: path.Join(h.opts.Prefix, src), + Bucket: h.cfg.Bucket, + Object: path.Join(h.cfg.Prefix, src), }, ) @@ -198,7 +198,7 @@ func (h hmacClient) getPartialObject(name string, buf *storage.Arena, start, len ctx, cancel := context.WithTimeout(context.Background(), time.Second*60) defer cancel() - objectName := path.Join(h.opts.Prefix, name) + objectName := path.Join(h.cfg.Prefix, name) opts := minio.GetObjectOptions{} @@ -207,7 +207,7 @@ func (h hmacClient) getPartialObject(name string, buf *storage.Arena, start, len return nil, errors.Wrap(err, "failed to set range on GetObjectOptions") } - object, err := h.client.GetObject(ctx, h.opts.Bucket, objectName, opts) + object, err := h.client.GetObject(ctx, h.cfg.Bucket, objectName, opts) 
 	if err != nil {
 		respErr := minio.ToErrorResponse(err)
 		if respErr.Code == "NoSuchKey" || respErr.Code == "InvalidRange" {

From 19484b7934466fbe91d38759c7e4d7bc806a7448 Mon Sep 17 00:00:00 2001
From: Boris Ilijic
Date: Fri, 24 Oct 2025 11:35:07 +0200
Subject: [PATCH 79/95] Expose chunkRetryDeadline in GCS config

---
 pbm/storage/gcs/config.go        | 8 +++++++-
 pbm/storage/gcs/google_client.go | 1 +
 2 files changed, 8 insertions(+), 1 deletion(-)

diff --git a/pbm/storage/gcs/config.go b/pbm/storage/gcs/config.go
index e164a6b6b..4dfe8f8bb 100644
--- a/pbm/storage/gcs/config.go
+++ b/pbm/storage/gcs/config.go
@@ -5,6 +5,7 @@ import (
 	"time"
 )
 
+//nolint:lll
 type Config struct {
 	Bucket      string      `bson:"bucket" json:"bucket" yaml:"bucket"`
 	Prefix      string      `bson:"prefix" json:"prefix" yaml:"prefix"`
@@ -45,7 +46,12 @@ type Retryer struct {
 	// MaxAttempts configures the maximum number of tries.
 	// E.g. if it's set to 5, the op will be attempted up to 5 times total (initial call + 4 retries).
 	// https://pkg.go.dev/cloud.google.com/go/storage#WithMaxAttempts
-	MaxAttempts int `bson:"maxAttempts" json:"maxAttempts" yaml:"maxAttempts"`
+	MaxAttempts int `bson:"maxAttempts,omitempty" json:"maxAttempts,omitempty" yaml:"maxAttempts,omitempty"`
+
+	// ChunkRetryDeadline sets a per-chunk retry deadline for multi-chunk
+	// resumable uploads.
+	// https://pkg.go.dev/cloud.google.com/go/storage#Writer
+	ChunkRetryDeadline time.Duration `bson:"chunkRetryDeadline,omitempty" json:"chunkRetryDeadline,omitempty" yaml:"chunkRetryDeadline,omitempty"`
 }
 
 func (cfg *Config) Clone() *Config {
diff --git a/pbm/storage/gcs/google_client.go b/pbm/storage/gcs/google_client.go
index fbcd436c8..de3fb111a 100644
--- a/pbm/storage/gcs/google_client.go
+++ b/pbm/storage/gcs/google_client.go
@@ -107,6 +107,7 @@ func (g googleClient) save(name string, data io.Reader, options ...storage.Optio
 	ctx := context.Background()
 	w := g.bucketHandle.Object(path.Join(g.cfg.Prefix, name)).NewWriter(ctx)
 	w.ChunkSize = int(partSize)
+	w.ChunkRetryDeadline = g.cfg.Retryer.ChunkRetryDeadline
 	if g.log != nil && opts.UseLogger {
 		w.ProgressFunc = func(written int64) {
 			if opts.Size > 0 {

From d98685ff18f604d8fbe18b2ba72ff19c8278e3f0 Mon Sep 17 00:00:00 2001
From: Boris Ilijic
Date: Fri, 24 Oct 2025 11:52:59 +0200
Subject: [PATCH 80/95] Add tests for new GCS params

In addition, refactor the tests to be table-driven.
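
For reference, the pattern the suite now follows is a single loop over
(param, value) pairs; a minimal sketch (table trimmed to one entry,
values illustrative only):

    testCases := []struct {
        desc  string
        param string
        val   string
    }{
        {desc: "bucket", param: "storage.gcs.bucket", val: "b1"},
    }
    for _, tt := range testCases {
        t.Run(tt.desc, func(t *testing.T) {
            // each parameter gets its own subtest, so one failing
            // SetConfigVar call does not mask the others
            if err := SetConfigVar(ctx, connClient, tt.param, tt.val); err != nil {
                t.Fatalf("SetConfigVar(%s=%s): %v", tt.param, tt.val, err)
            }
        })
    }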
--- pbm/config/config_test.go | 165 ++++++++++++++++++++------------------ 1 file changed, 86 insertions(+), 79 deletions(-) diff --git a/pbm/config/config_test.go b/pbm/config/config_test.go index 5edd11122..d8e2de5ca 100644 --- a/pbm/config/config_test.go +++ b/pbm/config/config_test.go @@ -273,6 +273,7 @@ func TestConfig(t *testing.T) { ctx := context.Background() t.Run("gcs config", func(t *testing.T) { + wantCfg := &Config{ Storage: StorageConf{ Type: storage.GCS, @@ -286,98 +287,104 @@ func TestConfig(t *testing.T) { ChunkSize: 100, MaxObjSizeGB: floatPtr(1.1), Retryer: &gcs.Retryer{ - BackoffInitial: 11 * time.Minute, - BackoffMax: 111 * time.Minute, - BackoffMultiplier: 11.1, + BackoffInitial: 11 * time.Minute, + BackoffMax: 111 * time.Minute, + BackoffMultiplier: 11.1, + MaxAttempts: 1, + ChunkRetryDeadline: 11 * time.Millisecond, }, }, }, } - err := SetConfig(ctx, connClient, &Config{Storage: StorageConf{Type: storage.GCS}}) - if err != nil { - t.Fatal("set config:", err) - } - - err = SetConfigVar(ctx, connClient, - "storage.gcs.bucket", - wantCfg.Storage.GCS.Bucket, - ) - if err != nil { - t.Fatal("set config var", err) - } - - err = SetConfigVar(ctx, connClient, - "storage.gcs.prefix", - wantCfg.Storage.GCS.Prefix, - ) - if err != nil { - t.Fatal("set config var", err) - } - err = SetConfigVar(ctx, connClient, - "storage.gcs.credentials.clientEmail", - wantCfg.Storage.GCS.Credentials.ClientEmail, - ) - if err != nil { - t.Fatal("set config var", err) - } - - err = SetConfigVar(ctx, connClient, - "storage.gcs.credentials.privateKey", - wantCfg.Storage.GCS.Credentials.PrivateKey, - ) - if err != nil { - t.Fatal("set config var", err) - } - - err = SetConfigVar(ctx, connClient, - "storage.gcs.chunkSize", - fmt.Sprintf("%d", wantCfg.Storage.GCS.ChunkSize), - ) - if err != nil { - t.Fatal("set config var", err) - } - - err = SetConfigVar(ctx, connClient, - "storage.gcs.maxObjSizeGB", - fmt.Sprintf("%f", *wantCfg.Storage.GCS.MaxObjSizeGB), - ) - if err != nil { - t.Fatal("set config var", err) + var testCases = []struct { + desc string + param string + val string + }{ + { + desc: "bucket", + param: "storage.gcs.bucket", + val: wantCfg.Storage.GCS.Bucket, + }, + { + desc: "prefix", + param: "storage.gcs.prefix", + val: wantCfg.Storage.GCS.Prefix, + }, + { + desc: "credentials.clientEmail", + param: "storage.gcs.credentials.clientEmail", + val: wantCfg.Storage.GCS.Credentials.ClientEmail, + }, + { + desc: "credentials.privateKey", + param: "storage.gcs.credentials.privateKey", + val: wantCfg.Storage.GCS.Credentials.PrivateKey, + }, + { + desc: "chunkSize", + param: "storage.gcs.chunkSize", + val: fmt.Sprintf("%d", wantCfg.Storage.GCS.ChunkSize), + }, + { + desc: "maxObjSizeGB", + param: "storage.gcs.maxObjSizeGB", + val: fmt.Sprintf("%f", *wantCfg.Storage.GCS.MaxObjSizeGB), + }, + { + desc: "retryer.backoffInitial", + param: "storage.gcs.retryer.backoffInitial", + val: wantCfg.Storage.GCS.Retryer.BackoffInitial.String(), + }, + { + desc: "retryer.backoffMax", + param: "storage.gcs.retryer.backoffMax", + val: wantCfg.Storage.GCS.Retryer.BackoffMax.String(), + }, + { + desc: "retryer.backoffMultiplier", + param: "storage.gcs.retryer.backoffMultiplier", + val: fmt.Sprintf("%f", wantCfg.Storage.GCS.Retryer.BackoffMultiplier), + }, + { + desc: "retryer.maxAttempts", + param: "storage.gcs.retryer.maxAttempts", + val: fmt.Sprintf("%d", wantCfg.Storage.GCS.Retryer.MaxAttempts), + }, + { + desc: "retryer.chunkRetryDeadline", + param: "storage.gcs.retryer.chunkRetryDeadline", + val: 
wantCfg.Storage.GCS.Retryer.ChunkRetryDeadline.String(),
+			},
+		}
 
-		err = SetConfigVar(ctx, connClient,
-			"storage.gcs.retryer.backoffInitial",
-			wantCfg.Storage.GCS.Retryer.BackoffInitial.String(),
-		)
+		err := SetConfig(ctx, connClient, &Config{Storage: StorageConf{Type: storage.GCS}})
 		if err != nil {
-			t.Fatal("set config var", err)
+			t.Fatalf("setup: initial SetConfig failed: %v", err)
 		}
 
-		err = SetConfigVar(ctx, connClient,
-			"storage.gcs.retryer.backoffMax",
-			wantCfg.Storage.GCS.Retryer.BackoffMax.String(),
-		)
-		if err != nil {
-			t.Fatal("set config var", err)
-		}
-		err = SetConfigVar(ctx, connClient,
-			"storage.gcs.retryer.backoffMultiplier",
-			fmt.Sprintf("%f", wantCfg.Storage.GCS.Retryer.BackoffMultiplier),
-		)
-		if err != nil {
-			t.Fatal("set config var", err)
+		for _, tc := range testCases {
+			t.Run(tc.desc, func(t *testing.T) {
+				err := SetConfigVar(ctx, connClient, tc.param, tc.val)
+				if err != nil {
+					t.Fatalf("SetConfigVar failed for %s with value %s: %v",
+						tc.param, tc.val, err)
+				}
+			})
 		}
 
-		gotCfg, err := GetConfig(ctx, connClient)
-		if err != nil {
-			t.Fatal("get config:", err)
-		}
+		t.Run("check final config", func(t *testing.T) {
+			gotCfg, err := GetConfig(ctx, connClient)
+			if err != nil {
+				t.Fatalf("GetConfig failed: %v", err)
+			}
 
-		if !gotCfg.Storage.Equal(&wantCfg.Storage) {
-			t.Fatalf("wrong config after using set config var, diff=%s",
-				cmp.Diff(*wantCfg.Storage.GCS, *gotCfg.Storage.GCS))
-		}
+			if !gotCfg.Storage.Equal(&wantCfg.Storage) {
+				t.Fatalf("Wrong config after using SetConfigVar.\n-want: %+v\n-got: %+v\n\nDiff:\n%s",
+					wantCfg.Storage.GCS, gotCfg.Storage.GCS, cmp.Diff(*wantCfg.Storage.GCS, *gotCfg.Storage.GCS))
+			}
+		})
 	})
 }

From 0c3b1f1c9675af9d2808f840f7418800cbf20854 Mon Sep 17 00:00:00 2001
From: Boris Ilijic
Date: Fri, 24 Oct 2025 12:50:01 +0200
Subject: [PATCH 81/95] Set defaults for GCS storage config params

---
 pbm/config/config.go             |  2 +-
 pbm/config/config_test.go        |  5 +-
 pbm/storage/gcs/config.go        | 40 +++++++++++++
 pbm/storage/gcs/config_test.go   | 97 ++++++++++++++++++++++++++++++++
 pbm/storage/gcs/gcs.go           | 17 +++++-
 pbm/storage/gcs/gcs_test.go      | 62 --------------------
 pbm/storage/gcs/google_client.go |  2 +-
 7 files changed, 158 insertions(+), 67 deletions(-)
 create mode 100644 pbm/storage/gcs/config_test.go

diff --git a/pbm/config/config.go b/pbm/config/config.go
index 0256bd0dc..81a404fcb 100644
--- a/pbm/config/config.go
+++ b/pbm/config/config.go
@@ -346,7 +346,7 @@ func (s *StorageConf) Cast() error {
 	case storage.Azure:
 		return s.Azure.Cast()
 	case storage.GCS:
-		return nil
+		return s.GCS.Cast()
 	case storage.Blackhole: // noop
 		return nil
 	}
diff --git a/pbm/config/config_test.go b/pbm/config/config_test.go
index d8e2de5ca..bf1f8ac96 100644
--- a/pbm/config/config_test.go
+++ b/pbm/config/config_test.go
@@ -359,7 +359,10 @@ func TestConfig(t *testing.T) {
 		},
 	}
 
-	err := SetConfig(ctx, connClient, &Config{Storage: StorageConf{Type: storage.GCS}})
+	emptyCfg := &Config{
+		Storage: StorageConf{Type: storage.GCS, GCS: &gcs.Config{}},
+	}
+	err := SetConfig(ctx, connClient, emptyCfg)
 	if err != nil {
 		t.Fatalf("setup: initial SetConfig failed: %v", err)
 	}
diff --git a/pbm/storage/gcs/config.go b/pbm/storage/gcs/config.go
index 4dfe8f8bb..45c0193a7 100644
--- a/pbm/storage/gcs/config.go
+++ b/pbm/storage/gcs/config.go
@@ -3,6 +3,8 @@ package gcs
 import (
 	"reflect"
 	"time"
+
+	"github.com/percona/percona-backup-mongodb/pbm/errors"
 )
 
 //nolint:lll
@@ -115,6 +117,44 @@ func (cfg *Config) IsSameStorage(other *Config) bool {
 	return true
 }
 
+func (cfg *Config)
Cast() error { + if cfg == nil { + return errors.New("missing GCS configuration with GCS storage type") + } + + if cfg.ChunkSize == 0 { + cfg.ChunkSize = defaultChunkSize + } + + if cfg.Retryer == nil { + cfg.Retryer = &Retryer{ + MaxAttempts: defaultMaxAttempts, + BackoffInitial: defaultBackoffInitial, + BackoffMax: defaultBackoffMax, + BackoffMultiplier: defaultBackoffMultiplier, + ChunkRetryDeadline: defaultChunkRetryDeadline, + } + } else { + if cfg.Retryer.MaxAttempts == 0 { + cfg.Retryer.MaxAttempts = defaultMaxAttempts + } + if cfg.Retryer.BackoffInitial == 0 { + cfg.Retryer.BackoffInitial = defaultBackoffInitial + } + if cfg.Retryer.BackoffMax == 0 { + cfg.Retryer.BackoffMax = defaultBackoffMax + } + if cfg.Retryer.BackoffMultiplier == 0 { + cfg.Retryer.BackoffMultiplier = defaultBackoffMultiplier + } + if cfg.Retryer.ChunkRetryDeadline == 0 { + cfg.Retryer.ChunkRetryDeadline = defaultChunkRetryDeadline + } + } + + return nil +} + func (cfg *Config) GetMaxObjSizeGB() float64 { if cfg.MaxObjSizeGB != nil && *cfg.MaxObjSizeGB > 0 { return *cfg.MaxObjSizeGB diff --git a/pbm/storage/gcs/config_test.go b/pbm/storage/gcs/config_test.go new file mode 100644 index 000000000..c9bf019bb --- /dev/null +++ b/pbm/storage/gcs/config_test.go @@ -0,0 +1,97 @@ +package gcs + +import ( + "strings" + "testing" + + "github.com/google/go-cmp/cmp" +) + +func TestCast(t *testing.T) { + var c *Config + err := c.Cast() + if err == nil { + t.Fatal("sigsegv should have happened instead") + } + + c = &Config{} + err = c.Cast() + if err != nil { + t.Fatalf("got error during Cast: %v", err) + } + want := &Config{ + ChunkSize: defaultChunkSize, + Retryer: &Retryer{ + MaxAttempts: defaultMaxAttempts, + BackoffInitial: defaultBackoffInitial, + BackoffMax: defaultBackoffMax, + BackoffMultiplier: defaultBackoffMultiplier, + ChunkRetryDeadline: defaultChunkRetryDeadline, + }, + } + + if !c.Equal(want) { + t.Fatalf("wrong config after Cast, diff=%s", cmp.Diff(*c, *want)) + } +} + +func TestConfig(t *testing.T) { + opts := &Config{ + Bucket: "bucketName", + Prefix: "prefix", + Credentials: Credentials{ + ClientEmail: "email@example.com", + PrivateKey: "-----BEGIN PRIVATE KEY-----\nKey\n-----END PRIVATE KEY-----\n", + }, + } + + t.Run("Clone", func(t *testing.T) { + clone := opts.Clone() + if clone == opts { + t.Error("expected clone to be a different pointer") + } + + if !opts.Equal(clone) { + t.Error("expected clone to be equal") + } + + opts.Bucket = "updatedName" + if opts.Equal(clone) { + t.Error("expected clone to be unchanged when updating original") + } + }) + + t.Run("Equal fails", func(t *testing.T) { + if opts.Equal(nil) { + t.Error("expected not to be equal other nil") + } + + clone := opts.Clone() + clone.Prefix = "updatedPrefix" + if opts.Equal(clone) { + t.Error("expected not to be equal when updating prefix") + } + + clone = opts.Clone() + clone.Credentials.ClientEmail = "updated@example.com" + if opts.Equal(clone) { + t.Error("expected not to be equal when updating credentials") + } + }) +} + +func TestEmptyCredentialsFail(t *testing.T) { + opts := &Config{ + Bucket: "bucketName", + } + + _, err := New(opts, "node", nil) + + if err == nil { + t.Fatalf("expected error when not specifying credentials") + } + + if !strings.Contains(err.Error(), "required for GCS credentials") { + t.Errorf("expected required credentials, got %s", err) + } +} diff --git a/pbm/storage/gcs/gcs.go b/pbm/storage/gcs/gcs.go index ea2deb3b9..d3f053fa5 100644 --- a/pbm/storage/gcs/gcs.go +++ b/pbm/storage/gcs/gcs.go @@ -4,6 +4,7 @@ 
import ( "io" "path" "strings" + "time" "github.com/percona/percona-backup-mongodb/pbm/errors" "github.com/percona/percona-backup-mongodb/pbm/log" @@ -11,8 +12,16 @@ import ( ) const ( - gcsEndpointURL = "storage.googleapis.com" - defaultMaxObjSizeGB = 5018 // 4.9 TB + gcsEndpointURL = "storage.googleapis.com" + + defaultChunkSize = 10 * 1024 * 1024 // 10MiB + defaultMaxObjSizeGB = 5018 // 4.9 TB + + defaultMaxAttempts = 5 + defaultBackoffInitial = time.Second + defaultBackoffMax = 30 * time.Second + defaultBackoffMultiplier = 2 + defaultChunkRetryDeadline = 32 * time.Second ) type ServiceAccountCredentials struct { @@ -44,6 +53,10 @@ type GCS struct { } func New(cfg *Config, node string, l log.LogEvent) (storage.Storage, error) { + if err := cfg.Cast(); err != nil { + return nil, errors.Wrap(err, "set defaults") + } + g := &GCS{ cfg: cfg, log: l, diff --git a/pbm/storage/gcs/gcs_test.go b/pbm/storage/gcs/gcs_test.go index 6434e74d2..608b5f3fe 100644 --- a/pbm/storage/gcs/gcs_test.go +++ b/pbm/storage/gcs/gcs_test.go @@ -4,7 +4,6 @@ import ( "context" "fmt" "os" - "strings" "testing" gcs "cloud.google.com/go/storage" @@ -122,64 +121,3 @@ func TestGCS(t *testing.T) { } }) } - -func TestConfig(t *testing.T) { - opts := &Config{ - Bucket: "bucketName", - Prefix: "prefix", - Credentials: Credentials{ - ClientEmail: "email@example.com", - PrivateKey: "-----BEGIN PRIVATE KEY-----\nKey\n-----END PRIVATE KEY-----\n", - }, - } - - t.Run("Clone", func(t *testing.T) { - clone := opts.Clone() - if clone == opts { - t.Error("expected clone to be a different pointer") - } - - if !opts.Equal(clone) { - t.Error("expected clone to be equal") - } - - opts.Bucket = "updatedName" - if opts.Equal(clone) { - t.Error("expected clone to be unchanged when updating original") - } - }) - - t.Run("Equal fails", func(t *testing.T) { - if opts.Equal(nil) { - t.Error("expected not to be equal other nil") - } - - clone := opts.Clone() - clone.Prefix = "updatedPrefix" - if opts.Equal(clone) { - t.Error("expected not to be equal when updating prefix") - } - - clone = opts.Clone() - clone.Credentials.ClientEmail = "updated@example.com" - if opts.Equal(clone) { - t.Error("expected not to be equal when updating credentials") - } - }) -} - -func TestEmptyCredentialsFail(t *testing.T) { - opts := &Config{ - Bucket: "bucketName", - } - - _, err := New(opts, "node", nil) - - if err == nil { - t.Fatalf("expected error when not specifying credentials") - } - - if !strings.Contains(err.Error(), "required for GCS credentials") { - t.Errorf("expected required credentials, got %s", err) - } -} diff --git a/pbm/storage/gcs/google_client.go b/pbm/storage/gcs/google_client.go index de3fb111a..8214b7220 100644 --- a/pbm/storage/gcs/google_client.go +++ b/pbm/storage/gcs/google_client.go @@ -87,7 +87,7 @@ func (g googleClient) save(name string, data io.Reader, options ...storage.Optio partSize := storage.ComputePartSize( opts.Size, - 10<<20, // default: 10 MiB + defaultChunkSize, align, 10_000, int64(g.cfg.ChunkSize), From c43e46499fa9f1bd12bfc21225e3a344ca617d83 Mon Sep 17 00:00:00 2001 From: Boris Ilijic Date: Fri, 24 Oct 2025 14:20:32 +0200 Subject: [PATCH 82/95] Apply changes after review --- pbm/config/config_test.go | 26 +++++--------------------- pbm/storage/gcs/config.go | 1 + 2 files changed, 6 insertions(+), 21 deletions(-) diff --git a/pbm/config/config_test.go b/pbm/config/config_test.go index bf1f8ac96..a498b2870 100644 --- a/pbm/config/config_test.go +++ b/pbm/config/config_test.go @@ -273,7 +273,6 @@ func TestConfig(t 
*testing.T) { ctx := context.Background() t.Run("gcs config", func(t *testing.T) { - wantCfg := &Config{ Storage: StorageConf{ Type: storage.GCS, @@ -297,7 +296,7 @@ func TestConfig(t *testing.T) { }, } - var testCases = []struct { + testCases := []struct { desc string param string val string @@ -367,12 +366,12 @@ func TestConfig(t *testing.T) { t.Fatalf("setup: initial SetConfig failed: %v", err) } - for _, tc := range testCases { - t.Run(tc.desc, func(t *testing.T) { - err := SetConfigVar(ctx, connClient, tc.param, tc.val) + for _, tt := range testCases { + t.Run(tt.desc, func(t *testing.T) { + err := SetConfigVar(ctx, connClient, tt.param, tt.val) if err != nil { t.Fatalf("SetConfigVar failed for %s with value %s: %v", - tc.param, tc.val, err) + tt.param, tt.val, err) } }) } @@ -391,21 +390,6 @@ func TestConfig(t *testing.T) { }) } -func Test(t *testing.T) { - testCases := []struct { - desc string - }{ - { - desc: "", - }, - } - for _, tC := range testCases { - t.Run(tC.desc, func(t *testing.T) { - - }) - } -} - func boolPtr(b bool) *bool { return &b } diff --git a/pbm/storage/gcs/config.go b/pbm/storage/gcs/config.go index 45c0193a7..e7e0f9453 100644 --- a/pbm/storage/gcs/config.go +++ b/pbm/storage/gcs/config.go @@ -31,6 +31,7 @@ type Credentials struct { HMACSecret string `bson:"hmacSecret" json:"hmacSecret,omitempty" yaml:"hmacSecret,omitempty"` } +//nolint:lll type Retryer struct { // BackoffInitial is the initial value of the retry period. // https://pkg.go.dev/github.com/googleapis/gax-go/v2@v2.12.3#Backoff.Initial From 8fd3f4dba7c366984c31b6db2ea4c0bd0b0d8e74 Mon Sep 17 00:00:00 2001 From: Boris Ilijic Date: Fri, 24 Oct 2025 14:23:09 +0200 Subject: [PATCH 83/95] Update GCS config reference example --- packaging/conf/pbm-conf-reference.yml | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/packaging/conf/pbm-conf-reference.yml b/packaging/conf/pbm-conf-reference.yml index 70a2bbc46..7e9dd0bea 100644 --- a/packaging/conf/pbm-conf-reference.yml +++ b/packaging/conf/pbm-conf-reference.yml @@ -151,9 +151,11 @@ ## Retry upload configuration options. # retryer: -# backoffInitial: 1 -# backoffMax: 30 +# maxAttempts: 5 +# backoffInitial: 1s +# backoffMax: 30s # backoffMultiplier: 2 +# chunkRetryDeadline: 32s # ## The maximum object size that will be stored on the storage # maxObjSizeGB: 5018 From 4b81f7f2d8f3af71e797eb0ae457bd11aae71ce8 Mon Sep 17 00:00:00 2001 From: Boris Ilijic Date: Mon, 27 Oct 2025 11:35:01 +0100 Subject: [PATCH 84/95] Extend GCS retryable error list --- pbm/storage/gcs/google_client.go | 25 ++++++++++++++++++++----- 1 file changed, 20 insertions(+), 5 deletions(-) diff --git a/pbm/storage/gcs/google_client.go b/pbm/storage/gcs/google_client.go index 8214b7220..87661b337 100644 --- a/pbm/storage/gcs/google_client.go +++ b/pbm/storage/gcs/google_client.go @@ -54,10 +54,8 @@ func newGoogleClient(cfg *Config, l log.LogEvent) (*googleClient, error) { return nil, errors.Wrap(err, "new GCS client") } - bh := cli.Bucket(cfg.Bucket) - - if cfg.Retryer != nil { - bh = bh.Retryer( + bh := cli.Bucket(cfg.Bucket). 
+		Retryer(
 			storagegcs.WithBackoff(gax.Backoff{
 				Initial:    cfg.Retryer.BackoffInitial,
 				Max:        cfg.Retryer.BackoffMax,
@@ -65,8 +63,8 @@ func newGoogleClient(cfg *Config, l log.LogEvent) (*googleClient, error) {
 			}),
 			storagegcs.WithMaxAttempts(cfg.Retryer.MaxAttempts),
 			storagegcs.WithPolicy(storagegcs.RetryAlways),
+			storagegcs.WithErrorFunc(shouldRetryExtended),
 		)
-	}
 
 	return &googleClient{
 		bucketHandle: bh,
@@ -75,6 +73,23 @@ func newGoogleClient(cfg *Config, l log.LogEvent) (*googleClient, error) {
 	}, nil
 }
 
+// shouldRetryExtended extends the default ShouldRetry check, mainly with the
+// `client connection lost` error from the std library's http package.
+func shouldRetryExtended(err error) bool {
+	if err == nil {
+		return false
+	}
+	if storagegcs.ShouldRetry(err) {
+		return true
+	}
+	if strings.Contains(err.Error(), "http2: client connection lost") ||
+		strings.Contains(err.Error(), "connect: network is unreachable") {
+		return true
+	}
+
+	return false
+}
+
 func (g googleClient) save(name string, data io.Reader, options ...storage.Option) error {
 	opts := storage.GetDefaultOpts()
 	for _, opt := range options {

From 550a23ce93e7282eecb8237807f623992ddb6d2a Mon Sep 17 00:00:00 2001
From: Boris Ilijic
Date: Mon, 27 Oct 2025 16:06:34 +0100
Subject: [PATCH 85/95] Add OSS storage type info for `pbm status` cmd

---
 pbm/config/config.go | 17 +++++++++++++++
 1 file changed, 17 insertions(+)

diff --git a/pbm/config/config.go b/pbm/config/config.go
index 81a404fcb..30e531a0f 100644
--- a/pbm/config/config.go
+++ b/pbm/config/config.go
@@ -364,6 +364,8 @@ func (s *StorageConf) Typ() string {
 		return "Azure"
 	case storage.GCS:
 		return "GCS"
+	case storage.OSS:
+		return "OSS"
 	case storage.Filesystem:
 		return "FS"
 	case storage.Blackhole:
@@ -418,6 +420,19 @@ func (s *StorageConf) Path() string {
 		if s.GCS.Prefix != "" {
 			path += "/" + s.GCS.Prefix
 		}
+	case storage.OSS:
+		path = s.OSS.EndpointURL
+		if path == "" {
+			path = "oss://" + s.OSS.Bucket
+		} else {
+			if !strings.Contains(path, "://") {
+				path = "oss://" + path
+			}
+			path += "/" + s.OSS.Bucket
+		}
+		if s.OSS.Prefix != "" {
+			path += "/" + s.OSS.Prefix
+		}
 	case storage.Filesystem:
 		path = s.Filesystem.Path
 	}
@@ -433,6 +448,8 @@ func (s *StorageConf) Region() string {
 		region = s.S3.Region
 	case storage.Minio:
 		region = s.Minio.Region
+	case storage.OSS:
+		region = s.OSS.Region
 	}
 
 	return region

From 331b8bf91bfd8e395a6ec1ce7f4f9a646fd26948 Mon Sep 17 00:00:00 2001
From: Boris Ilijic
Date: Tue, 28 Oct 2025 10:00:10 +0100
Subject: [PATCH 86/95] Add IsSameStorage method for OSS storage

That fixes:
- taking the increments during incremental backup: PBM-1650,
- stopping PITR during logical backups: PBM-1644.
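
IsSameStorage deliberately compares only region, bucket, and prefix;
credentials are not compared, so e.g. rotated keys still count as the
same storage. A minimal usage sketch (values are hypothetical):

    a := &oss.Config{Region: "r1", Bucket: "pbm", Prefix: "backups"}
    b := &oss.Config{Region: "r1", Bucket: "pbm", Prefix: "archive"}
    _ = a.IsSameStorage(a) // true: same region, bucket, and prefix
    _ = a.IsSameStorage(b) // false: prefixes differ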
--- pbm/config/config.go | 4 +++- pbm/storage/oss/client.go | 18 ++++++++++++++++++ 2 files changed, 21 insertions(+), 1 deletion(-) diff --git a/pbm/config/config.go b/pbm/config/config.go index 30e531a0f..eea8e739d 100644 --- a/pbm/config/config.go +++ b/pbm/config/config.go @@ -324,6 +324,8 @@ func (s *StorageConf) IsSameStorage(other *StorageConf) bool { return s.Azure.IsSameStorage(other.Azure) case storage.GCS: return s.GCS.IsSameStorage(other.GCS) + case storage.OSS: + return s.OSS.IsSameStorage(other.OSS) case storage.Filesystem: return s.Filesystem.IsSameStorage(other.Filesystem) case storage.Blackhole: @@ -365,7 +367,7 @@ func (s *StorageConf) Typ() string { case storage.GCS: return "GCS" case storage.OSS: - return "OSS" + return "" case storage.Filesystem: return "FS" case storage.Blackhole: diff --git a/pbm/storage/oss/client.go b/pbm/storage/oss/client.go index aa29ed5aa..1ed1f5760 100644 --- a/pbm/storage/oss/client.go +++ b/pbm/storage/oss/client.go @@ -63,6 +63,24 @@ type Credentials struct { SessionName string `bson:"sessionName,omitempty" json:"sessionName,omitempty" yaml:"sessionName,omitempty"` } +// IsSameStorage identifies the same instance of the S3 storage. +func (cfg *Config) IsSameStorage(other *Config) bool { + if cfg == nil || other == nil { + return cfg == other + } + + if cfg.Region != other.Region { + return false + } + if cfg.Bucket != other.Bucket { + return false + } + if cfg.Prefix != other.Prefix { + return false + } + return true +} + func (cfg *Config) Cast() error { if cfg == nil { return errors.New("missing oss configuration with oss storage type") From 891251c6a1a4ba27523b58382e92ab27d2eac4e1 Mon Sep 17 00:00:00 2001 From: Boris Ilijic Date: Tue, 28 Oct 2025 10:25:42 +0100 Subject: [PATCH 87/95] Fix storage type reporting for OSS storage --- pbm/config/config.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pbm/config/config.go b/pbm/config/config.go index eea8e739d..9b2655811 100644 --- a/pbm/config/config.go +++ b/pbm/config/config.go @@ -367,7 +367,7 @@ func (s *StorageConf) Typ() string { case storage.GCS: return "GCS" case storage.OSS: - return "" + return "OSS" case storage.Filesystem: return "FS" case storage.Blackhole: From 5f169f79d9830682693f6acb744673f7fdb8868b Mon Sep 17 00:00:00 2001 From: Boris Ilijic Date: Tue, 28 Oct 2025 10:39:16 +0100 Subject: [PATCH 88/95] Update doc for IsSameStorage for OSS Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- pbm/storage/oss/client.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pbm/storage/oss/client.go b/pbm/storage/oss/client.go index 1ed1f5760..09968c7b8 100644 --- a/pbm/storage/oss/client.go +++ b/pbm/storage/oss/client.go @@ -63,7 +63,7 @@ type Credentials struct { SessionName string `bson:"sessionName,omitempty" json:"sessionName,omitempty" yaml:"sessionName,omitempty"` } -// IsSameStorage identifies the same instance of the S3 storage. +// IsSameStorage identifies the same instance of the OSS storage. func (cfg *Config) IsSameStorage(other *Config) bool { if cfg == nil || other == nil { return cfg == other From 715b1596417ef2e8a4bb83dea1c3c8d96675f018 Mon Sep 17 00:00:00 2001 From: Boris Ilijic Date: Tue, 28 Oct 2025 16:03:53 +0100 Subject: [PATCH 89/95] Obfuscate OSS storage roleArn and sessionName ... 
config parameters when using `config --list` --- pbm/config/config.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/pbm/config/config.go b/pbm/config/config.go index 9b2655811..a3c48f5e4 100644 --- a/pbm/config/config.go +++ b/pbm/config/config.go @@ -183,6 +183,12 @@ func (c *Config) String() string { if c.Storage.OSS.Credentials.SecurityToken != "" { c.Storage.OSS.Credentials.SecurityToken = "***" } + if c.Storage.OSS.Credentials.SessionName != "" { + c.Storage.OSS.Credentials.SessionName = "***" + } + if c.Storage.OSS.Credentials.RoleARN != "" { + c.Storage.OSS.Credentials.RoleARN = "***" + } } b, err := yaml.Marshal(c) From a749501d1991cb376d63599fe8d9465235c8eb66 Mon Sep 17 00:00:00 2001 From: Sandra Romanchenko <53295797+sandraromanchenko@users.noreply.github.com> Date: Wed, 29 Oct 2025 13:50:19 +0200 Subject: [PATCH 90/95] PBM_e2e_tests. Add OSS for tests (#1218) --- e2e-tests/cmd/pbm-test/run.go | 1 + e2e-tests/cmd/pbm-test/run_physical.go | 1 + 2 files changed, 2 insertions(+) diff --git a/e2e-tests/cmd/pbm-test/run.go b/e2e-tests/cmd/pbm-test/run.go index 36013b456..4431c80e2 100644 --- a/e2e-tests/cmd/pbm-test/run.go +++ b/e2e-tests/cmd/pbm-test/run.go @@ -29,6 +29,7 @@ func run(t *sharded.Cluster, typ testTyp) { {"GCS_HMAC", "/etc/pbm/gcs_hmac.yaml"}, {"AWS_MinIO", "/etc/pbm/aws_minio.yaml"}, {"Azure", "/etc/pbm/azure.yaml"}, + {"OSS", "/etc/pbm/oss.yaml"}, {"FS", "/etc/pbm/fs.yaml"}, } diff --git a/e2e-tests/cmd/pbm-test/run_physical.go b/e2e-tests/cmd/pbm-test/run_physical.go index b89a7d820..38f5585ea 100644 --- a/e2e-tests/cmd/pbm-test/run_physical.go +++ b/e2e-tests/cmd/pbm-test/run_physical.go @@ -20,6 +20,7 @@ func runPhysical(t *sharded.Cluster, typ testTyp) { {"GCS_HMAC", "/etc/pbm/gcs_hmac.yaml"}, {"AWS_MinIO", "/etc/pbm/aws_minio.yaml"}, {"Azure", "/etc/pbm/azure.yaml"}, + {"OSS", "/etc/pbm/oss.yaml"}, {"FS", "/etc/pbm/fs.yaml"}, } From 7f3c4be36e3eb53c6a601faf315446564438584b Mon Sep 17 00:00:00 2001 From: Boris Ilijic Date: Wed, 29 Oct 2025 19:07:55 +0100 Subject: [PATCH 91/95] Add debugTrace cfg option for GCS --- pbm/storage/gcs/config.go | 1 + pbm/storage/gcs/google_client.go | 17 ++++++++++++++++- 2 files changed, 17 insertions(+), 1 deletion(-) diff --git a/pbm/storage/gcs/config.go b/pbm/storage/gcs/config.go index e7e0f9453..e1e7eec9e 100644 --- a/pbm/storage/gcs/config.go +++ b/pbm/storage/gcs/config.go @@ -17,6 +17,7 @@ type Config struct { // https://pkg.go.dev/cloud.google.com/go/storage#Writer ChunkSize int `bson:"chunkSize,omitempty" json:"chunkSize,omitempty" yaml:"chunkSize,omitempty"` MaxObjSizeGB *float64 `bson:"maxObjSizeGB,omitempty" json:"maxObjSizeGB,omitempty" yaml:"maxObjSizeGB,omitempty"` + DebugTrace bool `bson:"debugTrace,omitempty" json:"debugTrace,omitempty" yaml:"debugTrace,omitempty"` Retryer *Retryer `bson:"retryer,omitempty" json:"retryer,omitempty" yaml:"retryer,omitempty"` } diff --git a/pbm/storage/gcs/google_client.go b/pbm/storage/gcs/google_client.go index 87661b337..8e7d4d9bc 100644 --- a/pbm/storage/gcs/google_client.go +++ b/pbm/storage/gcs/google_client.go @@ -5,6 +5,7 @@ import ( "encoding/json" "fmt" "io" + "log/slog" "path" "strings" "time" @@ -49,7 +50,21 @@ func newGoogleClient(cfg *Config, l log.LogEvent) (*googleClient, error) { return nil, errors.Wrap(err, "marshal GCS credentials") } - cli, err := storagegcs.NewClient(ctx, option.WithCredentialsJSON(creds)) + clOpts := []option.ClientOption{ + option.WithCredentialsJSON(creds), + } + if cfg.DebugTrace { + h := slog.NewTextHandler(l.GetLogger(), 
&slog.HandlerOptions{ + Level: slog.LevelDebug, + }) + gcsLogger := slog.New(h) + clOpts = append(clOpts, option.WithLogger(gcsLogger)) + } + + cli, err := storagegcs.NewClient( + ctx, + clOpts..., + ) if err != nil { return nil, errors.Wrap(err, "new GCS client") } From a570955bb5cba37b08f8a8ea0ba7c5fc2389b5f4 Mon Sep 17 00:00:00 2001 From: Boris Ilijic Date: Wed, 29 Oct 2025 19:10:39 +0100 Subject: [PATCH 92/95] Add storage.gcs.debugTrace help reference --- packaging/conf/pbm-conf-reference.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/packaging/conf/pbm-conf-reference.yml b/packaging/conf/pbm-conf-reference.yml index 7e9dd0bea..825808e17 100644 --- a/packaging/conf/pbm-conf-reference.yml +++ b/packaging/conf/pbm-conf-reference.yml @@ -160,6 +160,10 @@ ## The maximum object size that will be stored on the storage # maxObjSizeGB: 5018 +## Enable debug trace of HTTP communication +# debugTrace: true + + #--------------------Filesystem Configuration--------------------------- # type: # filesystem: From 4ecddf7d249caa71ba687fe96390c60ad36bd2f8 Mon Sep 17 00:00:00 2001 From: Sandra Romanchenko <53295797+sandraromanchenko@users.noreply.github.com> Date: Thu, 30 Oct 2025 12:09:52 +0200 Subject: [PATCH 93/95] PBM. Bump version (#1219) --- pbm/version/version.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pbm/version/version.go b/pbm/version/version.go index 40ef6f6ac..987fa5aa9 100644 --- a/pbm/version/version.go +++ b/pbm/version/version.go @@ -16,7 +16,7 @@ import ( ) // current PBM version -const version = "2.11.0" +const version = "2.12.0" var ( platform string From df963459b38e591a4059b69fe8b79b7bf6a1707a Mon Sep 17 00:00:00 2001 From: Boris Ilijic Date: Thu, 30 Oct 2025 16:14:36 +0100 Subject: [PATCH 94/95] Revert "PBM-1608: Add config option to enable debug for GCS storage" --- packaging/conf/pbm-conf-reference.yml | 4 ---- pbm/storage/gcs/config.go | 1 - pbm/storage/gcs/google_client.go | 17 +---------------- 3 files changed, 1 insertion(+), 21 deletions(-) diff --git a/packaging/conf/pbm-conf-reference.yml b/packaging/conf/pbm-conf-reference.yml index 825808e17..7e9dd0bea 100644 --- a/packaging/conf/pbm-conf-reference.yml +++ b/packaging/conf/pbm-conf-reference.yml @@ -160,10 +160,6 @@ ## The maximum object size that will be stored on the storage # maxObjSizeGB: 5018 -## Enable debug trace of HTTP communication -# debugTrace: true - - #--------------------Filesystem Configuration--------------------------- # type: # filesystem: diff --git a/pbm/storage/gcs/config.go b/pbm/storage/gcs/config.go index e1e7eec9e..e7e0f9453 100644 --- a/pbm/storage/gcs/config.go +++ b/pbm/storage/gcs/config.go @@ -17,7 +17,6 @@ type Config struct { // https://pkg.go.dev/cloud.google.com/go/storage#Writer ChunkSize int `bson:"chunkSize,omitempty" json:"chunkSize,omitempty" yaml:"chunkSize,omitempty"` MaxObjSizeGB *float64 `bson:"maxObjSizeGB,omitempty" json:"maxObjSizeGB,omitempty" yaml:"maxObjSizeGB,omitempty"` - DebugTrace bool `bson:"debugTrace,omitempty" json:"debugTrace,omitempty" yaml:"debugTrace,omitempty"` Retryer *Retryer `bson:"retryer,omitempty" json:"retryer,omitempty" yaml:"retryer,omitempty"` } diff --git a/pbm/storage/gcs/google_client.go b/pbm/storage/gcs/google_client.go index 8e7d4d9bc..87661b337 100644 --- a/pbm/storage/gcs/google_client.go +++ b/pbm/storage/gcs/google_client.go @@ -5,7 +5,6 @@ import ( "encoding/json" "fmt" "io" - "log/slog" "path" "strings" "time" @@ -50,21 +49,7 @@ func newGoogleClient(cfg *Config, l log.LogEvent) (*googleClient, error) 
{ return nil, errors.Wrap(err, "marshal GCS credentials") } - clOpts := []option.ClientOption{ - option.WithCredentialsJSON(creds), - } - if cfg.DebugTrace { - h := slog.NewTextHandler(l.GetLogger(), &slog.HandlerOptions{ - Level: slog.LevelDebug, - }) - gcsLogger := slog.New(h) - clOpts = append(clOpts, option.WithLogger(gcsLogger)) - } - - cli, err := storagegcs.NewClient( - ctx, - clOpts..., - ) + cli, err := storagegcs.NewClient(ctx, option.WithCredentialsJSON(creds)) if err != nil { return nil, errors.Wrap(err, "new GCS client") } From 2f7100a2a1da2bb7074e15d757693999cac7574e Mon Sep 17 00:00:00 2001 From: Sandra Romanchenko <53295797+sandraromanchenko@users.noreply.github.com> Date: Fri, 31 Oct 2025 11:01:49 +0200 Subject: [PATCH 95/95] Update go version due to CVEs (#1223) --- packaging/scripts/mongodb-backup_builder.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/scripts/mongodb-backup_builder.sh b/packaging/scripts/mongodb-backup_builder.sh index c466bdfda..0c0a0b0c0 100644 --- a/packaging/scripts/mongodb-backup_builder.sh +++ b/packaging/scripts/mongodb-backup_builder.sh @@ -146,7 +146,7 @@ install_golang() { elif [ x"$ARCH" = "xaarch64" ]; then GO_ARCH="arm64" fi - GO_VERSION="1.25.1" + GO_VERSION="1.25.3" GO_TAR="go${GO_VERSION}.linux-${GO_ARCH}.tar.gz" GO_URL="https://downloads.percona.com/downloads/packaging/go/${GO_TAR}" DL_PATH="/tmp/${GO_TAR}"