From bf8ddb55fc476df853b01fdbec0a4f8ff3cd50d6 Mon Sep 17 00:00:00 2001 From: Paul Wright Date: Tue, 23 May 2023 13:47:00 +0100 Subject: [PATCH 01/10] Empty-Commit From 28081c9f5166fe45921e4aeaae5ce93132636e0c Mon Sep 17 00:00:00 2001 From: Paul Wright Date: Thu, 25 May 2023 13:26:08 +0100 Subject: [PATCH 02/10] git subrepo clone https://github.com/skupperproject/skewer subrepos/skewer subrepo: subdir: "subrepos/skewer" merged: "abd345a" upstream: origin: "https://github.com/skupperproject/skewer" branch: "main" commit: "abd345a" git-subrepo: version: "0.4.6" origin: "???" commit: "???" --- subrepos/skewer/.github/workflows/main.yaml | 23 + subrepos/skewer/.gitignore | 3 + subrepos/skewer/.gitrepo | 12 + subrepos/skewer/.plano.py | 59 + subrepos/skewer/LICENSE.txt | 202 ++ subrepos/skewer/README.md | 275 +++ .../skewer/config/.github/workflows/main.yaml | 41 + subrepos/skewer/config/.plano.py | 122 ++ subrepos/skewer/plano | 1 + subrepos/skewer/python/plano | 1 + subrepos/skewer/python/skewer/__init__.py | 20 + subrepos/skewer/python/skewer/main.py | 474 +++++ .../skewer/python/skewer/standardsteps.yaml | 230 +++ subrepos/skewer/python/skewer/tests.py | 68 + .../plano/.github/workflows/main.yaml | 48 + subrepos/skewer/subrepos/plano/.gitignore | 6 + subrepos/skewer/subrepos/plano/.gitrepo | 12 + subrepos/skewer/subrepos/plano/LICENSE.txt | 202 ++ subrepos/skewer/subrepos/plano/MANIFEST.in | 1 + subrepos/skewer/subrepos/plano/Makefile | 69 + subrepos/skewer/subrepos/plano/README.md | 78 + subrepos/skewer/subrepos/plano/bin/plano | 31 + subrepos/skewer/subrepos/plano/bin/plano-test | 31 + subrepos/skewer/subrepos/plano/docs/conf.py | 34 + subrepos/skewer/subrepos/plano/docs/index.rst | 4 + subrepos/skewer/subrepos/plano/pyproject.toml | 23 + .../subrepos/plano/src/plano/__init__.py | 24 + .../plano/src/plano/_testproject/.plano.py | 112 ++ .../_testproject/src/chucker/__init__.py | 0 .../plano/_testproject/src/chucker/tests.py | 59 + .../skewer/subrepos/plano/src/plano/_tests.py | 1213 ++++++++++++ .../subrepos/plano/src/plano/command.py | 513 ++++++ .../skewer/subrepos/plano/src/plano/main.py | 1634 +++++++++++++++++ .../skewer/subrepos/plano/src/plano/test.py | 397 ++++ subrepos/skewer/test-example/.gitignore | 1 + subrepos/skewer/test-example/.plano.py | 1 + subrepos/skewer/test-example/README.md | 461 +++++ .../skewer/test-example/images/entities.svg | 3 + .../skewer/test-example/images/sequence.svg | 1 + .../skewer/test-example/images/sequence.txt | 22 + subrepos/skewer/test-example/plano | 1 + subrepos/skewer/test-example/python/skewer | 1 + subrepos/skewer/test-example/skewer.yaml | 113 ++ subrepos/skewer/test-example/subrepos/skewer | 1 + 44 files changed, 6627 insertions(+) create mode 100644 subrepos/skewer/.github/workflows/main.yaml create mode 100644 subrepos/skewer/.gitignore create mode 100644 subrepos/skewer/.gitrepo create mode 100644 subrepos/skewer/.plano.py create mode 100644 subrepos/skewer/LICENSE.txt create mode 100644 subrepos/skewer/README.md create mode 100644 subrepos/skewer/config/.github/workflows/main.yaml create mode 100644 subrepos/skewer/config/.plano.py create mode 120000 subrepos/skewer/plano create mode 120000 subrepos/skewer/python/plano create mode 100644 subrepos/skewer/python/skewer/__init__.py create mode 100644 subrepos/skewer/python/skewer/main.py create mode 100644 subrepos/skewer/python/skewer/standardsteps.yaml create mode 100644 subrepos/skewer/python/skewer/tests.py create mode 100644 subrepos/skewer/subrepos/plano/.github/workflows/main.yaml 
create mode 100644 subrepos/skewer/subrepos/plano/.gitignore create mode 100644 subrepos/skewer/subrepos/plano/.gitrepo create mode 100644 subrepos/skewer/subrepos/plano/LICENSE.txt create mode 100644 subrepos/skewer/subrepos/plano/MANIFEST.in create mode 100644 subrepos/skewer/subrepos/plano/Makefile create mode 100644 subrepos/skewer/subrepos/plano/README.md create mode 100755 subrepos/skewer/subrepos/plano/bin/plano create mode 100755 subrepos/skewer/subrepos/plano/bin/plano-test create mode 100644 subrepos/skewer/subrepos/plano/docs/conf.py create mode 100644 subrepos/skewer/subrepos/plano/docs/index.rst create mode 100644 subrepos/skewer/subrepos/plano/pyproject.toml create mode 100644 subrepos/skewer/subrepos/plano/src/plano/__init__.py create mode 100644 subrepos/skewer/subrepos/plano/src/plano/_testproject/.plano.py create mode 100644 subrepos/skewer/subrepos/plano/src/plano/_testproject/src/chucker/__init__.py create mode 100644 subrepos/skewer/subrepos/plano/src/plano/_testproject/src/chucker/tests.py create mode 100644 subrepos/skewer/subrepos/plano/src/plano/_tests.py create mode 100644 subrepos/skewer/subrepos/plano/src/plano/command.py create mode 100644 subrepos/skewer/subrepos/plano/src/plano/main.py create mode 100644 subrepos/skewer/subrepos/plano/src/plano/test.py create mode 100644 subrepos/skewer/test-example/.gitignore create mode 120000 subrepos/skewer/test-example/.plano.py create mode 100644 subrepos/skewer/test-example/README.md create mode 100644 subrepos/skewer/test-example/images/entities.svg create mode 100644 subrepos/skewer/test-example/images/sequence.svg create mode 100644 subrepos/skewer/test-example/images/sequence.txt create mode 120000 subrepos/skewer/test-example/plano create mode 120000 subrepos/skewer/test-example/python/skewer create mode 100644 subrepos/skewer/test-example/skewer.yaml create mode 120000 subrepos/skewer/test-example/subrepos/skewer diff --git a/subrepos/skewer/.github/workflows/main.yaml b/subrepos/skewer/.github/workflows/main.yaml new file mode 100644 index 0000000..1c2c681 --- /dev/null +++ b/subrepos/skewer/.github/workflows/main.yaml @@ -0,0 +1,23 @@ +name: main +on: + push: + pull_request: + schedule: + - cron: "0 0 * * 0" +jobs: + test: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-python@v4 + with: + python-version: "3.x" + - uses: manusa/actions-setup-minikube@v2.7.2 + with: + minikube version: "v1.28.0" + kubernetes version: "v1.25.4" + github token: ${{secrets.GITHUB_TOKEN}} + - run: pip install pyyaml + - run: curl https://skupper.io/install.sh | sh + - run: echo "$HOME/.local/bin" >> $GITHUB_PATH + - run: ./plano test diff --git a/subrepos/skewer/.gitignore b/subrepos/skewer/.gitignore new file mode 100644 index 0000000..04d68f4 --- /dev/null +++ b/subrepos/skewer/.gitignore @@ -0,0 +1,3 @@ +__pycache__/ +/README.html +/.coverage diff --git a/subrepos/skewer/.gitrepo b/subrepos/skewer/.gitrepo new file mode 100644 index 0000000..232d5d4 --- /dev/null +++ b/subrepos/skewer/.gitrepo @@ -0,0 +1,12 @@ +; DO NOT EDIT (unless you know what you are doing) +; +; This subdirectory is a git "subrepo", and this file is maintained by the +; git-subrepo command. 
See https://github.com/ingydotnet/git-subrepo#readme +; +[subrepo] + remote = https://github.com/skupperproject/skewer + branch = main + commit = abd345a07b73d23e9d8795dba19f1948147b6222 + parent = bf8ddb55fc476df853b01fdbec0a4f8ff3cd50d6 + method = merge + cmdver = 0.4.6 diff --git a/subrepos/skewer/.plano.py b/subrepos/skewer/.plano.py new file mode 100644 index 0000000..5b267ac --- /dev/null +++ b/subrepos/skewer/.plano.py @@ -0,0 +1,59 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +from skewer import * + +@command(passthrough=True) +def test(coverage=False, passthrough_args=[]): + clean() + + args = " ".join(passthrough_args) + + if coverage: + check_program("coverage") + + with working_env(PYTHONPATH="python"): + run(f"coverage run --source skewer -m skewer.tests {args}") + + run("coverage report") + run("coverage html") + + print(f"file:{get_current_dir()}/htmlcov/index.html") + else: + with working_env(PYTHONPATH="python"): + run(f"python -m skewer.tests {args}") + +@command +def render(): + """ + Render README.html from README.md + """ + check_program("pandoc") + + run(f"pandoc -o README.html README.md") + + print(f"file:{get_real_path('README.html')}") + +@command +def clean(): + remove(join("python", "__pycache__")) + remove(join("test-example", "python", "__pycache__")) + remove("README.html") + remove("htmlcov") + remove(".coverage") diff --git a/subrepos/skewer/LICENSE.txt b/subrepos/skewer/LICENSE.txt new file mode 100644 index 0000000..e06d208 --- /dev/null +++ b/subrepos/skewer/LICENSE.txt @@ -0,0 +1,202 @@ +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + diff --git a/subrepos/skewer/README.md b/subrepos/skewer/README.md new file mode 100644 index 0000000..6771c1e --- /dev/null +++ b/subrepos/skewer/README.md @@ -0,0 +1,275 @@ +# Skewer + +[![main](https://github.com/skupperproject/skewer/actions/workflows/main.yaml/badge.svg)](https://github.com/skupperproject/skewer/actions/workflows/main.yaml) + +A library for documenting and testing Skupper examples + +A `skewer.yaml` file describes the steps and commands to achieve an +objective using Skupper. Skewer takes the `skewer.yaml` file as input +and produces two outputs: a `README.md` file and a test routine. + +## An example example + +[Example `skewer.yaml` file](test-example/skewer.yaml) + +[Example `README.md` output](test-example/README.md) + +## Setting up Skewer for your own example + +**Note:** This is how you set things up from scratch. You can also +use the [Skupper example template][template] as a starting point. 
+ +[template]: https://github.com/skupperproject/skupper-example-template + +Make sure you have git-subrepo installed: + + dnf install git-subrepo + +Add the Skewer code as a subrepo in your example project: + + cd project-dir/ + git subrepo clone https://github.com/skupperproject/skewer subrepos/skewer + +Symlink the Skewer library into your `python` directory: + + mkdir -p python + ln -s ../subrepos/skewer/python/skewer python/skewer + +Symlink the `plano` command into the root of your project. Symlink +the standard `config/.plano.py` as `.plano.py` in the root as well: + + ln -s subrepos/skewer/plano + ln -s subrepos/skewer/config/.plano.py + + + + + + + + + + +To use the `./plano` command, you must have the Python `pyyaml` +package installed. Use `pip` (or `pip3` on some systems) to install +it: + + pip install pyyaml + +Use the `plano update-workflow` command to copy the latest GitHub +Actions workflow file into your project: + + ./plano update-workflow + +Use your editor to create a `skewer.yaml` file in the root of your +project: + + emacs skewer.yaml + +Run the `./plano` command to see the available commands: + +~~~ console +$ ./plano +usage: plano [--verbose] [--quiet] [--debug] [-h] [-f FILE] {generate,render,run,run-external,demo,test,update-workflow} ... + +Run commands defined as Python functions + +options: + --verbose Print detailed logging to the console + --quiet Print no logging to the console + --debug Print debugging output to the console + -h, --help Show this help message and exit + -f FILE, --file FILE Load commands from FILE (default '.plano.py') + +commands: + {generate,render,run,run-external,demo,test,update-workflow} + generate Generate README.md from the data in skewer.yaml + render Render README.html from the data in skewer.yaml + run Run the example steps using Minikube + run-external Run the example steps against external clusters + demo Run the example steps and pause before cleaning up + test Test README generation and run the steps on Minikube + update-workflow Update the GitHub Actions workflow file +~~~ + +## Updating a Skewer subrepo inside your example project + +Use `git subrepo pull`: + + git subrepo pull --force subrepos/skewer + +Some older versions of git-subrepo won't complete a force pull. If +that happens, you can simply blow away your changes and get the latest +Skewer, using these commands: + + git subrepo clean subrepos/skewer + git rm -rf subrepos/skewer/ + git commit -am "Temporarily remove the previous version of Skewer" + git subrepo clone https://github.com/skupperproject/skewer subrepos/skewer + +## Skewer YAML + +The top level: + +~~~ yaml +title: # Your example's title (required) +subtitle: # Your chosen subtitle (required) +github_actions_url: # The URL of your workflow (optional) +overview: # Text introducing your example (optional) +prerequisites: # Text describing prerequisites (optional, has default text) +sites: # A map of named sites (see below) +steps: # A list of steps (see below) +summary: # Text to summarize what the user did (optional) +next_steps: # Text linking to more examples (optional, has default text) +~~~ + +A **site**: + +~~~ yaml +: + kubeconfig: # (required) + namespace: # (required) +~~~ + +A tilde (~) in the kubeconfig file path is replaced with a temporary +working directory during testing. 
+ +Example sites: + +~~~ yaml +sites: + east: + kubeconfig: ~/.kube/config-east + namespace: east + west: + kubeconfig: ~/.kube/config-west + namespace: west +~~~ + +A **step**: + +~~~ yaml +- title: # The step title (required) + preamble: # Text before the commands (optional) + commands: # Named groups of commands. See below. + postamble: # Text after the commands (optional) +~~~ + +An example step: + +~~~ yaml +steps: + - title: Expose the frontend service + preamble: | + We have established connectivity between the two namespaces and + made the backend in `east` available to the frontend in `west`. + Before we can test the application, we need external access to + the frontend. + + Use `kubectl expose` with `--type LoadBalancer` to open network + access to the frontend service. Use `kubectl get services` to + check for the service and its external IP address. + commands: + east: + west: +~~~ + +Or you can use a named step from the library of standard steps: + +~~~ yaml +- standard: configure_separate_console_sessions +~~~ + +The standard steps are defined in +[python/standardsteps.yaml](python/standardsteps.yaml). Note that you +should not edit this file. Instead, in your `skewer.yaml` file, you +can create custom steps based on the standard steps. You can override +the `title`, `preamble`, `commands`, or `postamble` field of a +standard step by adding the field in addition to `standard`: + +~~~ yaml +- standard: cleaning_up + commands: + east: + - run: skupper delete + - run: kubectl delete deployment/database + west: + - run: skupper delete +~~~ + +The initial steps are usually standard ones. There are also some +standard steps at the end. You may be able to use something like +this: + +~~~ yaml +steps: + - standard: configure_separate_console_sessions + - standard: access_your_clusters + - standard: set_up_your_namespaces + - standard: install_skupper_in_your_namespaces + - standard: check_the_status_of_your_namespaces + - standard: link_your_namespaces + + - standard: test_the_application + - standard: accessing_the_web_console + - standard: cleaning_up +~~~ + +Note that the `link_your_namespaces` and `test_the_application` steps +are less generic than the other steps, so check that the text and +commands they produce are doing what you need. If not, you'll need to +provide a custom step. + +The step commands are separated into named groups corresponding to the +sites. Each named group contains a list of command entries. Each +command entry has a `run` field containing a shell command and other +fields for awaiting completion or providing sample output. + +A **command**: + +~~~ yaml +- run: # A shell command (required) + apply: # Use this command only for "readme" or "test" (optional, default is both) + output: # Sample output to include in the README (optional) +~~~ + +Only the `run` and `output` fields are used in the README content. +The `output` field is used as sample output only, not for any kind of +testing. + +The `apply` field is useful when you want the readme instructions to +be different from the test procedure, or you simply want to omit +something. + +There is also a special `await` command you can use to pause for a +condition you require before going to the next step. It is used only +for testing and does not impact the README. 
+ +~~~ yaml +- await: # A resource or list of resources for which to await readiness (optional) +~~~ + +Example commands: + +~~~ yaml +commands: + east: + - run: kubectl expose deployment/backend --port 8080 --type LoadBalancer + output: | + service/frontend exposed + west: + - await: service/backend + - run: kubectl get service/backend + output: | + NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE + backend ClusterIP 10.102.112.121 8080/TCP 30s +~~~ + +## Demo mode + +Skewer has a mode where it executes all the steps, but before cleaning +up and exiting, it pauses so you can inspect things. + +It is enabled by setting the environment variable `SKEWER_DEMO` to any +value when you call `./plano run` or one of its variants. You can +also use `./plano demo`, which sets the variable for you. diff --git a/subrepos/skewer/config/.github/workflows/main.yaml b/subrepos/skewer/config/.github/workflows/main.yaml new file mode 100644 index 0000000..3266cb2 --- /dev/null +++ b/subrepos/skewer/config/.github/workflows/main.yaml @@ -0,0 +1,41 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +name: main +on: + push: + pull_request: + schedule: + - cron: "0 0 * * 0" +jobs: + test: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-python@v4 + with: + python-version: "3.x" + - uses: manusa/actions-setup-minikube@v2.7.2 + with: + minikube version: "v1.28.0" + kubernetes version: "v1.25.4" + github token: ${{secrets.GITHUB_TOKEN}} + - run: curl https://skupper.io/install.sh | sh + - run: echo "$HOME/.local/bin" >> $GITHUB_PATH + - run: ./plano test --debug diff --git a/subrepos/skewer/config/.plano.py b/subrepos/skewer/config/.plano.py new file mode 100644 index 0000000..5565ece --- /dev/null +++ b/subrepos/skewer/config/.plano.py @@ -0,0 +1,122 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +from skewer import * + +@command +def generate(): + """ + Generate README.md from the data in skewer.yaml + """ + generate_readme("skewer.yaml", "README.md") + +render_template = """ + + + + + + +
+ +@content@ + +
+ + +""".strip() + +@command +def render(): + """ + Render README.html from the data in skewer.yaml + """ + generate() + + markdown = read("README.md") + data = {"text": markdown} + json = emit_json(data) + content = http_post("https://api.github.com/markdown", json, content_type="application/json") + html = render_template.replace("@content@", content) + + write("README.html", html) + + print(f"file:{get_real_path('README.html')}") + +@command +def clean(): + remove(find(".", "__pycache__")) + remove("README.html") + +@command +def run_(debug=False): + """ + Run the example steps using Minikube + """ + run_steps_minikube("skewer.yaml", debug=debug) + +@command +def run_external(*kubeconfigs, debug=False): + """ + Run the example steps with user-provided kubeconfigs + """ + run_steps("skewer.yaml", *kubeconfigs, debug=debug) + +@command +def demo(debug=False): + """ + Run the example steps and pause before cleaning up + """ + with working_env(SKEWER_DEMO=1): + run_steps_minikube("skewer.yaml", debug=debug) + +@command +def test_(debug=False): + """ + Test README generation and run the steps on Minikube + """ + generate_readme("skewer.yaml", make_temp_file()) + run_steps_minikube("skewer.yaml", debug=debug) + +@command +def update_workflow(): + """ + Update the GitHub Actions workflow file + """ + + from_file = join("subrepos", "skewer", "config", ".github", "workflows", "main.yaml") + to_file = join(".github", "workflows", "main.yaml") + + copy(from_file, to_file) diff --git a/subrepos/skewer/plano b/subrepos/skewer/plano new file mode 120000 index 0000000..41ce1be --- /dev/null +++ b/subrepos/skewer/plano @@ -0,0 +1 @@ +subrepos/plano/bin/plano \ No newline at end of file diff --git a/subrepos/skewer/python/plano b/subrepos/skewer/python/plano new file mode 120000 index 0000000..aa28e7e --- /dev/null +++ b/subrepos/skewer/python/plano @@ -0,0 +1 @@ +../subrepos/plano/src/plano \ No newline at end of file diff --git a/subrepos/skewer/python/skewer/__init__.py b/subrepos/skewer/python/skewer/__init__.py new file mode 100644 index 0000000..3324b21 --- /dev/null +++ b/subrepos/skewer/python/skewer/__init__.py @@ -0,0 +1,20 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +from .main import * diff --git a/subrepos/skewer/python/skewer/main.py b/subrepos/skewer/python/skewer/main.py new file mode 100644 index 0000000..35bd965 --- /dev/null +++ b/subrepos/skewer/python/skewer/main.py @@ -0,0 +1,474 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +from plano import * + +_standard_steps_yaml = read(join(get_parent_dir(__file__), "standardsteps.yaml")) +_standard_steps = parse_yaml(_standard_steps_yaml) + +_example_suite_para = """ +This example is part of a [suite of examples][examples] showing the +different ways you can use [Skupper][website] to connect services +across cloud providers, data centers, and edge sites. + +[website]: https://skupper.io/ +[examples]: https://skupper.io/examples/index.html +""".strip() + +_standard_prerequisites = """ +* The `kubectl` command-line tool, version 1.15 or later + ([installation guide][install-kubectl]) + +* Access to at least one Kubernetes cluster, from [any provider you + choose][kube-providers] + +[install-kubectl]: https://kubernetes.io/docs/tasks/tools/install-kubectl/ +[kube-providers]: https://skupper.io/start/kubernetes.html +""".strip() + +_standard_next_steps = """ +Check out the other [examples][examples] on the Skupper website. +""".strip() + +_about_this_example = """ +This example was produced using [Skewer][skewer], a library for +documenting and testing Skupper examples. + +[skewer]: https://github.com/skupperproject/skewer + +Skewer provides utility functions for generating the README and +running the example steps. Use the `./plano` command in the project +root to see what is available. + +To quickly stand up the example using Minikube, try the `./plano demo` +command. 
+""".strip() + +def check_environment(): + check_program("base64") + check_program("curl") + check_program("kubectl") + check_program("skupper") + +# Eventually Kubernetes will make this nicer: +# https://github.com/kubernetes/kubernetes/pull/87399 +# https://github.com/kubernetes/kubernetes/issues/80828 +# https://github.com/kubernetes/kubernetes/issues/83094 +def await_resource(group, name, timeout=180): + notice(f"Waiting for {group}/{name} to become available") + + for i in range(timeout): + sleep(1) + + if run(f"kubectl get {group}/{name}", check=False).exit_code == 0: + break + else: + fail(f"Timed out waiting for {group}/{name}") + + if group == "deployment": + try: + run(f"kubectl wait --for condition=available --timeout {timeout}s {group}/{name}") + except: + run(f"kubectl logs {group}/{name}") + raise + +def await_external_ip(group, name, timeout=180): + await_resource(group, name, timeout=timeout) + + for i in range(timeout): + sleep(1) + + if call(f"kubectl get {group}/{name} -o jsonpath='{{.status.loadBalancer.ingress}}'") != "": + break + else: + fail(f"Timed out waiting for external IP for {group}/{name}") + + return call(f"kubectl get {group}/{name} -o jsonpath='{{.status.loadBalancer.ingress[0].ip}}'") + +def run_steps_minikube(skewer_file, debug=False): + check_environment() + check_program("minikube") + + skewer_data = read_yaml(skewer_file) + kubeconfigs = list() + + for site in skewer_data["sites"]: + kubeconfigs.append(make_temp_file()) + + try: + run("minikube -p skewer start") + + for kubeconfig in kubeconfigs: + with working_env(KUBECONFIG=kubeconfig): + run("minikube -p skewer update-context") + check_file(ENV["KUBECONFIG"]) + + with open("/tmp/minikube-tunnel-output", "w") as tunnel_output_file: + with start("minikube -p skewer tunnel", output=tunnel_output_file): + run_steps(skewer_file, *kubeconfigs, debug=debug) + finally: + run("minikube -p skewer delete") + +def run_steps(skewer_file, *kubeconfigs, debug=False): + check_environment() + + skewer_data = read_yaml(skewer_file) + work_dir = make_temp_dir() + + for i, site in enumerate(skewer_data["sites"].values()): + site["kubeconfig"] = kubeconfigs[i] + + _apply_standard_steps(skewer_data) + + try: + for step in skewer_data["steps"]: + if step.get("id") == "cleaning_up": + continue + + _run_step(work_dir, skewer_data, step) + + if "SKEWER_DEMO" in ENV: + _pause_for_demo(work_dir, skewer_data) + except: + if debug: + print("TROUBLE!") + print("-- Start of debug output") + + for site_name, site_data in skewer_data["sites"].items(): + kubeconfig = site_data["kubeconfig"].replace("~", work_dir) + print(f"---- Debug output for site '{site_name}'") + + with working_env(KUBECONFIG=kubeconfig): + run("kubectl get services", check=False) + run("kubectl get deployments", check=False) + run("kubectl get statefulsets", check=False) + run("kubectl get pods", check=False) + run("skupper version", check=False) + run("skupper status", check=False) + run("skupper link status", check=False) + run("skupper service status", check=False) + run("skupper gateway status", check=False) + run("skupper network status", check=False) + run("skupper debug events", check=False) + run("kubectl logs deployment/skupper-router", check=False) + run("kubectl logs deployment/skupper-service-controller", check=False) + + print("-- End of debug output") + + raise + finally: + for step in skewer_data["steps"]: + if step.get("id") == "cleaning_up": + _run_step(work_dir, skewer_data, step, check=False) + break + + +def _pause_for_demo(work_dir, 
skewer_data): + first_site_name, first_site_data = list(skewer_data["sites"].items())[0] + first_site_kubeconfig = first_site_data["kubeconfig"].replace("~", work_dir) + frontend_url = None + + with working_env(KUBECONFIG=first_site_kubeconfig): + console_ip = await_external_ip("service", "skupper") + console_url = f"https://{console_ip}:8010/" + password_data = call("kubectl get secret skupper-console-users -o jsonpath='{.data.admin}'") + password = base64_decode(password_data).decode("ascii") + + if run("kubectl get service/frontend", check=False, output=DEVNULL).exit_code == 0: + if call("kubectl get service/frontend -o jsonpath='{.spec.type}'") == "LoadBalancer": + frontend_ip = await_external_ip("service", "frontend") + frontend_url = f"http://{frontend_ip}:8080/" + + print() + print("Demo time!") + print() + print("Sites:") + + for site_name, site_data in skewer_data["sites"].items(): + kubeconfig = site_data["kubeconfig"].replace("~", work_dir) + print(f" {site_name}: export KUBECONFIG={kubeconfig}") + + if frontend_url: + print() + print(f"Frontend URL: {frontend_url}") + + print() + print(f"Console URL: {console_url}") + print( "Console user: admin") + print(f"Console password: {password}") + print() + + if "SKEWER_DEMO_NO_WAIT" not in ENV: + while input("Are you done (yes)? ") != "yes": # pragma: nocover + pass + +def _run_step(work_dir, skewer_data, step_data, check=True): + if "commands" not in step_data: + return + + if "title" in step_data: + notice("Running step '{}'", step_data["title"]) + + items = step_data["commands"].items() + + for site_name, commands in items: + kubeconfig = skewer_data["sites"][site_name]["kubeconfig"].replace("~", work_dir) + + with working_env(KUBECONFIG=kubeconfig): + for command in commands: + if command.get("apply") == "readme": + continue + + if "run" in command: + run(command["run"].replace("~", work_dir), shell=True, check=check) + + if "await" in command: + resources = command["await"] + + if isinstance(resources, str): + resources = (resources,) + + for resource in resources: + group, name = resource.split("/", 1) + await_resource(group, name) + + if "await_external_ip" in command: + resources = command["await_external_ip"] + + if isinstance(resources, str): + resources = (resources,) + + for resource in resources: + group, name = resource.split("/", 1) + await_external_ip(group, name) + +def generate_readme(skewer_file, output_file): + skewer_data = read_yaml(skewer_file) + out = list() + + out.append(f"# {skewer_data['title']}") + out.append("") + + if "github_actions_url" in skewer_data: + url = skewer_data["github_actions_url"] + out.append(f"[![main]({url}/badge.svg)]({url})") + out.append("") + + if "subtitle" in skewer_data: + out.append(f"#### {skewer_data['subtitle']}") + out.append("") + + out.append(_example_suite_para) + out.append("") + out.append("#### Contents") + out.append("") + + if "overview" in skewer_data: + out.append("* [Overview](#overview)") + + out.append("* [Prerequisites](#prerequisites)") + + _apply_standard_steps(skewer_data) + + for i, step_data in enumerate(skewer_data["steps"], 1): + if step_data.get("numbered", True): + title = f"Step {i}: {step_data['title']}" + else: + title = step_data['title'] + + fragment = replace(title, r"[ -]", "_") + fragment = replace(fragment, r"[\W]", "") + fragment = replace(fragment, "_", "-") + fragment = fragment.lower() + + out.append(f"* [{title}](#{fragment})") + + if "summary" in skewer_data: + out.append("* [Summary](#summary)") + + if "next_steps" in skewer_data: + 
out.append("* [Next steps](#next-steps)") + + out.append("* [About this example](#about-this-example)") + out.append("") + + if "overview" in skewer_data: + out.append("## Overview") + out.append("") + out.append(skewer_data["overview"].strip()) + out.append("") + + prerequisites = _standard_prerequisites + + if "prerequisites" in skewer_data: + prerequisites = skewer_data["prerequisites"].strip() + + out.append("## Prerequisites") + out.append("") + out.append(prerequisites) + out.append("") + + for i, step_data in enumerate(skewer_data["steps"], 1): + notice("Generating step '{}'", step_data["title"]) + + if step_data.get("numbered", True): + title = f"Step {i}: {step_data['title']}" + else: + title = step_data["title"] + + + out.append(f"## {title}") + out.append("") + out.append(_generate_readme_step(skewer_data, step_data)) + out.append("") + + if "summary" in skewer_data: + out.append("## Summary") + out.append("") + out.append(skewer_data["summary"].strip()) + out.append("") + + next_steps = _standard_next_steps + + if "next_steps" in skewer_data: + next_steps = skewer_data["next_steps"].strip() + + out.append("## Next steps") + out.append("") + out.append(next_steps) + out.append("") + + out.append("## About this example") + out.append("") + out.append(_about_this_example) + out.append("") + + write(output_file, "\n".join(out).strip() + "\n") + +def _generate_readme_step(skewer_data, step_data): + out = list() + + if "preamble" in step_data: + out.append(step_data["preamble"].strip()) + out.append("") + + if "commands" in step_data: + items = step_data["commands"].items() + + for i, item in enumerate(items): + site_name, commands = item + namespace = skewer_data["sites"][site_name]["namespace"] + outputs = list() + + out.append(f"_**Console for {namespace}:**_") + out.append("") + out.append("~~~ shell") + + for command in commands: + if command.get("apply") == "test": + continue + + if "run" in command: + out.append(command["run"]) + + if "output" in command: + assert "run" in command, command + + outputs.append((command["run"], command["output"])) + + out.append("~~~") + out.append("") + + if outputs: + out.append("_Sample output:_") + out.append("") + out.append("~~~ console") + out.append("\n\n".join((f"$ {run}\n{output.strip()}" for run, output in outputs))) + out.append("~~~") + out.append("") + + if "postamble" in step_data: + out.append(step_data["postamble"].strip()) + + return "\n".join(out).strip() + +def _apply_standard_steps(skewer_data): + notice("Applying standard steps") + + for step_data in skewer_data["steps"]: + if "standard" not in step_data: + continue + + standard_step_data = _standard_steps[step_data["standard"]] + + if "id" not in step_data: + step_data["id"] = standard_step_data.get("id") + + if "title" not in step_data: + step_data["title"] = standard_step_data["title"] + + if "numbered" not in step_data: + step_data["numbered"] = standard_step_data.get("numbered", True) + + if "preamble" not in step_data: + if "preamble" in standard_step_data: + step_data["preamble"] = standard_step_data["preamble"] + + if "postamble" not in step_data: + if "postamble" in standard_step_data: + step_data["postamble"] = standard_step_data["postamble"] + + if "commands" not in step_data: + if "commands" in standard_step_data: + step_data["commands"] = dict() + + for i, site in enumerate(skewer_data["sites"].items()): + site_key, site_data = site + + if str(i) in standard_step_data["commands"]: + # Is a specific index in the standard commands? 
+ commands = standard_step_data["commands"][str(i)] + step_data["commands"][site_key] = _resolve_commands(commands, site_data) + elif "*" in standard_step_data["commands"]: + # Is "*" in the standard commands? + commands = standard_step_data["commands"]["*"] + step_data["commands"][site_key] = _resolve_commands(commands, site_data) + else: + # Otherwise, omit commands for this site + continue + +def _resolve_commands(commands, site_data): + resolved_commands = list() + + for command in commands: + resolved_command = dict(command) + + if "run" in command: + resolved_command["run"] = command["run"] + resolved_command["run"] = resolved_command["run"].replace("@kubeconfig@", site_data["kubeconfig"]) + resolved_command["run"] = resolved_command["run"].replace("@namespace@", site_data["namespace"]) + + if "output" in command: + resolved_command["output"] = command["output"] + resolved_command["output"] = resolved_command["output"].replace("@kubeconfig@", site_data["kubeconfig"]) + resolved_command["output"] = resolved_command["output"].replace("@namespace@", site_data["namespace"]) + + resolved_commands.append(resolved_command) + + return resolved_commands diff --git a/subrepos/skewer/python/skewer/standardsteps.yaml b/subrepos/skewer/python/skewer/standardsteps.yaml new file mode 100644 index 0000000..8d08dda --- /dev/null +++ b/subrepos/skewer/python/skewer/standardsteps.yaml @@ -0,0 +1,230 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +install_the_skupper_command_line_tool: + title: Install the Skupper command-line tool + preamble: | + The `skupper` command-line tool is the entrypoint for installing + and configuring Skupper. You need to install the `skupper` + command only once for each development environment. + + On Linux or Mac, you can use the install script (inspect it + [here][install-script]) to download and extract the command: + + ~~~ shell + curl https://skupper.io/install.sh | sh + ~~~ + + The script installs the command under your home directory. It + prompts you to add the command to your path if necessary. + + For Windows and other installation options, see [Installing + Skupper][install-docs]. + + [install-script]: https://github.com/skupperproject/skupper-website/blob/main/docs/install.sh + [install-docs]: https://skupper.io/install/index.html +configure_separate_console_sessions: + title: Configure separate console sessions + preamble: | + Skupper is designed for use with multiple namespaces, usually on + different clusters. The `skupper` command uses your + [kubeconfig][kubeconfig] and current context to select the + namespace where it operates. + + [kubeconfig]: https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/ + + Your kubeconfig is stored in a file in your home directory. 
The + `skupper` and `kubectl` commands use the `KUBECONFIG` environment + variable to locate it. + + A single kubeconfig supports only one active context per user. + Since you will be using multiple contexts at once in this + exercise, you need to create distinct kubeconfigs. + + Start a console session for each of your namespaces. Set the + `KUBECONFIG` environment variable to a different path in each + session. + commands: + "*": + - run: export KUBECONFIG=@kubeconfig@ +access_your_clusters: + title: Access your clusters + preamble: | + + The procedure for accessing a Kubernetes cluster varies by + provider. [Find the instructions for your chosen + provider][kube-providers] and use them to authenticate and + configure access for each console session. + + [kube-providers]: https://skupper.io/start/kubernetes.html +set_up_your_namespaces: + title: Set up your namespaces + preamble: | + Use `kubectl create namespace` to create the namespaces you wish + to use (or use existing namespaces). Use `kubectl config + set-context` to set the current namespace for each session. + commands: + "*": + - run: kubectl create namespace @namespace@ + - run: kubectl config set-context --current --namespace @namespace@ +install_skupper_in_your_namespaces: + title: Install Skupper in your namespaces + preamble: | + The `skupper init` command installs the Skupper router and service + controller in the current namespace. Run the `skupper init` command + in each namespace. + + **Note:** If you are using Minikube, [you need to start `minikube + tunnel`][minikube-tunnel] before you install Skupper. + + [minikube-tunnel]: https://skupper.io/start/minikube.html#running-minikube-tunnel + commands: + "0": + - run: skupper init --enable-console --enable-flow-collector + "*": + - run: skupper init + postamble: | + _Sample output:_ + + ~~~ console + $ skupper init + Waiting for LoadBalancer IP or hostname... + Skupper is now installed in namespace ''. Use 'skupper status' to get more information. + ~~~ +check_the_status_of_your_namespaces: + title: Check the status of your namespaces + preamble: | + Use `skupper status` in each console to check that Skupper is + installed. + commands: + "*": + - await: [deployment/skupper-service-controller, deployment/skupper-router] + - run: skupper status + postamble: | + _Sample output:_ + + ~~~ console + Skupper is enabled for namespace "" in interior mode. It is connected to 1 other site. It has 1 exposed service. + The site console url is: + The credentials for internal console-auth mode are held in secret: 'skupper-console-users' + ~~~ + + As you move through the steps below, you can use `skupper status` at + any time to check your progress. +link_your_namespaces: + title: Link your namespaces + preamble: | + Creating a link requires use of two `skupper` commands in + conjunction, `skupper token create` and `skupper link create`. + + The `skupper token create` command generates a secret token that + signifies permission to create a link. The token also carries the + link details. Then, in a remote namespace, The `skupper link + create` command uses the token to create a link to the namespace + that generated it. + + **Note:** The link token is truly a *secret*. Anyone who has the + token can link to your namespace. Make sure that only those you + trust have access to it. + + First, use `skupper token create` in one namespace to generate the + token. Then, use `skupper link create` in the other to create a + link. 
+ commands: + "0": + - output: Token written to ~/secret.token + run: skupper token create ~/secret.token + "1": + - run: skupper link create ~/secret.token + output: | + Site configured to link to https://10.105.193.154:8081/ed9c37f6-d78a-11ec-a8c7-04421a4c5042 (name=link1) + Check the status of the link using 'skupper link status'. + - run: skupper link status --wait 60 + apply: test + postamble: | + If your console sessions are on different machines, you may need + to use `sftp` or a similar tool to transfer the token securely. + By default, tokens expire after a single use or 15 minutes after + creation. +test_the_application: + title: Test the application + preamble: | + Now we're ready to try it out. Use `kubectl get service/frontend` + to look up the external IP of the frontend service. Then use + `curl` or a similar tool to request the `/api/health` endpoint at + that address. + + **Note:** The `` field in the following commands is a + placeholder. The actual value is an IP address. + commands: + "0": + - run: kubectl get service/frontend + apply: readme + output: | + NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE + frontend LoadBalancer 10.103.232.28 8080:30407/TCP 15s + - run: curl http://:8080/api/health + apply: readme + output: OK + - await_external_ip: service/frontend + - run: curl --fail --verbose --retry 60 --retry-connrefused --retry-delay 2 $(kubectl get service/frontend -o jsonpath='http://{.status.loadBalancer.ingress[0].ip}:8080/api/health') + apply: test + postamble: | + If everything is in order, you can now access the web interface by + navigating to `http://:8080/` in your browser. +accessing_the_web_console: + title: Accessing the web console + numbered: false + preamble: | + Skupper includes a web console you can use to view the application + network. To access it, use `skupper status` to look up the URL of + the web console. Then use `kubectl get + secret/skupper-console-users` to look up the console admin + password. + + **Note:** The `` and `` fields in the + following output are placeholders. The actual values are specific + to your environment. + commands: + "0": + - run: skupper status + apply: readme + output: | + Skupper is enabled for namespace "@namespace@" in interior mode. It is connected to 1 other site. It has 1 exposed service. + The site console url is: + The credentials for internal console-auth mode are held in secret: 'skupper-console-users' + - run: kubectl get secret/skupper-console-users -o jsonpath={.data.admin} | base64 -d + apply: readme + output: + - await_external_ip: service/skupper + - run: curl --fail --insecure --verbose --retry 60 --retry-connrefused --retry-delay 2 $(kubectl get service/skupper -o jsonpath='https://{.status.loadBalancer.ingress[0].ip}:8080/') --user admin:$(kubectl get secret/skupper-console-users -o jsonpath={.data.admin} | base64 -d); echo + apply: test + postamble: | + Navigate to `` in your browser. When prompted, log + in as user `admin` and enter the password. +cleaning_up: + id: cleaning_up + title: Cleaning up + numbered: false + preamble: | + To remove Skupper and the other resources from this exercise, use + the following commands. + commands: + "*": + - run: skupper delete diff --git a/subrepos/skewer/python/skewer/tests.py b/subrepos/skewer/python/skewer/tests.py new file mode 100644 index 0000000..3e339e9 --- /dev/null +++ b/subrepos/skewer/python/skewer/tests.py @@ -0,0 +1,68 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. 
See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +from skewer import * + +@test +def check_environment_(): + check_environment() + +@test +def plano_(): + with working_dir("test-example"): + run("./plano") + run("./plano generate") + +@test +def workflow(): + parse_yaml(read("config/.github/workflows/main.yaml")) + +@test +def generate_readme_(): + with working_dir("test-example"): + generate_readme("skewer.yaml", "README.md") + check_file("README.md") + +@test +def await_resource_(): + try: + run("minikube -p skewer start") + + with expect_error(): + await_resource("deployment", "not-there", timeout=1) + + with expect_error(): + await_external_ip("service", "not-there", timeout=1) + finally: + run("minikube -p skewer delete") + +@test(timeout=600) +def run_steps_(): + with working_dir("test-example"): + with working_env(SKEWER_DEMO=1, SKEWER_DEMO_NO_WAIT=1): + run_steps_minikube("skewer.yaml", debug=True) + + with expect_error(): + with working_env(SKEWER_FAIL=1): + run_steps_minikube("skewer.yaml", debug=True) + +if __name__ == "__main__": + import sys + + PlanoTestCommand(sys.modules[__name__]).main() diff --git a/subrepos/skewer/subrepos/plano/.github/workflows/main.yaml b/subrepos/skewer/subrepos/plano/.github/workflows/main.yaml new file mode 100644 index 0000000..83ba30d --- /dev/null +++ b/subrepos/skewer/subrepos/plano/.github/workflows/main.yaml @@ -0,0 +1,48 @@ +name: main +on: + push: + pull_request: + schedule: + - cron: "0 0 * * 0" +jobs: + main: + strategy: + fail-fast: false + matrix: + os: [macos-latest, ubuntu-latest, windows-latest] + version: [3.7, 3.x] + runs-on: ${{matrix.os}} + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-python@v4 + with: + python-version: ${{matrix.version}} + - run: pip install build wheel + - run: python -m build + - run: pip install dist/ssorj_plano-1.0.0-py3-none-any.whl + - run: plano-self-test + cygwin: + runs-on: windows-latest + steps: + - run: git config --global core.autocrlf input + - uses: actions/checkout@v3 + - uses: cygwin/cygwin-install-action@master + with: + packages: python3 + - run: pip install build wheel + shell: C:\cygwin\bin\bash.exe -o igncr '{0}' + - run: make install + shell: C:\cygwin\bin\bash.exe -o igncr '{0}' + - run: echo "C:\Users\runneradmin\AppData\Roaming\Python\Python39\Scripts" >> "$GITHUB_PATH" + shell: C:\cygwin\bin\bash.exe -o igncr '{0}' + - run: plano-self-test + shell: C:\cygwin\bin\bash.exe -o igncr '{0}' + fedora: + runs-on: ubuntu-latest + container: fedora:latest + steps: + - uses: actions/checkout@v3 + - run: dnf -y install make pip python python-build python-wheel + - run: make install + - run: echo "$HOME/.local/bin" >> "$GITHUB_PATH" + - run: plano-self-test diff --git a/subrepos/skewer/subrepos/plano/.gitignore b/subrepos/skewer/subrepos/plano/.gitignore new file mode 100644 index 0000000..3af00c3 --- /dev/null +++ 
b/subrepos/skewer/subrepos/plano/.gitignore @@ -0,0 +1,6 @@ +__pycache__/ +*.egg-info/ +/build +/dist +/.coverage +/htmlcov diff --git a/subrepos/skewer/subrepos/plano/.gitrepo b/subrepos/skewer/subrepos/plano/.gitrepo new file mode 100644 index 0000000..96d297e --- /dev/null +++ b/subrepos/skewer/subrepos/plano/.gitrepo @@ -0,0 +1,12 @@ +; DO NOT EDIT (unless you know what you are doing) +; +; This subdirectory is a git "subrepo", and this file is maintained by the +; git-subrepo command. See https://github.com/git-commands/git-subrepo#readme +; +[subrepo] + remote = git@github.com:ssorj/plano.git + branch = main + commit = 75534fab4abb9f5316f80575ecfc3ed949f2d60b + parent = 05cf19ec1b033fab683c29b0618750be7adb4a8e + method = merge + cmdver = 0.4.5 diff --git a/subrepos/skewer/subrepos/plano/LICENSE.txt b/subrepos/skewer/subrepos/plano/LICENSE.txt new file mode 100644 index 0000000..e06d208 --- /dev/null +++ b/subrepos/skewer/subrepos/plano/LICENSE.txt @@ -0,0 +1,202 @@ +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + diff --git a/subrepos/skewer/subrepos/plano/MANIFEST.in b/subrepos/skewer/subrepos/plano/MANIFEST.in new file mode 100644 index 0000000..778ca32 --- /dev/null +++ b/subrepos/skewer/subrepos/plano/MANIFEST.in @@ -0,0 +1 @@ +include src/plano/_testproject/* diff --git a/subrepos/skewer/subrepos/plano/Makefile b/subrepos/skewer/subrepos/plano/Makefile new file mode 100644 index 0000000..a728c17 --- /dev/null +++ b/subrepos/skewer/subrepos/plano/Makefile @@ -0,0 +1,69 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +.NOTPARALLEL: + +# A workaround for an install-with-prefix problem in Fedora 36 +# +# https://docs.fedoraproject.org/en-US/fedora/latest/release-notes/developers/Development_Python/#_pipsetup_py_installation_with_prefix +# https://bugzilla.redhat.com/show_bug.cgi?id=2026979 + +export RPM_BUILD_ROOT := fake + +.PHONY: build +build: + python -m build + +.PHONY: test +test: clean build + python -m venv build/venv + . build/venv/bin/activate && pip install --force-reinstall dist/ssorj_plano-*-py3-none-any.whl + . build/venv/bin/activate && plano-self-test + +.PHONY: qtest +qtest: + PYTHONPATH=src python -m plano._tests + +.PHONY: install +install: build + pip install --user --force-reinstall dist/ssorj_plano-*-py3-none-any.whl + +.PHONY: clean +clean: + rm -rf build dist htmlcov .coverage src/plano/__pycache__ src/plano.egg-info + +.PHONY: docs +docs: + mkdir -p build + sphinx-build -M html docs build/docs + +.PHONY: coverage +coverage: build + python -m venv build/venv + . build/venv/bin/activate && pip install --force-reinstall dist/ssorj_plano-*-py3-none-any.whl + . 
build/venv/bin/activate && PYTHONPATH=build/venv/lib/python3.10/site-packages coverage run \ + --include build/venv/lib/python\*/site-packages/plano/\*,build/venv/bin/\* \ + build/venv/bin/plano-self-test + coverage report + coverage html + @echo "OUTPUT: file:${CURDIR}/htmlcov/index.html" + +.PHONY: upload +upload: build + twine upload --repository testpypi dist/* diff --git a/subrepos/skewer/subrepos/plano/README.md b/subrepos/skewer/subrepos/plano/README.md new file mode 100644 index 0000000..2bf8c99 --- /dev/null +++ b/subrepos/skewer/subrepos/plano/README.md @@ -0,0 +1,78 @@ +# Plano + +[![main](https://github.com/ssorj/plano/workflows/main/badge.svg)](https://github.com/ssorj/plano/actions?query=workflow%3Amain) + +Python functions for writing shell-style system scripts. + +## Installation + +To install plano globally for the current user: + +~~~ +make install +~~~ + +## Example 1 + +`~/.local/bin/widget`: +~~~ python +#!/usr/bin/python + +import sys +from plano import * + +@command +def greeting(message="Howdy"): + print(message) + +if __name__ == "__main__": + PlanoCommand(sys.modules[__name__]).main() +~~~ + +~~~ shell +$ widget greeting --message Hello +--> greeting +Hello +<-- greeting +OK (0s) +~~~ + +## Example 2 + +`~/.local/bin/widget-test`: +~~~ python +import sys +from plano import * + +@test +def check(): + run("widget --message Yo") + +if __name__ == "__main__": + PlanoTestCommand(sys.modules[__name__]).main() +~~~ + +~~~ shell +$ widget-test +=== Configuration === +Modules: __main__ +Test timeout: 5m +Fail fast: False + +=== Module '__main__' === +check ........................................................... PASSED 0.0s + +=== Summary === +Total: 1 +Skipped: 0 +Failed: 0 + +=== RESULT === +All tests passed +~~~ + +## Things to know + +* The plano command accepts command sequences in the form "this,that" + (no spaces). The command arguments are applied to the last command + only. diff --git a/subrepos/skewer/subrepos/plano/bin/plano b/subrepos/skewer/subrepos/plano/bin/plano new file mode 100755 index 0000000..9d1e018 --- /dev/null +++ b/subrepos/skewer/subrepos/plano/bin/plano @@ -0,0 +1,31 @@ +#!/usr/bin/python3 +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+# + +import os +import sys + +if os.path.islink(__file__): + repo_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) + sys.path.insert(0, os.path.join(repo_dir, "src")) + +from plano import PlanoCommand + +if __name__ == "__main__": + PlanoCommand().main() diff --git a/subrepos/skewer/subrepos/plano/bin/plano-test b/subrepos/skewer/subrepos/plano/bin/plano-test new file mode 100755 index 0000000..a256740 --- /dev/null +++ b/subrepos/skewer/subrepos/plano/bin/plano-test @@ -0,0 +1,31 @@ +#!/usr/bin/python3 +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +import os +import sys + +if os.path.islink(__file__): + repo_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) + sys.path.insert(0, os.path.join(repo_dir, "src")) + +from plano import PlanoTestCommand + +if __name__ == "__main__": + PlanoTestCommand().main() diff --git a/subrepos/skewer/subrepos/plano/docs/conf.py b/subrepos/skewer/subrepos/plano/docs/conf.py new file mode 100644 index 0000000..3277b1e --- /dev/null +++ b/subrepos/skewer/subrepos/plano/docs/conf.py @@ -0,0 +1,34 @@ +# import os +# import sys + +# sys.path.insert(0, os.path.abspath("../python")) + +extensions = [ + "sphinx.ext.autodoc", +] + +# autodoc_member_order = "bysource" +# autodoc_default_flags = ["members", "undoc-members", "inherited-members"] + +autodoc_default_options = { + "members": True, + "member-order": "bysource", + "undoc-members": True, + "imported-members": True, + "exclude-members": "PlanoProcess", +} + +master_doc = "index" +project = u"Plano" +copyright = u"1975" +author = u"Justin Ross" + +version = u"0.1.0" +release = u"" + +pygments_style = "sphinx" +html_theme = "nature" + +html_theme_options = { + "nosidebar": True, +} diff --git a/subrepos/skewer/subrepos/plano/docs/index.rst b/subrepos/skewer/subrepos/plano/docs/index.rst new file mode 100644 index 0000000..7441b03 --- /dev/null +++ b/subrepos/skewer/subrepos/plano/docs/index.rst @@ -0,0 +1,4 @@ +Plano +===== + +.. 
automodule:: plano diff --git a/subrepos/skewer/subrepos/plano/pyproject.toml b/subrepos/skewer/subrepos/plano/pyproject.toml new file mode 100644 index 0000000..a682141 --- /dev/null +++ b/subrepos/skewer/subrepos/plano/pyproject.toml @@ -0,0 +1,23 @@ +[build-system] +requires = [ "setuptools", "setuptools-scm" ] +build-backend = "setuptools.build_meta" + +[project] +name = "ssorj-plano" +version = "1.0.0" +authors = [ { name = "Justin Ross", email = "jross@apache.org" } ] +description = "Python functions for writing shell-style system scripts" +license = { file = "LICENSE.txt" } +readme = "README.md" +classifiers = [ "License :: OSI Approved :: Apache Software License" ] +requires-python = ">=3.7" +dependencies = [ "PyYAML" ] + +[project.scripts] +plano = "plano.command:_main" +plano-test = "plano.test:_main" +plano-self-test = "plano._tests:main" + +[project.urls] +"Homepage" = "https://github.com/ssorj/plano" +"Bug Tracker" = "https://github.com/ssorj/plano/issues" diff --git a/subrepos/skewer/subrepos/plano/src/plano/__init__.py b/subrepos/skewer/subrepos/plano/src/plano/__init__.py new file mode 100644 index 0000000..3218323 --- /dev/null +++ b/subrepos/skewer/subrepos/plano/src/plano/__init__.py @@ -0,0 +1,24 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +from .main import * +from .main import _default_sigterm_handler + +from .command import * +from .test import * diff --git a/subrepos/skewer/subrepos/plano/src/plano/_testproject/.plano.py b/subrepos/skewer/subrepos/plano/src/plano/_testproject/.plano.py new file mode 100644 index 0000000..67904b2 --- /dev/null +++ b/subrepos/skewer/subrepos/plano/src/plano/_testproject/.plano.py @@ -0,0 +1,112 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+# + +from plano import * + +@command +def base_command(alpha, beta, omega="x"): + """ + Base command help + """ + + print("base", alpha, beta, omega) + +@command(name="extended-command", parent=base_command) +def extended_command(alpha, beta, omega="y"): + print("extended", alpha, omega) + parent(alpha, beta, omega) + +@command(parameters=[CommandParameter("message_", help="The message to print", display_name="message"), + CommandParameter("count", help="Print the message COUNT times"), + CommandParameter("extra", default=1, short_option="e")]) +def echo(message_, count=1, extra=None, trouble=False): + """ + Print a message to the console + """ + + print("Echoing (message={}, count={})".format(message_, count)) + + if trouble: + raise Exception("Trouble") + + for i in range(count): + print(message_) + +@command +def echoecho(message): + echo(message) + +@command +def haberdash(first, *middle, last="bowler"): + """ + Habberdash command help + """ + + data = [first, *middle, last] + write_json("haberdash.json", data) + +@command(parameters=[CommandParameter("optional", positional=True)]) +def balderdash(required, optional="malarkey", other="rubbish", **extra_kwargs): + """ + Balderdash command help + """ + + data = [required, optional, other] + write_json("balderdash.json", data) + +@command +def splasher(): + write_json("splasher.json", [1]) + +@command +def dasher(alpha, beta=123): + pass + +@command(passthrough=True) +def dancer(gamma, omega="abc", passthrough_args=[]): + write_json("dancer.json", passthrough_args) + +# Vixen's parent calls prancer. We are testing to ensure the extended +# prancer (below) is executed. + +from plano._tests import prancer, vixen + +@command(parent=prancer) +def prancer(): + parent() + + notice("Extended prancer") + + write_json("prancer.json", True) + +@command(parent=vixen) +def vixen(): + parent() + +@command +def no_parent(): + parent() + +@command(parameters=[CommandParameter("spinach")]) +def feta(*args, **kwargs): + write_json("feta.json", kwargs["spinach"]) + +@command(hidden=True) +def invisible(something="nothing"): + write_json("invisible.json", something) diff --git a/subrepos/skewer/subrepos/plano/src/plano/_testproject/src/chucker/__init__.py b/subrepos/skewer/subrepos/plano/src/plano/_testproject/src/chucker/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/subrepos/skewer/subrepos/plano/src/plano/_testproject/src/chucker/tests.py b/subrepos/skewer/subrepos/plano/src/plano/_testproject/src/chucker/tests.py new file mode 100644 index 0000000..a556cc8 --- /dev/null +++ b/subrepos/skewer/subrepos/plano/src/plano/_testproject/src/chucker/tests.py @@ -0,0 +1,59 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+# + +from plano import * + +@test +def hello(): + print("Hello") + +@test +async def hello_async(): + print("Hello") + +@test +def goodbye(): + print("Goodbye") + +@test(disabled=True) +def badbye(): + print("Badbye") + assert False + +@test(disabled=True) +def skipped(): + skip_test("Skipped") + assert False + +@test(disabled=True) +def keyboard_interrupt(): + raise KeyboardInterrupt() + +@test(disabled=True, timeout=0.05) +def timeout(): + sleep(10, quiet=True) + assert False + +@test(disabled=True) +def process_error(): + run("expr 1 / 0") + +@test(disabled=True) +def system_exit_(): + exit(1) diff --git a/subrepos/skewer/subrepos/plano/src/plano/_tests.py b/subrepos/skewer/subrepos/plano/src/plano/_tests.py new file mode 100644 index 0000000..96ee35f --- /dev/null +++ b/subrepos/skewer/subrepos/plano/src/plano/_tests.py @@ -0,0 +1,1213 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +import getpass as _getpass +import os as _os +import signal as _signal +import socket as _socket +import sys as _sys +import threading as _threading + +try: + import http.server as _http +except ImportError: # pragma: nocover + import BaseHTTPServer as _http + +from .test import * + +test_project_dir = join(get_parent_dir(__file__), "_testproject") + +class test_project(working_dir): + def __enter__(self): + dir = super(test_project, self).__enter__() + copy(test_project_dir, ".", inside=False) + return dir + +TINY_INTERVAL = 0.05 + +@test +def archive_operations(): + with working_dir(): + make_dir("some-dir") + touch("some-dir/some-file") + + make_archive("some-dir") + assert is_file("some-dir.tar.gz"), list_dir() + + extract_archive("some-dir.tar.gz", output_dir="some-subdir") + assert is_dir("some-subdir/some-dir") + assert is_file("some-subdir/some-dir/some-file") + + rename_archive("some-dir.tar.gz", "something-else") + assert is_file("something-else.tar.gz") + + extract_archive("something-else.tar.gz") + assert is_dir("something-else") + assert is_file("something-else/some-file") + +@test +def command_operations(): + class SomeCommand(BaseCommand): + def __init__(self): + self.parser = BaseArgumentParser() + self.parser.add_argument("--interrupt", action="store_true") + self.parser.add_argument("--explode", action="store_true") + + def parse_args(self, args): + return self.parser.parse_args(args) + + def init(self, args): + self.verbose = args.verbose + self.interrupt = args.interrupt + self.explode = args.explode + + def run(self): + if self.verbose: + print("Hello") + + if self.interrupt: + raise KeyboardInterrupt() + + if self.explode: + raise PlanoError("Exploded") + + SomeCommand().main([]) + SomeCommand().main(["--interrupt"]) + SomeCommand().main(["--debug"]) + + with expect_system_exit(): + SomeCommand().main(["--verbose", "--debug", "--explode"]) 
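+
+    # SomeCommand.run() raises PlanoError when --explode is set; with --debug
+    # also set, BaseCommand.main prints the traceback and exits nonzero, which
+    # is what expect_system_exit() checks above.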
+ +@test +def console_operations(): + eprint("Here's a story") + eprint("About a", "man named Brady") + + pprint(list_dir()) + pprint(PlanoProcess, 1, "abc", end="\n\n") + + flush() + + with console_color("red"): + print("ALERT") + + print(cformat("AMBER ALERT", color="yellow")) + print(cformat("NO ALERT")) + + cprint("CRITICAL ALERT", color="red", bright=True) + +@test +def dir_operations(): + with working_dir(): + test_dir = make_dir("some-dir") + test_file_1 = touch(join(test_dir, "some-file-1")) + test_file_2 = touch(join(test_dir, "some-file-2")) + + result = list_dir(test_dir) + assert join(test_dir, result[0]) == test_file_1, (join(test_dir, result[0]), test_file_1) + + result = list_dir(test_dir, "*-file-1") + assert result == ["some-file-1"], (result, ["some-file-1"]) + + result = list_dir(test_dir, exclude="*-file-1") + assert result == ["some-file-2"], (result, ["some-file-2"]) + + result = list_dir("some-dir", "*.not-there") + assert result == [], result + + with working_dir(): + result = list_dir() + assert result == [], result + + result = find(test_dir) + assert result == [test_file_1, test_file_2], (result, [test_file_1, test_file_2]) + + result = find(test_dir, include="*-file-1") + assert result == [test_file_1], (result, [test_file_1]) + + result = find(test_dir, exclude="*-file-1") + assert result == [test_file_2], (result, [test_file_2]) + + with working_dir(): + result = find() + assert result == [], result + + make_dir("subdir") + + result = find("./subdir") + assert result == [], result + + with working_dir(): + with working_dir("a-dir", quiet=True): + touch("a-file") + + curr_dir = get_current_dir() + prev_dir = change_dir("a-dir") + new_curr_dir = get_current_dir() + new_prev_dir = change_dir(curr_dir) + + assert curr_dir == prev_dir, (curr_dir, prev_dir) + assert new_curr_dir == new_prev_dir, (new_curr_dir, new_prev_dir) + +@test +def env_operations(): + result = join_path_var("a", "b", "c", "a") + assert result == _os.pathsep.join(("a", "b", "c")), result + + curr_dir = get_current_dir() + + with working_dir("."): + assert get_current_dir() == curr_dir, (get_current_dir(), curr_dir) + + result = get_home_dir() + assert result == _os.path.expanduser("~"), (result, _os.path.expanduser("~")) + + result = get_home_dir("alice") + assert result.endswith("alice"), result + + user = _getpass.getuser() + result = get_user() + assert result == user, (result, user) + + result = get_hostname() + assert result, result + + result = get_program_name() + assert result, result + + result = get_program_name("alpha beta") + assert result == "alpha", result + + result = get_program_name("X=Y alpha beta") + assert result == "alpha", result + + result = which("echo") + assert result, result + + with working_env(YES_I_AM_SET=1): + check_env("YES_I_AM_SET") + + with expect_error(): + check_env("NO_I_AM_NOT") + + with working_env(I_AM_SET_NOW=1, amend=False): + check_env("I_AM_SET_NOW") + assert "YES_I_AM_SET" not in ENV, ENV + + with working_env(SOME_VAR=1): + assert ENV["SOME_VAR"] == "1", ENV.get("SOME_VAR") + + with working_env(SOME_VAR=2): + assert ENV["SOME_VAR"] == "2", ENV.get("SOME_VAR") + + with expect_error(): + check_program("not-there") + + with expect_error(): + check_module("not_there") + + with expect_output(contains="ARGS:") as out: + with open(out, "w") as f: + print_env(file=f) + +@test +def file_operations(): + with working_dir(): + alpha_dir = make_dir("alpha-dir") + alpha_file = touch(join(alpha_dir, "alpha-file")) + alpha_link = make_link(join(alpha_dir, 
"alpha-file-link"), "alpha-file") + alpha_broken_link = make_link(join(alpha_dir, "broken-link"), "no-such-file") + + beta_dir = make_dir("beta-dir") + beta_file = touch(join(beta_dir, "beta-file")) + beta_link = make_link(join(beta_dir, "beta-file-link"), "beta-file") + beta_broken_link = make_link(join(beta_dir, "broken-link"), join("..", alpha_dir, "no-such-file")) + beta_another_link = make_link(join(beta_dir, "broken-link"), join("..", alpha_dir, "alpha-file-link")) + + assert exists(beta_link) + assert exists(beta_file) + + with working_dir("beta-dir"): + assert is_file(read_link("beta-file-link")) + + copied_file = copy(alpha_file, beta_dir) + assert copied_file == join(beta_dir, "alpha-file"), copied_file + assert is_file(copied_file), list_dir(beta_dir) + + copied_link = copy(beta_link, join(beta_dir, "beta-file-link-copy")) + assert copied_link == join(beta_dir, "beta-file-link-copy"), copied_link + assert is_link(copied_link), list_dir(beta_dir) + + copied_dir = copy(alpha_dir, beta_dir) + assert copied_dir == join(beta_dir, "alpha-dir"), copied_dir + assert is_link(join(copied_dir, "alpha-file-link")) + + moved_file = move(beta_file, alpha_dir) + assert moved_file == join(alpha_dir, "beta-file"), moved_file + assert is_file(moved_file), list_dir(alpha_dir) + assert not exists(beta_file), list_dir(beta_dir) + + moved_dir = move(beta_dir, alpha_dir) + assert moved_dir == join(alpha_dir, "beta-dir"), moved_dir + assert is_dir(moved_dir), list_dir(alpha_dir) + assert not exists(beta_dir) + + gamma_dir = make_dir("gamma-dir") + gamma_file = touch(join(gamma_dir, "gamma-file")) + + delta_dir = make_dir("delta-dir") + delta_file = touch(join(delta_dir, "delta-file")) + + copy(gamma_dir, delta_dir, inside=False) + assert is_file(join("delta-dir", "gamma-file")) + + move(gamma_dir, delta_dir, inside=False) + assert is_file(join("delta-dir", "gamma-file")) + assert not exists(gamma_dir) + + epsilon_dir = make_dir("epsilon-dir") + epsilon_file_1 = touch(join(epsilon_dir, "epsilon-file-1")) + epsilon_file_2 = touch(join(epsilon_dir, "epsilon-file-2")) + epsilon_file_3 = touch(join(epsilon_dir, "epsilon-file-3")) + epsilon_file_4 = touch(join(epsilon_dir, "epsilon-file-4")) + + remove("not-there") + + remove(epsilon_file_2) + assert not exists(epsilon_file_2) + + remove(epsilon_dir) + assert not exists(epsilon_file_1) + assert not exists(epsilon_dir) + + remove([epsilon_file_3, epsilon_file_4]) + assert not exists(epsilon_file_3) + assert not exists(epsilon_file_4) + + file = write("xes", "x" * 10) + result = get_file_size(file) + assert result == 10, result + +@test +def http_operations(): + class Handler(_http.BaseHTTPRequestHandler): + def do_GET(self): + self.send_response(200) + self.end_headers() + self.wfile.write(b"[1]") + + def do_POST(self): + length = int(self.headers["content-length"]) + content = self.rfile.read(length) + + self.send_response(200) + self.end_headers() + self.wfile.write(content) + + def do_PUT(self): + length = int(self.headers["content-length"]) + content = self.rfile.read(length) + + self.send_response(200) + self.end_headers() + + class ServerThread(_threading.Thread): + def __init__(self, server): + _threading.Thread.__init__(self) + self.server = server + + def run(self): + self.server.serve_forever() + + host, port = "localhost", get_random_port() + url = "http://{}:{}".format(host, port) + + try: + server = _http.HTTPServer((host, port), Handler) + except (OSError, PermissionError): # pragma: nocover + # Try one more time + port = get_random_port() + 
server = _http.HTTPServer((host, port), Handler) + + server_thread = ServerThread(server) + server_thread.start() + + try: + with working_dir(): + result = http_get(url) + assert result == "[1]", result + + result = http_get(url, insecure=True) + assert result == "[1]", result + + result = http_get(url, output_file="a") + output = read("a") + assert result is None, result + assert output == "[1]", output + + result = http_get_json(url) + assert result == [1], result + + file_b = write("b", "[2]") + + result = http_post(url, read(file_b), insecure=True) + assert result == "[2]", result + + result = http_post(url, read(file_b), output_file="x") + output = read("x") + assert result is None, result + assert output == "[2]", output + + result = http_post_file(url, file_b) + assert result == "[2]", result + + result = http_post_json(url, parse_json(read(file_b))) + assert result == [2], result + + file_c = write("c", "[3]") + + result = http_put(url, read(file_c), insecure=True) + assert result is None, result + + result = http_put_file(url, file_c) + assert result is None, result + + result = http_put_json(url, parse_json(read(file_c))) + assert result is None, result + finally: + server.shutdown() + server.server_close() + server_thread.join() + +@test +def io_operations(): + with working_dir(): + input_ = "some-text\n" + file_a = write("a", input_) + output = read(file_a) + + assert input_ == output, (input_, output) + + pre_input = "pre-some-text\n" + post_input = "post-some-text\n" + + prepend(file_a, pre_input) + append(file_a, post_input) + + output = tail(file_a, 100) + tailed = tail(file_a, 1) + + assert output.startswith(pre_input), (output, pre_input) + assert output.endswith(post_input), (output, post_input) + assert tailed == post_input, (tailed, post_input) + + input_lines = [ + "alpha\n", + "beta\n", + "gamma\n", + "chi\n", + "psi\n", + "omega\n", + ] + + file_b = write_lines("b", input_lines) + output_lines = read_lines(file_b) + + assert input_lines == output_lines, (input_lines, output_lines) + + pre_lines = ["pre-alpha\n"] + post_lines = ["post-omega\n"] + + prepend_lines(file_b, pre_lines) + append_lines(file_b, post_lines) + + output_lines = tail_lines(file_b, 100) + tailed_lines = tail_lines(file_b, 1) + + assert output_lines[0] == pre_lines[0], (output_lines[0], pre_lines[0]) + assert output_lines[-1] == post_lines[0], (output_lines[-1], post_lines[0]) + assert tailed_lines[0] == post_lines[0], (tailed_lines[0], post_lines[0]) + + file_c = touch("c") + assert is_file(file_c), file_c + + file_d = write("d", "front@middle@@middle@back") + path = replace_in_file(file_d, "@middle@", "M", count=1) + result = read(path) + assert result == "frontM@middle@back", result + + file_e = write("e", "123") + file_f = write("f", "456") + path = concatenate("g", (file_e, "not-there", file_f)) + result = read(path) + assert result == "123456", result + +@test +def iterable_operations(): + result = unique([1, 1, 1, 2, 2, 3]) + assert result == [1, 2, 3], result + + result = skip([1, "", 2, None, 3]) + assert result == [1, 2, 3], result + + result = skip([1, "", 2, None, 3], 2) + assert result == [1, "", None, 3], result + +@test +def json_operations(): + with working_dir(): + input_data = { + "alpha": [1, 2, 3], + } + + file_a = write_json("a", input_data) + output_data = read_json(file_a) + + assert input_data == output_data, (input_data, output_data) + + json = read(file_a) + parsed_data = parse_json(json) + emitted_json = emit_json(input_data) + + assert input_data == parsed_data, 
(input_data, parsed_data) + assert json == emitted_json, (json, emitted_json) + +@test +def link_operations(): + with working_dir(): + make_dir("some-dir") + path = get_absolute_path(touch("some-dir/some-file")) + + with working_dir("another-dir"): + link = make_link("a-link", path) + linked_path = read_link(link) + assert linked_path.endswith(path), (linked_path, path) + +@test +def logging_operations(): + error("Error!") + warn("Warning!") + notice("Take a look!") + notice(123) + debug("By the way") + debug("abc{}{}{}", 1, 2, 3) + + with expect_exception(RuntimeError): + fail(RuntimeError("Error!")) + + with expect_error(): + fail("Error!") + + for level in ("debug", "notice", "warn", "error"): + with expect_output(contains="Hello") as out: + with logging_disabled(): + with logging_enabled(level=level, output=out): + log(level, "hello") + + with expect_output(equals="") as out: + with logging_enabled(output=out): + with logging_disabled(): + error("Yikes") + +@test +def path_operations(): + abspath = _os.path.abspath + normpath = _os.path.normpath + + with working_dir("/"): + result = get_current_dir() + expect = abspath(_os.sep) + assert result == expect, (result, expect) + + path = "a/b/c" + result = get_absolute_path(path) + expect = join(get_current_dir(), path) + assert result == expect, (result, expect) + + path = "/x/y/z" + result = get_absolute_path(path) + expect = abspath(path) + assert result == expect, (result, expect) + + path = "/x/y/z" + assert is_absolute(path) + + path = "x/y/z" + assert not is_absolute(path) + + path = "a//b/../c/" + result = normalize_path(path) + expect = normpath("a/c") + assert result == expect, (result, expect) + + path = "/a/../c" + result = get_real_path(path) + expect = abspath("/c") + assert result == expect, (result, expect) + + path = abspath("/a/b") + result = get_relative_path(path, "/a/c") + expect = normpath("../b") + assert result == expect, (result, expect) + + path = abspath("/a/b") + result = get_file_url(path) + expect = "file:{}".format(path) + assert result == expect, (result, expect) + + with working_dir(): + result = get_file_url("afile") + expect = join(get_file_url(get_current_dir()), "afile") + assert result == expect, (result, expect) + + path = "/alpha/beta.ext" + path_split = "/alpha", "beta.ext" + path_split_extension = "/alpha/beta", ".ext" + name_split_extension = "beta", ".ext" + + result = join(*path_split) + expect = normpath(path) + assert result == expect, (result, expect) + + result = split(path) + expect = normpath(path_split[0]), normpath(path_split[1]) + assert result == expect, (result, expect) + + result = split_extension(path) + expect = normpath(path_split_extension[0]), normpath(path_split_extension[1]) + assert result == expect, (result, expect) + + result = get_parent_dir(path) + expect = normpath(path_split[0]) + assert result == expect, (result, expect) + + result = get_base_name(path) + expect = normpath(path_split[1]) + assert result == expect, (result, expect) + + result = get_name_stem(path) + expect = normpath(name_split_extension[0]) + assert result == expect, (result, expect) + + result = get_name_stem("alpha.tar.gz") + expect = "alpha" + assert result == expect, (result, expect) + + result = get_name_extension(path) + expect = normpath(name_split_extension[1]) + assert result == expect, (result, expect) + + with working_dir(): + touch("adir/afile") + + check_exists("adir") + check_exists("adir/afile") + check_dir("adir") + check_file("adir/afile") + + with expect_error(): + 
check_exists("adir/notafile") + + with expect_error(): + check_file("adir/notafile") + + with expect_error(): + check_file("adir") + + with expect_error(): + check_dir("not-there") + + with expect_error(): + check_dir("adir/afile") + + await_exists("adir/afile") + + if not WINDOWS: + with expect_timeout(): + await_exists("adir/notafile", timeout=TINY_INTERVAL) + +@test +def port_operations(): + result = get_random_port() + assert result >= 49152 and result <= 65535, result + + server_port = get_random_port() + server_socket = _socket.socket(_socket.AF_INET, _socket.SOCK_STREAM) + + try: + try: + server_socket.bind(("localhost", server_port)) + except (OSError, PermissionError): # pragma: nocover + # Try one more time + server_port = get_random_port() + server_socket.bind(("localhost", server_port)) + + server_socket.listen(5) + + await_port(server_port) + await_port(str(server_port)) + + check_port(server_port) + + # Non-Linux platforms don't seem to produce the expected + # error. + if LINUX: + with expect_error(): + get_random_port(min=server_port, max=server_port) + finally: + server_socket.close() + + if not WINDOWS: + with expect_timeout(): + await_port(get_random_port(), timeout=TINY_INTERVAL) + +@test +def process_operations(): + result = get_process_id() + assert result, result + + proc = run("date") + assert proc is not None, proc + + print(repr(proc)) + + run("date", stash=True) + + proc = run(["echo", "hello"], check=False) + assert proc.exit_code == 0, proc.exit_code + + proc = run("cat /uh/uh", check=False) + assert proc.exit_code > 0, proc.exit_code + + with expect_output() as out: + run("date", output=out) + + run("date", output=DEVNULL) + run("date", stdin=DEVNULL) + run("date", stdout=DEVNULL) + run("date", stderr=DEVNULL) + + run("echo hello", quiet=True) + run("echo hello | cat", shell=True) + run(["echo", "hello"], shell=True) + + with expect_error(): + run("/not/there") + + with expect_error(): + run("cat /whoa/not/really", stash=True) + + result = call("echo hello").strip() + expect = "hello" + assert result == expect, (result, expect) + + result = call("echo hello | cat", shell=True).strip() + expect = "hello" + assert result == expect, (result, expect) + + with expect_error(): + call("cat /whoa/not/really") + + proc = start("sleep 10") + + if not WINDOWS: + with expect_timeout(): + wait(proc, timeout=TINY_INTERVAL) + + proc = start("echo hello") + sleep(TINY_INTERVAL) + stop(proc) + + proc = start("sleep 10") + stop(proc) + + proc = start("sleep 10") + kill(proc) + sleep(TINY_INTERVAL) + stop(proc) + + proc = start("date --not-there") + sleep(TINY_INTERVAL) + stop(proc) + + with start("sleep 10"): + sleep(TINY_INTERVAL) + + with working_dir(): + touch("i") + + with start("date", stdin="i", stdout="o", stderr="e"): + pass + + with expect_system_exit(): + exit() + + with expect_system_exit(): + exit(verbose=True) + + with expect_system_exit(): + exit("abc") + + with expect_system_exit(): + exit("abc", verbose=True) + + with expect_system_exit(): + exit(Exception()) + + with expect_system_exit(): + exit(Exception(), verbose=True) + + with expect_system_exit(): + exit(123) + + with expect_system_exit(): + exit(123, verbose=True) + + with expect_system_exit(): + exit(-123) + + with expect_exception(PlanoException): + exit(object()) + +@test +def string_operations(): + result = replace("ab", "a", "b") + assert result == "bb", result + + result = replace("aba", "a", "b", count=1) + assert result == "bba", result + + result = remove_prefix(None, "xxx") + assert result == 
"", result + + result = remove_prefix("anterior", "ant") + assert result == "erior", result + + result = remove_prefix("anterior", "ext") + assert result == "anterior", result + + result = remove_suffix(None, "xxx") + assert result == "", result + + result = remove_suffix("exterior", "ior") + assert result == "exter", result + + result = remove_suffix("exterior", "nal") + assert result == "exterior" + + result = shorten("abc", 2) + assert result == "ab", result + + result = shorten("abc", None) + assert result == "abc", result + + result = shorten("abc", 10) + assert result == "abc", result + + result = shorten("ellipsis", 6, ellipsis="...") + assert result == "ell...", result + + result = shorten(None, 6) + assert result == "", result + + result = plural(None) + assert result == "", result + + result = plural("") + assert result == "", result + + result = plural("test") + assert result == "tests", result + + result = plural("test", 1) + assert result == "test", result + + result = plural("bus") + assert result == "busses", result + + result = plural("bus", 1) + assert result == "bus", result + + result = plural("terminus", 2, "termini") + assert result == "termini", result + + result = capitalize(None) + assert result == "", result + + result = capitalize("") + assert result == "", result + + result = capitalize("hello, Frank") + assert result == "Hello, Frank", result + + encoded_result = base64_encode(b"abc") + decoded_result = base64_decode(encoded_result) + assert decoded_result == b"abc", decoded_result + + encoded_result = url_encode("abc=123&yeah!") + decoded_result = url_decode(encoded_result) + assert decoded_result == "abc=123&yeah!", decoded_result + +@test +def temp_operations(): + system_temp_dir = get_system_temp_dir() + + result = make_temp_file() + assert result.startswith(system_temp_dir), result + + result = make_temp_file(suffix=".txt") + assert result.endswith(".txt"), result + + result = make_temp_dir() + assert result.startswith(system_temp_dir), result + + with temp_dir() as d: + assert is_dir(d), d + list_dir(d) + + with temp_file() as f: + assert is_file(f), f + write(f, "test") + + with working_dir() as d: + assert is_dir(d), d + list_dir(d) + + user_temp_dir = get_user_temp_dir() + assert user_temp_dir, user_temp_dir + + ENV.pop("XDG_RUNTIME_DIR", None) + + user_temp_dir = get_user_temp_dir() + assert user_temp_dir, user_temp_dir + +@test +def test_operations(): + with test_project(): + with working_module_path("src"): + import chucker + import chucker.tests + + print_tests(chucker.tests) + + for verbose in (False, True): + # Module 'chucker' has no tests + with expect_error(): + run_tests(chucker, verbose=verbose) + + run_tests(chucker.tests, verbose=verbose) + run_tests(chucker.tests, exclude="*hello*", verbose=verbose) + run_tests(chucker.tests, enable="skipped", verbose=verbose) + + with expect_error(): + run_tests(chucker.tests, enable="skipped", unskip="*skipped*", verbose=verbose) + + with expect_error(): + run_tests(chucker.tests, enable="*badbye*", verbose=verbose) + + with expect_error(): + run_tests(chucker.tests, enable="*badbye*", fail_fast=True, verbose=verbose) + + with expect_exception(KeyboardInterrupt): + run_tests(chucker.tests, enable="keyboard-interrupt", verbose=verbose) + + with expect_error(): + run_tests(chucker.tests, enable="timeout", verbose=verbose) + + with expect_error(): + run_tests(chucker.tests, enable="process-error", verbose=verbose) + + with expect_error(): + run_tests(chucker.tests, enable="system-exit", verbose=verbose) + + 
with expect_system_exit(): + PlanoTestCommand().main(["--module", "nosuchmodule"]) + + def run_command(*args): + PlanoTestCommand(chucker.tests).main(args) + + run_command("--verbose") + run_command("--list") + + with expect_system_exit(): + run_command("--enable", "*badbye*") + + with expect_system_exit(): + run_command("--enable", "*badbye*", "--verbose") + + try: + with expect_exception(): + pass + raise Exception() # pragma: nocover + except AssertionError: + pass + + with expect_output(equals="abc123", contains="bc12", startswith="abc", endswith="123") as out: + write(out, "abc123") + +@test +def time_operations(): + start_time = get_time() + + sleep(TINY_INTERVAL) + + assert get_time() - start_time > TINY_INTERVAL + + with expect_system_exit(): + with start("sleep 10"): + from plano import _default_sigterm_handler + _default_sigterm_handler(_signal.SIGTERM, None) + + result = format_duration(0.1) + assert result == "0.1s", result + + result = format_duration(1) + assert result == "1s", result + + result = format_duration(1, align=True) + assert result == "1.0s", result + + result = format_duration(60) + assert result == "60s", result + + result = format_duration(3600) + assert result == "1h", result + + with Timer() as timer: + sleep(TINY_INTERVAL) + assert timer.elapsed_time > TINY_INTERVAL + + assert timer.elapsed_time > TINY_INTERVAL + + if not WINDOWS: + with expect_timeout(): + with Timer(timeout=TINY_INTERVAL) as timer: + sleep(10) + +@test +def unique_id_operations(): + id1 = get_unique_id() + id2 = get_unique_id() + + assert id1 != id2, (id1, id2) + + result = get_unique_id(1) + assert len(result) == 2 + + result = get_unique_id(16) + assert len(result) == 32 + +@test +def value_operations(): + result = nvl(None, "a") + assert result == "a", result + + result = nvl("b", "a") + assert result == "b", result + + assert is_string("a") + assert not is_string(1) + + for value in (None, "", (), [], {}): + assert is_empty(value), value + + for value in (object(), " ", (1,), [1], {"a": 1}): + assert not is_empty(value), value + + result = pformat({"z": 1, "a": 2}) + assert result == "{'a': 2, 'z': 1}", result + + result = format_empty((), "[nothing]") + assert result == "[nothing]", result + + result = format_empty((1,), "[nothing]") + assert result == (1,), result + + result = format_not_empty("abc", "[{}]") + assert result == "[abc]", result + + result = format_not_empty({}, "[{}]") + assert result == {}, result + + result = format_repr(Namespace(a=1, b=2), limit=1) + assert result == "Namespace(a=1)", result + + result = Namespace(a=1, b=2) + assert result.a == 1, result + assert result.b == 2, result + assert "a" in result, result + assert "c" not in result, result + repr(result) + + other = Namespace(a=1, b=2, c=3) + assert result != other, (result, other) + +@test +def yaml_operations(): + try: + import yaml as _yaml + except ImportError: # pragma: nocover + raise PlanoTestSkipped("PyYAML is not available") + + with working_dir(): + input_data = { + "alpha": [1, 2, 3], + } + + file_a = write_yaml("a", input_data) + output_data = read_yaml(file_a) + + assert input_data == output_data, (input_data, output_data) + + yaml = read(file_a) + parsed_data = parse_yaml(yaml) + emitted_yaml = emit_yaml(input_data) + + assert input_data == parsed_data, (input_data, parsed_data) + assert yaml == emitted_yaml, (yaml, emitted_yaml) + +@command +def prancer(): + notice("Base prancer") + +@command +def vixen(): + prancer() + +@test +def plano_command(): + with working_dir(): + 
PlanoCommand().main([]) + + PlanoCommand(_sys.modules[__name__]).main([]) + + PlanoCommand().main(["-m", "plano.test"]) + + with expect_system_exit(): + PlanoCommand().main(["-m", "nosuchmodule"]) + + with working_dir(): + write(".plano.py", "garbage") + + with expect_system_exit(): + PlanoCommand().main([]) + + with expect_system_exit(): + PlanoCommand().main(["-f", "no-such-file"]) + + def run_command(*args): + PlanoCommand().main(["-f", test_project_dir] + list(args)) + + with test_project(): + run_command() + run_command("--help") + run_command("--quiet") + run_command("--init-only") + + with expect_system_exit(): + run_command("no-such-command") + + with expect_system_exit(): + run_command("no-such-command", "--help") + + with expect_system_exit(): + run_command("--help", "no-such-command") + + run_command("extended-command", "a", "b", "--omega", "z") + + with expect_system_exit(): + run_command("echo") + + with expect_exception(contains="Trouble"): + run_command("echo", "Hello", "--trouble") + + run_command("echo", "Hello", "--count", "5") + + run_command("echoecho", "Greetings") + + with expect_system_exit(): + run_command("echo", "Hello", "--count", "not-an-int") + + run_command("haberdash", "ballcap", "fedora", "hardhat", "--last", "turban") + result = read_json("haberdash.json") + assert result == ["ballcap", "fedora", "hardhat", "turban"], result + + run_command("haberdash", "ballcap", "--last", "turban") + result = read_json("haberdash.json") + assert result == ["ballcap", "turban"], result + + run_command("haberdash", "ballcap") + result = read_json("haberdash.json") + assert result == ["ballcap", "bowler"], result + + run_command("balderdash", "bunk", "poppycock") + result = read_json("balderdash.json") + assert result == ["bunk", "poppycock", "rubbish"], result + + run_command("balderdash", "bunk") + result = read_json("balderdash.json") + assert result == ["bunk", "malarkey", "rubbish"], result + + run_command("balderdash", "bunk", "--other", "bollocks") + result = read_json("balderdash.json") + assert result == ["bunk", "malarkey", "bollocks"], result + + run_command("splasher,balderdash", "claptrap") + result = read_json("splasher.json") + assert result == [1], result + result = read_json("balderdash.json") + assert result == ["claptrap", "malarkey", "rubbish"], result + + with expect_system_exit(): + run_command("no-such-command,splasher") + + with expect_system_exit(): + run_command("splasher,no-such-command-nope") + + run_command("dasher", "alpha", "--beta", "123") + + # Gamma is an unexpected arg + with expect_system_exit(): + run_command("dasher", "alpha", "--gamma", "123") + + # Args after "xyz" are extra passthrough args + run_command("dancer", "gamma", "--omega", "xyz", "extra1", "--extra2", "extra3") + result = read_json("dancer.json") + assert result == ["extra1", "--extra2", "extra3"], result + + # Ensure indirect calls (through parent commands) are specialized + run_command("vixen") + assert exists("prancer.json") + + with expect_system_exit(): + run_command("no-parent") + + run_command("feta", "--spinach", "oregano") + result = read_json("feta.json") + assert result == "oregano" + + run_command("invisible") + result = read_json("invisible.json") + assert result == "nothing" + +def main(): + PlanoTestCommand(_sys.modules[__name__]).main() + +if __name__ == "__main__": # pragma: nocover + main() diff --git a/subrepos/skewer/subrepos/plano/src/plano/command.py b/subrepos/skewer/subrepos/plano/src/plano/command.py new file mode 100644 index 0000000..c5d3308 --- 
/dev/null +++ b/subrepos/skewer/subrepos/plano/src/plano/command.py @@ -0,0 +1,513 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +from .main import * + +import argparse as _argparse +import importlib as _importlib +import inspect as _inspect +import os as _os +import sys as _sys +import traceback as _traceback + +class BaseCommand: + def main(self, args=None): + if args is None: + args = ARGS[1:] + + args = self.parse_args(args) + + assert isinstance(args, _argparse.Namespace), args + + self.verbose = args.verbose or args.debug + self.quiet = args.quiet + self.debug_enabled = args.debug + self.init_only = args.init_only + + level = "notice" + + if self.verbose: + level = "info" + + if self.quiet: + level = "error" + + if self.debug_enabled: + level = "debug" + + with logging_enabled(level=level): + try: + self.init(args) + + if self.init_only: + return + + self.run() + except KeyboardInterrupt: + pass + except PlanoError as e: + if self.debug_enabled: + _traceback.print_exc() + exit(1) + else: + exit(str(e)) + + def parse_args(self, args): # pragma: nocover + raise NotImplementedError() + + def init(self, args): # pragma: nocover + pass + + def run(self): # pragma: nocover + raise NotImplementedError() + +class BaseArgumentParser(_argparse.ArgumentParser): + def __init__(self, **kwargs): + super().__init__(**kwargs) + + self.allow_abbrev = False + self.formatter_class = _argparse.RawDescriptionHelpFormatter + + self.add_argument("--verbose", action="store_true", + help="Print detailed logging to the console") + self.add_argument("--quiet", action="store_true", + help="Print no logging to the console") + self.add_argument("--debug", action="store_true", + help="Print debugging output to the console") + self.add_argument("--init-only", action="store_true", + help=_argparse.SUPPRESS) + + _capitalize_help(self) + +_plano_command = None + +class PlanoCommand(BaseCommand): + def __init__(self, module=None, description="Run commands defined as Python functions", epilog=None): + self.module = module + self.bound_commands = dict() + self.running_commands = list() + self.passthrough_args = None + + assert self.module is None or _inspect.ismodule(self.module), self.module + + self.pre_parser = BaseArgumentParser(description=description, add_help=False) + self.pre_parser.add_argument("-h", "--help", action="store_true", + help="Show this help message and exit") + + if self.module is None: + self.pre_parser.add_argument("-f", "--file", help="Load commands from FILE (default '.plano.py')") + self.pre_parser.add_argument("-m", "--module", help="Load commands from MODULE") + + self.parser = _argparse.ArgumentParser(parents=(self.pre_parser,), + description=description, epilog=epilog, + add_help=False, allow_abbrev=False) + + # This is intentionally added after 
self.pre_parser is passed + # as parent to self.parser, since it is used only in the + # preliminary parsing. + self.pre_parser.add_argument("command", nargs="?", help=_argparse.SUPPRESS) + + global _plano_command + _plano_command = self + + def parse_args(self, args): + pre_args, _ = self.pre_parser.parse_known_args(args) + + if self.module is None: + if pre_args.module is None: + self.module = self._load_file(pre_args.file) + else: + self.module = self._load_module(pre_args.module) + + if self.module is not None: + self._bind_commands(self.module) + + self._process_commands() + + self.preceding_commands = list() + + if pre_args.command is not None and "," in pre_args.command: + names = pre_args.command.split(",") + + for name in names[:-1]: + try: + self.preceding_commands.append(self.bound_commands[name]) + except KeyError: + self.parser.error(f"Command '{name}' is unknown") + + args[args.index(pre_args.command)] = names[-1] + + args, self.passthrough_args = self.parser.parse_known_args(args) + + return args + + def init(self, args): + self.help = args.help + + self.selected_command = None + self.command_args = list() + self.command_kwargs = dict() + + if args.command is not None: + for command in self.preceding_commands: + command() + + self.selected_command = self.bound_commands[args.command] + + if not self.selected_command.passthrough and self.passthrough_args: + self.parser.error(f"unrecognized arguments: {' '.join(self.passthrough_args)}") + + for param in self.selected_command.parameters.values(): + if param.name == "passthrough_args": + continue + + if param.positional: + if param.multiple: + self.command_args.extend(getattr(args, param.name)) + else: + self.command_args.append(getattr(args, param.name)) + else: + self.command_kwargs[param.name] = getattr(args, param.name) + + if self.selected_command.passthrough: + self.command_kwargs["passthrough_args"] = self.passthrough_args + + def run(self): + if self.help or self.module is None or self.selected_command is None: + self.parser.print_help() + return + + with Timer() as timer: + self.selected_command(*self.command_args, **self.command_kwargs) + + cprint("OK", color="green", file=_sys.stderr, end="") + cprint(" ({})".format(format_duration(timer.elapsed_time)), color="magenta", file=_sys.stderr) + + def _load_module(self, name): + try: + return _importlib.import_module(name) + except ImportError: + exit("Module '{}' not found", name) + + def _load_file(self, path): + if path is not None and is_dir(path): + path = self._find_file(path) + + if path is not None and not is_file(path): + exit("File '{}' not found", path) + + if path is None: + path = self._find_file(get_current_dir()) + + if path is None: + return + + debug("Loading '{}'", path) + + _sys.path.insert(0, join(get_parent_dir(path), "python")) + + spec = _importlib.util.spec_from_file_location("_plano", path) + module = _importlib.util.module_from_spec(spec) + _sys.modules["_plano"] = module + + try: + spec.loader.exec_module(module) + except Exception as e: + error(e) + exit("Failure loading {}: {}", path, str(e)) + + return module + + def _find_file(self, dir): + # Planofile and .planofile remain temporarily for backward compatibility + for name in (".plano.py", "Planofile", ".planofile"): + path = join(dir, name) + + if is_file(path): + return path + + def _bind_commands(self, module): + for var in vars(module).values(): + if callable(var) and var.__class__.__name__ == "Command": + self.bound_commands[var.name] = var + + def _process_commands(self): + subparsers = 
self.parser.add_subparsers(title="commands", dest="command", metavar="{command}") + + for command in self.bound_commands.values(): + # This doesn't work yet, but in the future it might. + # https://bugs.python.org/issue22848 + # + # help = _argparse.SUPPRESS if command.hidden else command.help + + help = "[internal]" if command.hidden else command.help + add_help = False if command.passthrough else True + description = nvl(command.description, command.help) + + subparser = subparsers.add_parser(command.name, help=help, add_help=add_help, description=description, + formatter_class=_argparse.RawDescriptionHelpFormatter) + + for param in command.parameters.values(): + if param.positional: + if param.multiple: + subparser.add_argument(param.name, metavar=param.metavar, type=param.type, help=param.help, + nargs="*") + elif param.optional: + subparser.add_argument(param.name, metavar=param.metavar, type=param.type, help=param.help, + nargs="?", default=param.default) + else: + subparser.add_argument(param.name, metavar=param.metavar, type=param.type, help=param.help) + else: + flag_args = list() + + if param.short_option is not None: + flag_args.append("-{}".format(param.short_option)) + + flag_args.append("--{}".format(param.display_name)) + + help = param.help + + if param.default not in (None, False): + if help is None: + help = "Default value is {}".format(repr(param.default)) + else: + help += " (default {})".format(repr(param.default)) + + if param.default is False: + subparser.add_argument(*flag_args, dest=param.name, default=param.default, action="store_true", + help=help) + else: + subparser.add_argument(*flag_args, dest=param.name, default=param.default, + metavar=param.metavar, type=param.type, help=help) + + _capitalize_help(subparser) + +_command_help = { + "build": "Build artifacts from source", + "clean": "Clean up the source tree", + "dist": "Generate distribution artifacts", + "install": "Install the built artifacts on your system", + "test": "Run the tests", +} + +def command(_function=None, name=None, parameters=None, parent=None, passthrough=False, hidden=False): + class Command: + def __init__(self, function): + self.function = function + self.module = _inspect.getmodule(self.function) + + self.name = name + self.parent = parent + + if self.parent is None: + # Strip trailing underscores and convert remaining + # underscores to hyphens + default = self.function.__name__.rstrip("_").replace("_", "-") + + self.name = nvl(self.name, default) + self.parameters = self._process_parameters(parameters) + else: + assert parameters is None + + self.name = nvl(self.name, self.parent.name) + self.parameters = self.parent.parameters + + doc = _inspect.getdoc(self.function) + + if doc is None: + self.help = _command_help.get(self.name) + self.description = self.help + else: + self.help = doc.split("\n")[0] + self.description = doc + + if self.parent is not None: + self.help = nvl(self.help, self.parent.help) + self.description = nvl(self.description, self.parent.description) + + self.passthrough = passthrough + self.hidden = hidden + + debug("Defining {}", self) + + for param in self.parameters.values(): + debug(" {}", str(param).capitalize()) + + def __repr__(self): + return "command '{}:{}'".format(self.module.__name__, self.name) + + def _process_parameters(self, cparams): + # CommandParameter objects from the @command decorator + cparams_in = {x.name: x for x in nvl(cparams, ())} + cparams_out = dict() + + # Parameter objects from the function signature + sig = 
_inspect.signature(self.function) + sparams = list(sig.parameters.values()) + + if len(sparams) == 2 and sparams[0].name == "args" and sparams[1].name == "kwargs": + # Don't try to derive command parameters from *args and **kwargs + return cparams_in + + for sparam in sparams: + try: + cparam = cparams_in[sparam.name] + except KeyError: + cparam = CommandParameter(sparam.name) + + if sparam.kind is sparam.POSITIONAL_ONLY: # pragma: nocover + if sparam.positional is None: + cparam.positional = True + elif sparam.kind is sparam.POSITIONAL_OR_KEYWORD and sparam.default is sparam.empty: + if cparam.positional is None: + cparam.positional = True + elif sparam.kind is sparam.POSITIONAL_OR_KEYWORD and sparam.default is not sparam.empty: + cparam.optional = True + cparam.default = sparam.default + elif sparam.kind is sparam.VAR_POSITIONAL: + if cparam.positional is None: + cparam.positional = True + cparam.multiple = True + elif sparam.kind is sparam.VAR_KEYWORD: + continue + elif sparam.kind is sparam.KEYWORD_ONLY: + cparam.optional = True + cparam.default = sparam.default + else: # pragma: nocover + raise NotImplementedError(sparam.kind) + + if cparam.type is None and cparam.default not in (None, False): # XXX why false? + cparam.type = type(cparam.default) + + cparams_out[cparam.name] = cparam + + return cparams_out + + def __call__(self, *args, **kwargs): + from .command import _plano_command, PlanoCommand + assert isinstance(_plano_command, PlanoCommand), _plano_command + + app = _plano_command + command = app.bound_commands[self.name] + + if command is not self: + # The command bound to this name has been overridden. + # This happens when a parent command invokes a peer + # command that is overridden. + + command(*args, **kwargs) + + return + + debug("Running {} {} {}".format(self, args, kwargs)) + + app.running_commands.append(self) + + dashes = "--" * len(app.running_commands) + display_args = list(self._get_display_args(args, kwargs)) + + with console_color("magenta", file=_sys.stderr): + eprint("{}> {}".format(dashes, self.name), end="") + + if display_args: + eprint(" ({})".format(", ".join(display_args)), end="") + + eprint() + + self.function(*args, **kwargs) + + cprint("<{} {}".format(dashes, self.name), color="magenta", file=_sys.stderr) + + app.running_commands.pop() + + if app.running_commands: + name = app.running_commands[-1].name + + cprint("{}| {}".format(dashes[:-2], name), color="magenta", file=_sys.stderr) + + def _get_display_args(self, args, kwargs): + for i, param in enumerate(self.parameters.values()): + if param.positional: + if param.multiple: + for va in args[i:]: + yield repr(va) + elif param.optional: + value = args[i] + + if value == param.default: + continue + + yield repr(value) + else: + yield repr(args[i]) + else: + value = kwargs.get(param.name, param.default) + + if value == param.default: + continue + + if value in (True, False): + value = str(value).lower() + else: + value = repr(value) + + yield "{}={}".format(param.display_name, value) + + if _function is None: + return Command + else: + return Command(_function) + +def parent(*args, **kwargs): + try: + f_locals = _inspect.stack()[2].frame.f_locals + parent_fn = f_locals["self"].parent.function + except: + fail("Missing parent command") + + parent_fn(*args, **kwargs) + +class CommandParameter: + def __init__(self, name, display_name=None, type=None, metavar=None, help=None, short_option=None, default=None, positional=None): + self.name = name + self.display_name = nvl(display_name, self.name.replace("_", 
"-")) + self.type = type + self.metavar = nvl(metavar, self.display_name.upper()) + self.help = help + self.short_option = short_option + self.default = default + self.positional = positional + + self.optional = False + self.multiple = False + + def __repr__(self): + return "argument '{}' (default {})".format(self.name, repr(self.default)) + +# Patch the default help text +def _capitalize_help(parser): + try: + for action in parser._actions: + if action.help and action.help is not _argparse.SUPPRESS: + action.help = capitalize(action.help) + except: # pragma: nocover + pass + +def _main(): # pragma: nocover + PlanoCommand().main() diff --git a/subrepos/skewer/subrepos/plano/src/plano/main.py b/subrepos/skewer/subrepos/plano/src/plano/main.py new file mode 100644 index 0000000..c3daa15 --- /dev/null +++ b/subrepos/skewer/subrepos/plano/src/plano/main.py @@ -0,0 +1,1634 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +import base64 as _base64 +import binascii as _binascii +import code as _code +import fnmatch as _fnmatch +import getpass as _getpass +import json as _json +import os as _os +import pprint as _pprint +import pkgutil as _pkgutil +import random as _random +import re as _re +import shlex as _shlex +import shutil as _shutil +import signal as _signal +import socket as _socket +import subprocess as _subprocess +import sys as _sys +import tempfile as _tempfile +import time as _time +import traceback as _traceback +import urllib as _urllib +import uuid as _uuid + +_max = max + +## Exceptions + +class PlanoException(Exception): + pass + +class PlanoError(PlanoException): + pass + +class PlanoTimeout(PlanoException): + pass + +## Global variables + +ENV = _os.environ +ARGS = _sys.argv + +STDIN = _sys.stdin +STDOUT = _sys.stdout +STDERR = _sys.stderr +DEVNULL = _os.devnull + +LINUX = _sys.platform == "linux" +WINDOWS = _sys.platform in ("win32", "cygwin") + +PLANO_DEBUG = "PLANO_DEBUG" in ENV + +## Archive operations + +def make_archive(input_dir, output_file=None, quiet=False): + """ + group: archive_operations + """ + + check_program("tar") + + archive_stem = get_base_name(input_dir) + + if output_file is None: + output_file = "{}.tar.gz".format(join(get_current_dir(), archive_stem)) + + _info(quiet, "Making archive {} from directory {}", repr(output_file), repr(input_dir)) + + with working_dir(get_parent_dir(input_dir)): + run("tar -czf temp.tar.gz {}".format(archive_stem)) + move("temp.tar.gz", output_file) + + return output_file + +def extract_archive(input_file, output_dir=None, quiet=False): + check_program("tar") + + if output_dir is None: + output_dir = get_current_dir() + + _info(quiet, "Extracting archive {} to directory {}", repr(input_file), repr(output_dir)) + + input_file = get_absolute_path(input_file) + + with 
working_dir(output_dir): + copy(input_file, "temp.tar.gz") + + try: + run("tar -xf temp.tar.gz") + finally: + remove("temp.tar.gz") + + return output_dir + +def rename_archive(input_file, new_archive_stem, quiet=False): + _info(quiet, "Renaming archive {} with stem {}", repr(input_file), repr(new_archive_stem)) + + output_dir = get_absolute_path(get_parent_dir(input_file)) + output_file = "{}.tar.gz".format(join(output_dir, new_archive_stem)) + + input_file = get_absolute_path(input_file) + + with working_dir(): + extract_archive(input_file) + + input_name = list_dir()[0] + input_dir = move(input_name, new_archive_stem) + + make_archive(input_dir, output_file=output_file) + + remove(input_file) + + return output_file + +## Console operations + +def flush(): + _sys.stdout.flush() + _sys.stderr.flush() + +def eprint(*args, **kwargs): + print(*args, file=_sys.stderr, **kwargs) + +def pprint(*args, **kwargs): + args = [pformat(x) for x in args] + print(*args, **kwargs) + +_color_codes = { + "black": "\u001b[30", + "red": "\u001b[31", + "green": "\u001b[32", + "yellow": "\u001b[33", + "blue": "\u001b[34", + "magenta": "\u001b[35", + "cyan": "\u001b[36", + "white": "\u001b[37", +} + +_color_reset = "\u001b[0m" + +def _get_color_code(color, bright): + elems = [_color_codes[color]] + + if bright: + elems.append(";1") + + elems.append("m") + + return "".join(elems) + +def _is_color_enabled(file): + return hasattr(file, "isatty") and file.isatty() + +class console_color: + def __init__(self, color=None, bright=False, file=_sys.stdout): + self.file = file + self.color_code = None + + if (color, bright) != (None, False): + self.color_code = _get_color_code(color, bright) + + self.enabled = self.color_code is not None and _is_color_enabled(self.file) + + def __enter__(self): + if self.enabled: + print(self.color_code, file=self.file, end="", flush=True) + + def __exit__(self, exc_type, exc_value, traceback): + if self.enabled: + print(_color_reset, file=self.file, end="", flush=True) + +def cformat(value, color=None, bright=False, file=_sys.stdout): + if (color, bright) != (None, False) and _is_color_enabled(file): + return "".join((_get_color_code(color, bright), value, _color_reset)) + else: + return value + +def cprint(*args, **kwargs): + color = kwargs.pop("color", "white") + bright = kwargs.pop("bright", False) + file = kwargs.get("file", _sys.stdout) + + with console_color(color, bright=bright, file=file): + print(*args, **kwargs) + +class output_redirected: + def __init__(self, output, quiet=False): + self.output = output + self.quiet = quiet + + def __enter__(self): + flush() + + _info(self.quiet, "Redirecting output to file {}", repr(self.output)) + + if is_string(self.output): + output = open(self.output, "w") + + self.prev_stdout, self.prev_stderr = _sys.stdout, _sys.stderr + _sys.stdout, _sys.stderr = output, output + + def __exit__(self, exc_type, exc_value, traceback): + flush() + + _sys.stdout, _sys.stderr = self.prev_stdout, self.prev_stderr + +try: + breakpoint +except NameError: # pragma: nocover + def breakpoint(): + import pdb + pdb.set_trace() + +def repl(locals): # pragma: nocover + _code.InteractiveConsole(locals=locals).interact() + +def print_properties(props, file=None): + size = max([len(x[0]) for x in props]) + + for prop in props: + name = "{}:".format(prop[0]) + template = "{{:<{}}} ".format(size + 1) + + print(template.format(name), prop[1], end="", file=file) + + for value in prop[2:]: + print(" {}".format(value), end="", file=file) + + print(file=file) + +## Directory 
operations + +def find(dirs=None, include="*", exclude=()): + if dirs is None: + dirs = "." + + if is_string(dirs): + dirs = (dirs,) + + if is_string(include): + include = (include,) + + if is_string(exclude): + exclude = (exclude,) + + found = set() + + for dir in dirs: + for root, dir_names, file_names in _os.walk(dir, followlinks=True): + names = dir_names + file_names + + for include_pattern in include: + names = _fnmatch.filter(names, include_pattern) + + for exclude_pattern in exclude: + for name in _fnmatch.filter(names, exclude_pattern): + names.remove(name) + + if root.startswith("./"): + root = remove_prefix(root, "./") + elif root == ".": + root = "" + + found.update([join(root, x) for x in names]) + + return sorted(found) + +def make_dir(dir, quiet=False): + if dir == "": + return dir + + if not exists(dir): + _info(quiet, "Making directory '{}'", dir) + _os.makedirs(dir) + + return dir + +def make_parent_dir(path, quiet=False): + return make_dir(get_parent_dir(path), quiet=quiet) + +# Returns the current working directory so you can change it back +def change_dir(dir, quiet=False): + _debug(quiet, "Changing directory to {}", repr(dir)) + + prev_dir = get_current_dir() + + if not dir: + return prev_dir + + _os.chdir(dir) + + return prev_dir + +def list_dir(dir=None, include="*", exclude=()): + if dir in (None, ""): + dir = get_current_dir() + + assert is_dir(dir), dir + + if is_string(include): + include = (include,) + + if is_string(exclude): + exclude = (exclude,) + + names = _os.listdir(dir) + + for include_pattern in include: + names = _fnmatch.filter(names, include_pattern) + + for exclude_pattern in exclude: + for name in _fnmatch.filter(names, exclude_pattern): + names.remove(name) + + return sorted(names) + +# No args constructor gets a temp dir +class working_dir: + def __init__(self, dir=None, quiet=False): + self.dir = dir + self.prev_dir = None + self.remove = False + self.quiet = quiet + + if self.dir is None: + self.dir = make_temp_dir() + self.remove = True + + def __enter__(self): + if self.dir == ".": + return + + _info(self.quiet, "Entering directory {}", repr(get_absolute_path(self.dir))) + + make_dir(self.dir, quiet=True) + + self.prev_dir = change_dir(self.dir, quiet=True) + + return self.dir + + def __exit__(self, exc_type, exc_value, traceback): + if self.dir == ".": + return + + _debug(self.quiet, "Returning to directory {}", repr(get_absolute_path(self.prev_dir))) + + change_dir(self.prev_dir, quiet=True) + + if self.remove: + remove(self.dir, quiet=True) + +## Environment operations + +def join_path_var(*paths): + return _os.pathsep.join(unique(skip(paths))) + +def get_current_dir(): + return _os.getcwd() + +def get_home_dir(user=None): + return _os.path.expanduser("~{}".format(user or "")) + +def get_user(): + return _getpass.getuser() + +def get_hostname(): + return _socket.gethostname() + +def get_program_name(command=None): + if command is None: + args = ARGS + else: + args = command.split() + + for arg in args: + if "=" not in arg: + return get_base_name(arg) + +def which(program_name): + return _shutil.which(program_name) + +def check_env(var, message=None): + if var not in _os.environ: + if message is None: + message = "Environment variable {} is not set".format(repr(var)) + + raise PlanoError(message) + +def check_module(module, message=None): + if _pkgutil.find_loader(module) is None: + if message is None: + message = "Module {} is not found".format(repr(module)) + + raise PlanoError(message) + +def check_program(program, message=None): + if 
which(program) is None: + if message is None: + message = "Program {} is not found".format(repr(program)) + + raise PlanoError(message) + +class working_env: + def __init__(self, **vars): + self.amend = vars.pop("amend", True) + self.vars = vars + + def __enter__(self): + self.prev_vars = dict(_os.environ) + + if not self.amend: + for name, value in list(_os.environ.items()): + if name not in self.vars: + del _os.environ[name] + + for name, value in self.vars.items(): + _os.environ[name] = str(value) + + def __exit__(self, exc_type, exc_value, traceback): + for name, value in self.prev_vars.items(): + _os.environ[name] = value + + for name, value in self.vars.items(): + if name not in self.prev_vars: + del _os.environ[name] + +class working_module_path: + def __init__(self, path, amend=True): + if is_string(path): + if not is_absolute(path): + path = get_absolute_path(path) + + path = [path] + + if amend: + path = path + _sys.path + + self.path = path + + def __enter__(self): + self.prev_path = _sys.path + _sys.path = self.path + + def __exit__(self, exc_type, exc_value, traceback): + _sys.path = self.prev_path + +def print_env(file=None): + props = ( + ("ARGS", ARGS), + ("ENV['PATH']", ENV.get("PATH")), + ("ENV['PYTHONPATH']", ENV.get("PYTHONPATH")), + ("sys.executable", _sys.executable), + ("sys.path", _sys.path), + ("sys.version", _sys.version.replace("\n", "")), + ("get_current_dir()", get_current_dir()), + ("get_home_dir()", get_home_dir()), + ("get_hostname()", get_hostname()), + ("get_program_name()", get_program_name()), + ("get_user()", get_user()), + ("plano.__file__", __file__), + ("which('plano')", which("plano")), + ) + + print_properties(props, file=file) + +## File operations + +def touch(file, quiet=False): + file = expand(file) + + _info(quiet, "Touching {}", repr(file)) + + try: + _os.utime(file, None) + except OSError: + append(file, "") + + return file + +# symlinks=True - Preserve symlinks +# inside=True - Place from_path inside to_path if to_path is a directory +def copy(from_path, to_path, symlinks=True, inside=True, quiet=False): + from_path = expand(from_path) + to_path = expand(to_path) + + _info(quiet, "Copying {} to {}", repr(from_path), repr(to_path)) + + if is_dir(to_path) and inside: + to_path = join(to_path, get_base_name(from_path)) + else: + make_parent_dir(to_path, quiet=True) + + if is_dir(from_path): + for name in list_dir(from_path): + copy(join(from_path, name), join(to_path, name), symlinks=symlinks, inside=False, quiet=True) + + _shutil.copystat(from_path, to_path) + elif is_link(from_path) and symlinks: + make_link(to_path, read_link(from_path), quiet=True) + else: + _shutil.copy2(from_path, to_path) + + return to_path + +# inside=True - Place from_path inside to_path if to_path is a directory +def move(from_path, to_path, inside=True, quiet=False): + from_path = expand(from_path) + to_path = expand(to_path) + + _info(quiet, "Moving {} to {}", repr(from_path), repr(to_path)) + + to_path = copy(from_path, to_path, inside=inside, quiet=True) + remove(from_path, quiet=True) + + return to_path + +def remove(paths, quiet=False): + if is_string(paths): + paths = (paths,) + + for path in paths: + path = expand(path) + + if not exists(path): + continue + + _debug(quiet, "Removing {}", repr(path)) + + if is_dir(path): + _shutil.rmtree(path, ignore_errors=True) + else: + _os.remove(path) + +def get_file_size(file): + file = expand(file) + return _os.path.getsize(file) + +## IO operations + +def read(file): + file = expand(file) + + with open(file) as f: + 
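+ # Return the file's entire contents as a single string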
return f.read() + +def write(file, string): + file = expand(file) + + make_parent_dir(file, quiet=True) + + with open(file, "w") as f: + f.write(string) + + return file + +def append(file, string): + file = expand(file) + + make_parent_dir(file, quiet=True) + + with open(file, "a") as f: + f.write(string) + + return file + +def prepend(file, string): + file = expand(file) + + orig = read(file) + + return write(file, string + orig) + +def tail(file, count): + file = expand(file) + return "".join(tail_lines(file, count)) + +def read_lines(file): + file = expand(file) + + with open(file) as f: + return f.readlines() + +def write_lines(file, lines): + file = expand(file) + + make_parent_dir(file, quiet=True) + + with open(file, "w") as f: + f.writelines(lines) + + return file + +def append_lines(file, lines): + file = expand(file) + + make_parent_dir(file, quiet=True) + + with open(file, "a") as f: + f.writelines(lines) + + return file + +def prepend_lines(file, lines): + file = expand(file) + + orig_lines = read_lines(file) + + make_parent_dir(file, quiet=True) + + with open(file, "w") as f: + f.writelines(lines) + f.writelines(orig_lines) + + return file + +def tail_lines(file, count): + assert count >= 0, count + + lines = read_lines(file) + + return lines[-count:] + +def replace_in_file(file, expr, replacement, count=0): + file = expand(file) + return write(file, replace(read(file), expr, replacement, count=count)) + +def concatenate(file, input_files): + file = expand(file) + + assert file not in input_files + + make_parent_dir(file, quiet=True) + + with open(file, "wb") as f: + for input_file in input_files: + if not exists(input_file): + continue + + with open(input_file, "rb") as inf: + _shutil.copyfileobj(inf, f) + + return file + +## Iterable operations + +def unique(iterable): + return list(dict.fromkeys(iterable).keys()) + +def skip(iterable, values=(None, "", (), [], {})): + if is_scalar(values): + values = (values,) + + items = list() + + for item in iterable: + if item not in values: + items.append(item) + + return items + +## JSON operations + +def read_json(file): + file = expand(file) + + with open(file) as f: + return _json.load(f) + +def write_json(file, data): + file = expand(file) + + make_parent_dir(file, quiet=True) + + with open(file, "w") as f: + _json.dump(data, f, indent=4, separators=(",", ": "), sort_keys=True) + + return file + +def parse_json(json): + return _json.loads(json) + +def emit_json(data): + return _json.dumps(data, indent=4, separators=(",", ": "), sort_keys=True) + +## HTTP operations + +def _run_curl(method, url, content=None, content_file=None, content_type=None, output_file=None, insecure=False): + check_program("curl") + + options = [ + "-sf", + "-X", method, + "-H", "'Expect:'", + ] + + if content is not None: + assert content_file is None + options.extend(("-d", "@-")) + + if content_file is not None: + assert content is None, content + options.extend(("-d", "@{}".format(content_file))) + + if content_type is not None: + options.extend(("-H", "'Content-Type: {}'".format(content_type))) + + if output_file is not None: + options.extend(("-o", output_file)) + + if insecure: + options.append("--insecure") + + options = " ".join(options) + command = "curl {} {}".format(options, url) + + if output_file is None: + return call(command, input=content) + else: + make_parent_dir(output_file, quiet=True) + run(command, input=content) + +def http_get(url, output_file=None, insecure=False): + return _run_curl("GET", url, output_file=output_file, 
insecure=insecure) + +def http_get_json(url, insecure=False): + return parse_json(http_get(url, insecure=insecure)) + +def http_put(url, content, content_type=None, insecure=False): + _run_curl("PUT", url, content=content, content_type=content_type, insecure=insecure) + +def http_put_file(url, content_file, content_type=None, insecure=False): + _run_curl("PUT", url, content_file=content_file, content_type=content_type, insecure=insecure) + +def http_put_json(url, data, insecure=False): + http_put(url, emit_json(data), content_type="application/json", insecure=insecure) + +def http_post(url, content, content_type=None, output_file=None, insecure=False): + return _run_curl("POST", url, content=content, content_type=content_type, output_file=output_file, insecure=insecure) + +def http_post_file(url, content_file, content_type=None, output_file=None, insecure=False): + return _run_curl("POST", url, content_file=content_file, content_type=content_type, output_file=output_file, insecure=insecure) + +def http_post_json(url, data, insecure=False): + return parse_json(http_post(url, emit_json(data), content_type="application/json", insecure=insecure)) + +## Link operations + +def make_link(path: str, linked_path: str, quiet=False) -> str: + _info(quiet, "Making symlink {} to {}", repr(path), repr(linked_path)) + + make_parent_dir(path, quiet=True) + remove(path, quiet=True) + + _os.symlink(linked_path, path) + + return path + +def read_link(path): + return _os.readlink(path) + +## Logging operations + +_logging_levels = ( + "debug", + "info", + "notice", + "warn", + "error", + "disabled", +) + +_DEBUG = _logging_levels.index("debug") +_INFO = _logging_levels.index("info") +_NOTICE = _logging_levels.index("notice") +_WARN = _logging_levels.index("warn") +_ERROR = _logging_levels.index("error") +_DISABLED = _logging_levels.index("disabled") + +_logging_output = None +_logging_threshold = _NOTICE + +def enable_logging(level="notice", output=None): + assert level in _logging_levels + + info("Enabling logging (level={}, output={})", repr(level), repr(nvl(output, "stderr"))) + + global _logging_threshold + _logging_threshold = _logging_levels.index(level) + + if is_string(output): + output = open(output, "w") + + global _logging_output + _logging_output = output + +def disable_logging(): + info("Disabling logging") + + global _logging_threshold + _logging_threshold = _DISABLED + +class logging_enabled: + def __init__(self, level="notice", output=None): + self.level = level + self.output = output + + def __enter__(self): + self.prev_level = _logging_levels[_logging_threshold] + self.prev_output = _logging_output + + if self.level == "disabled": + disable_logging() + else: + enable_logging(level=self.level, output=self.output) + + def __exit__(self, exc_type, exc_value, traceback): + if self.prev_level == "disabled": + disable_logging() + else: + enable_logging(level=self.prev_level, output=self.prev_output) + +class logging_disabled(logging_enabled): + def __init__(self): + super().__init__(level="disabled") + +def fail(message, *args): + error(message, *args) + + if isinstance(message, BaseException): + raise message + + raise PlanoError(message.format(*args)) + +def error(message, *args): + log(_ERROR, message, *args) + +def warn(message, *args): + log(_WARN, message, *args) + +def notice(message, *args): + log(_NOTICE, message, *args) + +def info(message, *args): + log(_INFO, message, *args) + +def debug(message, *args): + log(_DEBUG, message, *args) + +def log(level, message, *args): + if 
is_string(level): + level = _logging_levels.index(level) + + if _logging_threshold <= level: + _print_message(level, message, args) + +def _print_message(level, message, args): + out = nvl(_logging_output, _sys.stderr) + exception = None + + if isinstance(message, BaseException): + exception = message + message = "{}: {}".format(type(message).__name__, str(message)) + else: + message = str(message) + + if args: + message = message.format(*args) + + program = "{}:".format(get_program_name()) + + level_color = ("cyan", "cyan", "blue", "yellow", "red", None)[level] + level_bright = (False, False, False, False, True, False)[level] + level = cformat("{:>6}:".format(_logging_levels[level]), color=level_color, bright=level_bright, file=out) + + print(program, level, capitalize(message), file=out) + + if exception is not None and hasattr(exception, "__traceback__"): + _traceback.print_exception(type(exception), exception, exception.__traceback__, file=out) + + out.flush() + +def _debug(quiet, message, *args): + if quiet: + debug(message, *args) + else: + notice(message, *args) + +def _info(quiet, message, *args): + if quiet: + info(message, *args) + else: + notice(message, *args) + +## Path operations + +def expand(path): + path = _os.path.expanduser(path) + path = _os.path.expandvars(path) + + return path + +def get_absolute_path(path): + path = expand(path) + return _os.path.abspath(path) + +def normalize_path(path): + path = expand(path) + return _os.path.normpath(path) + +def get_real_path(path): + path = expand(path) + return _os.path.realpath(path) + +def get_relative_path(path, start=None): + path = expand(path) + return _os.path.relpath(path, start=start) + +def get_file_url(path): + path = expand(path) + return "file:{}".format(get_absolute_path(path)) + +def exists(path): + path = expand(path) + return _os.path.lexists(path) + +def is_absolute(path): + path = expand(path) + return _os.path.isabs(path) + +def is_dir(path): + path = expand(path) + return _os.path.isdir(path) + +def is_file(path): + path = expand(path) + return _os.path.isfile(path) + +def is_link(path): + path = expand(path) + return _os.path.islink(path) + +def join(*paths): + paths = [expand(x) for x in paths] + + path = _os.path.join(*paths) + path = normalize_path(path) + + return path + +def split(path): + path = expand(path) + path = normalize_path(path) + parent, child = _os.path.split(path) + + return parent, child + +def split_extension(path): + path = expand(path) + path = normalize_path(path) + root, ext = _os.path.splitext(path) + + return root, ext + +def get_parent_dir(path): + path = expand(path) + path = normalize_path(path) + parent, child = split(path) + + return parent + +def get_base_name(path): + path = expand(path) + path = normalize_path(path) + parent, name = split(path) + + return name + +def get_name_stem(file): + file = expand(file) + name = get_base_name(file) + + if name.endswith(".tar.gz"): + name = name[:-3] + + stem, ext = split_extension(name) + + return stem + +def get_name_extension(file): + file = expand(file) + name = get_base_name(file) + stem, ext = split_extension(name) + + return ext + +def _check_path(path, test_func, message): + path = expand(path) + + if not test_func(path): + parent_dir = get_parent_dir(path) + + if is_dir(parent_dir): + found_paths = ", ".join([repr(x) for x in list_dir(parent_dir)]) + message = "{}. 
The parent directory contains: {}".format(message.format(repr(path)), found_paths) + else: + message = "{}".format(message.format(repr(path))) + + raise PlanoError(message) + +def check_exists(path): + path = expand(path) + _check_path(path, exists, "File or directory {} not found") + +def check_file(path): + path = expand(path) + _check_path(path, is_file, "File {} not found") + +def check_dir(path): + path = expand(path) + _check_path(path, is_dir, "Directory {} not found") + +def await_exists(path, timeout=30, quiet=False): + path = expand(path) + + _info(quiet, "Waiting for path {} to exist", repr(path)) + + timeout_message = "Timed out waiting for path {} to exist".format(path) + period = 0.03125 + + with Timer(timeout=timeout, timeout_message=timeout_message) as timer: + while True: + try: + check_exists(path) + except PlanoError: + sleep(period, quiet=True) + period = min(1, period * 2) + else: + return + +## Port operations + +def get_random_port(min=49152, max=65535): + ports = [_random.randint(min, max) for _ in range(3)] + + for port in ports: + try: + check_port(port) + except PlanoError: + return port + + raise PlanoError("Random ports unavailable") + +def check_port(port, host="localhost"): + sock = _socket.socket(_socket.AF_INET, _socket.SOCK_STREAM) + sock.setsockopt(_socket.SOL_SOCKET, _socket.SO_REUSEADDR, 1) + + if sock.connect_ex((host, port)) != 0: + raise PlanoError("Port {} (host {}) is not reachable".format(repr(port), repr(host))) + +def await_port(port, host="localhost", timeout=30, quiet=False): + _info(quiet, "Waiting for port {}", port) + + if is_string(port): + port = int(port) + + timeout_message = "Timed out waiting for port {} to open".format(port) + period = 0.03125 + + with Timer(timeout=timeout, timeout_message=timeout_message) as timer: + while True: + try: + check_port(port, host=host) + except PlanoError: + sleep(period, quiet=True) + period = min(1, period * 2) + else: + return + +## Process operations + +def get_process_id(): + return _os.getpid() + +def _format_command(command, represent=True): + if is_string(command): + args = _shlex.split(command) + else: + args = command + + args = [expand(x) for x in args] + command = " ".join(args) + + if represent: + return repr(command) + else: + return command + +# quiet=False - Don't log at notice level +# stash=False - No output unless there is an error +# output= - Send stdout and stderr to a file +# stdin= - XXX +# stdout= - Send stdout to a file +# stderr= - Send stderr to a file +# shell=False - XXX +def start(command, stdin=None, stdout=None, stderr=None, output=None, shell=False, stash=False, quiet=False): + _info(quiet, "Starting command {}", _format_command(command)) + + if output is not None: + stdout, stderr = output, output + + if is_string(stdin): + stdin = expand(stdin) + stdin = open(stdin, "r") + + if is_string(stdout): + stdout = expand(stdout) + stdout = open(stdout, "w") + + if is_string(stderr): + stderr = expand(stderr) + stderr = open(stderr, "w") + + if stdin is None: + stdin = _sys.stdin + + if stdout is None: + stdout = _sys.stdout + + if stderr is None: + stderr = _sys.stderr + + stash_file = None + + if stash: + stash_file = make_temp_file() + out = open(stash_file, "w") + stdout = out + stderr = out + + if shell: + if is_string(command): + args = command + else: + args = " ".join(command) + else: + if is_string(command): + args = _shlex.split(command) + else: + args = command + + args = [expand(x) for x in args] + + try: + proc = PlanoProcess(args, stdin=stdin, stdout=stdout, 
stderr=stderr, shell=shell, close_fds=True, stash_file=stash_file) + except OSError as e: + raise PlanoError("Command {}: {}".format(_format_command(command), str(e))) + + debug("{} started", proc) + + return proc + +def stop(proc, timeout=None, quiet=False): + _info(quiet, "Stopping {}", proc) + + if proc.poll() is not None: + if proc.exit_code == 0: + debug("{} already exited normally", proc) + elif proc.exit_code == -(_signal.SIGTERM): + debug("{} was already terminated", proc) + else: + debug("{} already exited with code {}", proc, proc.exit_code) + + return proc + + kill(proc, quiet=True) + + return wait(proc, timeout=timeout, quiet=True) + +def kill(proc, quiet=False): + _info(quiet, "Killing {}", proc) + + proc.terminate() + +def wait(proc, timeout=None, check=False, quiet=False): + _info(quiet, "Waiting for {} to exit", proc) + + try: + proc.wait(timeout=timeout) + except _subprocess.TimeoutExpired: + raise PlanoTimeout() + + if proc.exit_code == 0: + debug("{} exited normally", proc) + elif proc.exit_code < 0: + debug("{} was terminated by signal {}", proc, abs(proc.exit_code)) + else: + debug("{} exited with code {}", proc, proc.exit_code) + + if proc.stash_file is not None: + if proc.exit_code > 0: + eprint(read(proc.stash_file), end="") + + if not WINDOWS: + remove(proc.stash_file, quiet=True) + + if check and proc.exit_code > 0: + raise PlanoProcessError(proc) + + return proc + +# input= - Pipe to the process +def run(command, stdin=None, stdout=None, stderr=None, input=None, output=None, + stash=False, shell=False, check=True, quiet=False): + _info(quiet, "Running command {}", _format_command(command)) + + if input is not None: + assert stdin in (None, _subprocess.PIPE), stdin + + input = input.encode("utf-8") + stdin = _subprocess.PIPE + + proc = start(command, stdin=stdin, stdout=stdout, stderr=stderr, output=output, + stash=stash, shell=shell, quiet=True) + + proc.stdout_result, proc.stderr_result = proc.communicate(input=input) + + if proc.stdout_result is not None: + proc.stdout_result = proc.stdout_result.decode("utf-8") + + if proc.stderr_result is not None: + proc.stderr_result = proc.stderr_result.decode("utf-8") + + return wait(proc, check=check, quiet=True) + +# input= - Pipe the given input into the process +def call(command, input=None, shell=False, quiet=False): + _info(quiet, "Calling {}", _format_command(command)) + + proc = run(command, stdin=_subprocess.PIPE, stdout=_subprocess.PIPE, stderr=_subprocess.PIPE, + input=input, shell=shell, check=True, quiet=True) + + return proc.stdout_result + +def exit(arg=None, *args, **kwargs): + verbose = kwargs.get("verbose", False) + + if arg in (0, None): + if verbose: + notice("Exiting normally") + + _sys.exit() + + if is_string(arg): + if args: + arg = arg.format(*args) + + if verbose: + error(arg) + + _sys.exit(arg) + + if isinstance(arg, BaseException): + if verbose: + error(arg) + + _sys.exit(str(arg)) + + if isinstance(arg, int): + _sys.exit(arg) + + raise PlanoException("Illegal argument") + +_child_processes = list() + +class PlanoProcess(_subprocess.Popen): + def __init__(self, args, **options): + self.stash_file = options.pop("stash_file", None) + + super().__init__(args, **options) + + self.args = args + self.stdout_result = None + self.stderr_result = None + + _child_processes.append(self) + + @property + def exit_code(self): + return self.returncode + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, traceback): + kill(self) + + def __repr__(self): + return "process {} (command 
{})".format(self.pid, _format_command(self.args)) + +class PlanoProcessError(_subprocess.CalledProcessError, PlanoError): + def __init__(self, proc): + super().__init__(proc.exit_code, _format_command(proc.args, represent=False)) + +def _default_sigterm_handler(signum, frame): + for proc in _child_processes: + if proc.poll() is None: + kill(proc, quiet=True) + + exit(-(_signal.SIGTERM)) + +_signal.signal(_signal.SIGTERM, _default_sigterm_handler) + +## String operations + +def replace(string, expr, replacement, count=0): + return _re.sub(expr, replacement, string, count) + +def remove_prefix(string, prefix): + if string is None: + return "" + + if prefix and string.startswith(prefix): + string = string[len(prefix):] + + return string + +def remove_suffix(string, suffix): + if string is None: + return "" + + if suffix and string.endswith(suffix): + string = string[:-len(suffix)] + + return string + +def shorten(string, max, ellipsis=None): + assert max is None or isinstance(max, int) + + if string is None: + return "" + + if max is None or len(string) < max: + return string + else: + if ellipsis is not None: + string = string + ellipsis + end = _max(0, max - len(ellipsis)) + return string[0:end] + ellipsis + else: + return string[0:max] + +def plural(noun, count=0, plural=None): + if noun in (None, ""): + return "" + + if count == 1: + return noun + + if plural is None: + if noun.endswith("s"): + plural = "{}ses".format(noun) + else: + plural = "{}s".format(noun) + + return plural + +def capitalize(string): + if not string: + return "" + + return string[0].upper() + string[1:] + +def base64_encode(string): + return _base64.b64encode(string) + +def base64_decode(string): + return _base64.b64decode(string) + +def url_encode(string): + return _urllib.parse.quote_plus(string) + +def url_decode(string): + return _urllib.parse.unquote_plus(string) + +## Temp operations + +def get_system_temp_dir(): + return _tempfile.gettempdir() + +def get_user_temp_dir(): + try: + return _os.environ["XDG_RUNTIME_DIR"] + except KeyError: + return join(get_system_temp_dir(), get_user()) + +def make_temp_file(suffix="", dir=None): + if dir is None: + dir = get_system_temp_dir() + + return _tempfile.mkstemp(prefix="plano-", suffix=suffix, dir=dir)[1] + +def make_temp_dir(suffix="", dir=None): + if dir is None: + dir = get_system_temp_dir() + + return _tempfile.mkdtemp(prefix="plano-", suffix=suffix, dir=dir) + +class temp_file: + def __init__(self, suffix="", dir=None): + if dir is None: + dir = get_system_temp_dir() + + self.fd, self.file = _tempfile.mkstemp(prefix="plano-", suffix=suffix, dir=dir) + + def __enter__(self): + return self.file + + def __exit__(self, exc_type, exc_value, traceback): + _os.close(self.fd) + + if not WINDOWS: # XXX + remove(self.file, quiet=True) + +class temp_dir: + def __init__(self, suffix="", dir=None): + self.dir = make_temp_dir(suffix=suffix, dir=dir) + + def __enter__(self): + return self.dir + + def __exit__(self, exc_type, exc_value, traceback): + remove(self.dir, quiet=True) + +## Time operations + +def sleep(seconds, quiet=False): + _info(quiet, "Sleeping for {} {}", seconds, plural("second", seconds)) + + _time.sleep(seconds) + +def get_time(): + return _time.time() + +def format_duration(duration, align=False): + assert duration >= 0 + + if duration >= 3600: + value = duration / 3600 + unit = "h" + elif duration >= 5 * 60: + value = duration / 60 + unit = "m" + else: + value = duration + unit = "s" + + if align: + return "{:.1f}{}".format(value, unit) + elif value > 10: + 
return "{:.0f}{}".format(value, unit) + else: + return remove_suffix("{:.1f}".format(value), ".0") + unit + +class Timer: + def __init__(self, timeout=None, timeout_message=None): + self.timeout = timeout + self.timeout_message = timeout_message + + if self.timeout is not None and not hasattr(_signal, "SIGALRM"): # pragma: nocover + self.timeout = None + + self.start_time = None + self.stop_time = None + + def start(self): + self.start_time = get_time() + + if self.timeout is not None: + self.prev_handler = _signal.signal(_signal.SIGALRM, self.raise_timeout) + self.prev_timeout, prev_interval = _signal.setitimer(_signal.ITIMER_REAL, self.timeout) + self.prev_timer_suspend_time = get_time() + + assert prev_interval == 0.0, "This case is not yet handled" + + def stop(self): + self.stop_time = get_time() + + if self.timeout is not None: + assert get_time() - self.prev_timer_suspend_time > 0, "This case is not yet handled" + + _signal.signal(_signal.SIGALRM, self.prev_handler) + _signal.setitimer(_signal.ITIMER_REAL, self.prev_timeout) + + def __enter__(self): + self.start() + return self + + def __exit__(self, exc_type, exc_value, traceback): + self.stop() + + @property + def elapsed_time(self): + assert self.start_time is not None + + if self.stop_time is None: + return get_time() - self.start_time + else: + return self.stop_time - self.start_time + + def raise_timeout(self, *args): + raise PlanoTimeout(self.timeout_message) + +## Unique ID operations + +# Length in bytes, renders twice as long in hex +def get_unique_id(bytes=16): + assert bytes >= 1 + assert bytes <= 16 + + uuid_bytes = _uuid.uuid4().bytes + uuid_bytes = uuid_bytes[:bytes] + + return _binascii.hexlify(uuid_bytes).decode("utf-8") + +## Value operations + +def nvl(value, replacement): + if value is None: + return replacement + + return value + +def is_string(value): + return isinstance(value, str) + +def is_scalar(value): + return value is None or isinstance(value, (str, int, float, complex, bool)) + +def is_empty(value): + return value in (None, "", (), [], {}) + +def pformat(value): + return _pprint.pformat(value, width=120) + +def format_empty(value, replacement): + if is_empty(value): + value = replacement + + return value + +def format_not_empty(value, template=None): + if not is_empty(value) and template is not None: + value = template.format(value) + + return value + +def format_repr(obj, limit=None): + attrs = ["{}={}".format(k, repr(v)) for k, v in obj.__dict__.items()] + return "{}({})".format(obj.__class__.__name__, ", ".join(attrs[:limit])) + +class Namespace: + def __init__(self, **kwargs): + for name in kwargs: + setattr(self, name, kwargs[name]) + + def __eq__(self, other): + return vars(self) == vars(other) + + def __contains__(self, key): + return key in self.__dict__ + + def __repr__(self): + return format_repr(self) + +## YAML operations + +def read_yaml(file): + check_module("yaml", "To install it, run 'pip install pyyaml'") + + import yaml as _yaml + + file = expand(file) + + with open(file) as f: + return _yaml.safe_load(f) + +def write_yaml(file, data): + check_module("yaml", "To install it, run 'pip install pyyaml'") + + import yaml as _yaml + + file = expand(file) + + make_parent_dir(file, quiet=True) + + with open(file, "w") as f: + _yaml.safe_dump(data, f) + + return file + +def parse_yaml(yaml): + check_module("yaml", "To install it, run 'pip install pyyaml'") + + import yaml as _yaml + + return _yaml.safe_load(yaml) + +def emit_yaml(data): + check_module("yaml", "To install it, run 'pip install 
pyyaml'") + + import yaml as _yaml + + return _yaml.safe_dump(data) + +if PLANO_DEBUG: # pragma: nocover + enable_logging(level="debug") diff --git a/subrepos/skewer/subrepos/plano/src/plano/test.py b/subrepos/skewer/subrepos/plano/src/plano/test.py new file mode 100644 index 0000000..e898c1d --- /dev/null +++ b/subrepos/skewer/subrepos/plano/src/plano/test.py @@ -0,0 +1,397 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +from .main import * +from .command import * + +import argparse as _argparse +import asyncio as _asyncio +import fnmatch as _fnmatch +import importlib as _importlib +import inspect as _inspect +import traceback as _traceback + +class PlanoTestCommand(BaseCommand): + def __init__(self, test_modules=[]): + super(PlanoTestCommand, self).__init__() + + self.test_modules = test_modules + + if _inspect.ismodule(self.test_modules): + self.test_modules = [self.test_modules] + + self.parser = BaseArgumentParser() + self.parser.add_argument("include", metavar="PATTERN", nargs="*", default=["*"], + help="Run tests with names matching PATTERN (default '*', all tests)") + self.parser.add_argument("-e", "--exclude", metavar="PATTERN", action="append", default=[], + help="Do not run tests with names matching PATTERN (repeatable)") + self.parser.add_argument("-m", "--module", action="append", default=[], + help="Collect tests from MODULE (repeatable)") + self.parser.add_argument("-l", "--list", action="store_true", + help="Print the test names and exit") + self.parser.add_argument("--enable", metavar="PATTERN", action="append", default=[], + help=_argparse.SUPPRESS) + self.parser.add_argument("--unskip", metavar="PATTERN", action="append", default=[], + help="Run skipped tests matching PATTERN (repeatable)") + self.parser.add_argument("--timeout", metavar="SECONDS", type=int, default=300, + help="Fail any test running longer than SECONDS (default 300)") + self.parser.add_argument("--fail-fast", action="store_true", + help="Exit on the first failure encountered in a test run") + self.parser.add_argument("--iterations", metavar="COUNT", type=int, default=1, + help="Run the tests COUNT times (default 1)") + + def parse_args(self, args): + return self.parser.parse_args(args) + + def init(self, args): + self.list_only = args.list + self.include_patterns = args.include + self.exclude_patterns = args.exclude + self.enable_patterns = args.enable + self.unskip_patterns = args.unskip + self.timeout = args.timeout + self.fail_fast = args.fail_fast + self.iterations = args.iterations + + try: + for name in args.module: + self.test_modules.append(_importlib.import_module(name)) + except ImportError as e: + raise PlanoError(e) + + def run(self): + if self.list_only: + print_tests(self.test_modules) + return + + for i in range(self.iterations): + 
run_tests(self.test_modules, include=self.include_patterns, + exclude=self.exclude_patterns, + enable=self.enable_patterns, unskip=self.unskip_patterns, + test_timeout=self.timeout, fail_fast=self.fail_fast, + verbose=self.verbose, quiet=self.quiet) + +class PlanoTestSkipped(Exception): + pass + +def test(_function=None, name=None, timeout=None, disabled=False): + class Test: + def __init__(self, function): + self.function = function + self.name = nvl(name, self.function.__name__.rstrip("_").replace("_", "-")) + self.timeout = timeout + self.disabled = disabled + + self.module = _inspect.getmodule(self.function) + + if not hasattr(self.module, "_plano_tests"): + self.module._plano_tests = list() + + self.module._plano_tests.append(self) + + def __call__(self, test_run, unskipped): + try: + ret = self.function() + + if _inspect.iscoroutine(ret): + _asyncio.run(ret) + except SystemExit as e: + error(e) + raise PlanoError("System exit with code {}".format(e)) + + def __repr__(self): + return "test '{}:{}'".format(self.module.__name__, self.name) + + if _function is None: + return Test + else: + return Test(_function) + +def skip_test(reason=None): + if _inspect.stack()[2].frame.f_locals["unskipped"]: + return + + raise PlanoTestSkipped(reason) + +class expect_exception: + def __init__(self, exception_type=Exception, contains=None): + self.exception_type = exception_type + self.contains = contains + + def __enter__(self): + pass + + def __exit__(self, exc_type, exc_value, traceback): + if exc_value is None: + assert False, "Never encountered expected exception {}".format(self.exception_type.__name__) + + if self.contains is None: + return isinstance(exc_value, self.exception_type) + else: + return isinstance(exc_value, self.exception_type) and self.contains in str(exc_value) + +class expect_error(expect_exception): + def __init__(self, contains=None): + super().__init__(PlanoError, contains=contains) + +class expect_timeout(expect_exception): + def __init__(self, contains=None): + super().__init__(PlanoTimeout, contains=contains) + +class expect_system_exit(expect_exception): + def __init__(self, contains=None): + super().__init__(SystemExit, contains=contains) + +class expect_output(temp_file): + def __init__(self, equals=None, contains=None, startswith=None, endswith=None): + super().__init__() + self.equals = equals + self.contains = contains + self.startswith = startswith + self.endswith = endswith + + def __exit__(self, exc_type, exc_value, traceback): + result = read(self.file) + + if self.equals is None: + assert len(result) > 0, result + else: + assert result == self.equals, result + + if self.contains is not None: + assert self.contains in result, result + + if self.startswith is not None: + assert result.startswith(self.startswith), result + + if self.endswith is not None: + assert result.endswith(self.endswith), result + + super().__exit__(exc_type, exc_value, traceback) + +def print_tests(modules): + if _inspect.ismodule(modules): + modules = (modules,) + + for module in modules: + for test in module._plano_tests: + flags = "(disabled)" if test.disabled else "" + print(" ".join((str(test), flags)).strip()) + +def run_tests(modules, include="*", exclude=(), enable=(), unskip=(), test_timeout=300, + fail_fast=False, verbose=False, quiet=False): + if _inspect.ismodule(modules): + modules = (modules,) + + if is_string(include): + include = (include,) + + if is_string(exclude): + exclude = (exclude,) + + if is_string(enable): + enable = (enable,) + + if is_string(unskip): + enable = 
(unskip,) + + test_run = TestRun(test_timeout=test_timeout, fail_fast=fail_fast, verbose=verbose, quiet=quiet) + + if verbose: + notice("Starting {}", test_run) + elif not quiet: + cprint("=== Configuration ===", color="cyan") + + props = ( + ("Modules", format_empty(", ".join([x.__name__ for x in modules]), "[none]")), + ("Test timeout", format_duration(test_timeout)), + ("Fail fast", fail_fast), + ) + + print_properties(props) + print() + + for module in modules: + if verbose: + notice("Running tests from module {} (file {})", repr(module.__name__), repr(module.__file__)) + elif not quiet: + cprint("=== Module {} ===".format(repr(module.__name__)), color="cyan") + + if not hasattr(module, "_plano_tests"): + warn("Module {} has no tests", repr(module.__name__)) + continue + + for test in module._plano_tests: + if test.disabled and not any([_fnmatch.fnmatchcase(test.name, x) for x in enable]): + continue + + included = any([_fnmatch.fnmatchcase(test.name, x) for x in include]) + excluded = any([_fnmatch.fnmatchcase(test.name, x) for x in exclude]) + unskipped = any([_fnmatch.fnmatchcase(test.name, x) for x in unskip]) + + if included and not excluded: + test_run.tests.append(test) + _run_test(test_run, test, unskipped) + + if not verbose and not quiet: + print() + + total = len(test_run.tests) + skipped = len(test_run.skipped_tests) + failed = len(test_run.failed_tests) + + if total == 0: + raise PlanoError("No tests ran") + + notes = "" + + if skipped != 0: + notes = "({} skipped)".format(skipped) + + if failed == 0: + result_message = "All tests passed {}".format(notes).strip() + else: + result_message = "{} {} failed {}".format(failed, plural("test", failed), notes).strip() + + if verbose: + if failed == 0: + notice(result_message) + else: + error(result_message) + elif not quiet: + cprint("=== Summary ===", color="cyan") + + props = ( + ("Total", total), + ("Skipped", skipped, format_not_empty(", ".join([x.name for x in test_run.skipped_tests]), "({})")), + ("Failed", failed, format_not_empty(", ".join([x.name for x in test_run.failed_tests]), "({})")), + ) + + print_properties(props) + print() + + cprint("=== RESULT ===", color="cyan") + + if failed == 0: + cprint(result_message, color="green") + else: + cprint(result_message, color="red", bright="True") + + print() + + if failed != 0: + raise PlanoError(result_message) + +def _run_test(test_run, test, unskipped): + if test_run.verbose: + notice("Running {}", test) + elif not test_run.quiet: + print("{:.<65} ".format(test.name + " "), end="") + + timeout = nvl(test.timeout, test_run.test_timeout) + + with temp_file() as output_file: + try: + with Timer(timeout=timeout) as timer: + if test_run.verbose: + test(test_run, unskipped) + else: + with output_redirected(output_file, quiet=True): + test(test_run, unskipped) + except KeyboardInterrupt: + raise + except PlanoTestSkipped as e: + test_run.skipped_tests.append(test) + + if test_run.verbose: + notice("{} SKIPPED ({})", test, format_duration(timer.elapsed_time)) + elif not test_run.quiet: + _print_test_result("SKIPPED", timer, "yellow") + print("Reason: {}".format(str(e))) + except Exception as e: + test_run.failed_tests.append(test) + + if test_run.verbose: + _traceback.print_exc() + + if isinstance(e, PlanoTimeout): + error("{} **FAILED** (TIMEOUT) ({})", test, format_duration(timer.elapsed_time)) + else: + error("{} **FAILED** ({})", test, format_duration(timer.elapsed_time)) + elif not test_run.quiet: + if isinstance(e, PlanoTimeout): + _print_test_result("**FAILED** (TIMEOUT)", 
timer, color="red", bright=True) + else: + _print_test_result("**FAILED**", timer, color="red", bright=True) + + _print_test_error(e) + _print_test_output(output_file) + + if test_run.fail_fast: + return True + else: + test_run.passed_tests.append(test) + + if test_run.verbose: + notice("{} PASSED ({})", test, format_duration(timer.elapsed_time)) + elif not test_run.quiet: + _print_test_result("PASSED", timer) + +def _print_test_result(status, timer, color="white", bright=False): + cprint("{:<7}".format(status), color=color, bright=bright, end="") + print("{:>6}".format(format_duration(timer.elapsed_time, align=True))) + +def _print_test_error(e): + cprint("--- Error ---", color="yellow") + + if isinstance(e, PlanoProcessError): + print("> {}".format(str(e))) + else: + lines = _traceback.format_exc().rstrip().split("\n") + lines = ["> {}".format(x) for x in lines] + + print("\n".join(lines)) + +def _print_test_output(output_file): + if get_file_size(output_file) == 0: + return + + cprint("--- Output ---", color="yellow") + + with open(output_file, "r") as out: + for line in out: + print("> {}".format(line), end="") + +class TestRun: + def __init__(self, test_timeout=None, fail_fast=False, verbose=False, quiet=False): + self.test_timeout = test_timeout + self.fail_fast = fail_fast + self.verbose = verbose + self.quiet = quiet + + self.tests = list() + self.skipped_tests = list() + self.failed_tests = list() + self.passed_tests = list() + + def __repr__(self): + return format_repr(self) + +def _main(): # pragma: nocover + PlanoTestCommand().main() diff --git a/subrepos/skewer/test-example/.gitignore b/subrepos/skewer/test-example/.gitignore new file mode 100644 index 0000000..7bd2dc8 --- /dev/null +++ b/subrepos/skewer/test-example/.gitignore @@ -0,0 +1 @@ +/README.html diff --git a/subrepos/skewer/test-example/.plano.py b/subrepos/skewer/test-example/.plano.py new file mode 120000 index 0000000..bf2f77c --- /dev/null +++ b/subrepos/skewer/test-example/.plano.py @@ -0,0 +1 @@ +subrepos/skewer/config/.plano.py \ No newline at end of file diff --git a/subrepos/skewer/test-example/README.md b/subrepos/skewer/test-example/README.md new file mode 100644 index 0000000..1c9caf6 --- /dev/null +++ b/subrepos/skewer/test-example/README.md @@ -0,0 +1,461 @@ +# Skupper Hello World + +[![main](https://github.com/skupperproject/skewer/actions/workflows/main.yaml/badge.svg)](https://github.com/skupperproject/skewer/actions/workflows/main.yaml) + +#### A minimal HTTP application deployed across Kubernetes clusters using Skupper + +This example is part of a [suite of examples][examples] showing the +different ways you can use [Skupper][website] to connect services +across cloud providers, data centers, and edge sites. 
+ +[website]: https://skupper.io/ +[examples]: https://skupper.io/examples/index.html + +#### Contents + +* [Overview](#overview) +* [Prerequisites](#prerequisites) +* [Step 1: Install the Skupper command-line tool](#step-1-install-the-skupper-command-line-tool) +* [Step 2: Configure separate console sessions](#step-2-configure-separate-console-sessions) +* [Step 3: Access your clusters](#step-3-access-your-clusters) +* [Step 4: Set up your namespaces](#step-4-set-up-your-namespaces) +* [Step 5: Install Skupper in your namespaces](#step-5-install-skupper-in-your-namespaces) +* [Step 6: Check the status of your namespaces](#step-6-check-the-status-of-your-namespaces) +* [Step 7: Link your namespaces](#step-7-link-your-namespaces) +* [Step 8: Fail on demand](#step-8-fail-on-demand) +* [Step 9: Deploy the frontend and backend services](#step-9-deploy-the-frontend-and-backend-services) +* [Step 10: Expose the backend service](#step-10-expose-the-backend-service) +* [Step 11: Expose the frontend service](#step-11-expose-the-frontend-service) +* [Step 12: Test the application](#step-12-test-the-application) +* [Accessing the web console](#accessing-the-web-console) +* [Cleaning up](#cleaning-up) +* [Summary](#summary) +* [Next steps](#next-steps) +* [About this example](#about-this-example) + +## Overview + +This example is a very simple multi-service HTTP application that can +be deployed across multiple Kubernetes clusters using Skupper. + +It contains two services: + +* A backend service that exposes an `/api/hello` endpoint. It + returns greetings of the form `Hi, . I am + ()`. + +* A frontend service that sends greetings to the backend and + fetches new greetings in response. + +With Skupper, you can place the backend in one cluster and the +frontend in another and maintain connectivity between the two +services without exposing the backend to the public internet. + + + +## Prerequisites + +Custom prerequisites + +## Step 1: Install the Skupper command-line tool + +The `skupper` command-line tool is the entrypoint for installing +and configuring Skupper. You need to install the `skupper` +command only once for each development environment. + +On Linux or Mac, you can use the install script (inspect it +[here][install-script]) to download and extract the command: + +~~~ shell +curl https://skupper.io/install.sh | sh +~~~ + +The script installs the command under your home directory. It +prompts you to add the command to your path if necessary. + +For Windows and other installation options, see [Installing +Skupper][install-docs]. + +[install-script]: https://github.com/skupperproject/skupper-website/blob/main/docs/install.sh +[install-docs]: https://skupper.io/install/index.html + +## Step 2: Configure separate console sessions + +Skupper is designed for use with multiple namespaces, usually on +different clusters. The `skupper` command uses your +[kubeconfig][kubeconfig] and current context to select the +namespace where it operates. + +[kubeconfig]: https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/ + +Your kubeconfig is stored in a file in your home directory. The +`skupper` and `kubectl` commands use the `KUBECONFIG` environment +variable to locate it. + +A single kubeconfig supports only one active context per user. +Since you will be using multiple contexts at once in this +exercise, you need to create distinct kubeconfigs. + +Start a console session for each of your namespaces. 
Set the +`KUBECONFIG` environment variable to a different path in each +session. + +_**Console for west:**_ + +~~~ shell +export KUBECONFIG=~/.kube/config-west +~~~ + +_**Console for east:**_ + +~~~ shell +export KUBECONFIG=~/.kube/config-east +~~~ + +## Step 3: Access your clusters + +The procedure for accessing a Kubernetes cluster varies by +provider. [Find the instructions for your chosen +provider][kube-providers] and use them to authenticate and +configure access for each console session. + +[kube-providers]: https://skupper.io/start/kubernetes.html + +## Step 4: Set up your namespaces + +Use `kubectl create namespace` to create the namespaces you wish +to use (or use existing namespaces). Use `kubectl config +set-context` to set the current namespace for each session. + +_**Console for west:**_ + +~~~ shell +kubectl create namespace west +kubectl config set-context --current --namespace west +~~~ + +_**Console for east:**_ + +~~~ shell +kubectl create namespace east +kubectl config set-context --current --namespace east +~~~ + +## Step 5: Install Skupper in your namespaces + +The `skupper init` command installs the Skupper router and service +controller in the current namespace. Run the `skupper init` command +in each namespace. + +**Note:** If you are using Minikube, [you need to start `minikube +tunnel`][minikube-tunnel] before you install Skupper. + +[minikube-tunnel]: https://skupper.io/start/minikube.html#running-minikube-tunnel + +_**Console for west:**_ + +~~~ shell +skupper init --enable-console --enable-flow-collector +~~~ + +_**Console for east:**_ + +~~~ shell +skupper init +~~~ + +_Sample output:_ + +~~~ console +$ skupper init +Waiting for LoadBalancer IP or hostname... +Skupper is now installed in namespace ''. Use 'skupper status' to get more information. +~~~ + +## Step 6: Check the status of your namespaces + +Use `skupper status` in each console to check that Skupper is +installed. + +_**Console for west:**_ + +~~~ shell +skupper status +~~~ + +_**Console for east:**_ + +~~~ shell +skupper status +~~~ + +_Sample output:_ + +~~~ console +Skupper is enabled for namespace "" in interior mode. It is connected to 1 other site. It has 1 exposed service. +The site console url is: +The credentials for internal console-auth mode are held in secret: 'skupper-console-users' +~~~ + +As you move through the steps below, you can use `skupper status` at +any time to check your progress. + +## Step 7: Link your namespaces + +Creating a link requires use of two `skupper` commands in +conjunction, `skupper token create` and `skupper link create`. + +The `skupper token create` command generates a secret token that +signifies permission to create a link. The token also carries the +link details. Then, in a remote namespace, The `skupper link +create` command uses the token to create a link to the namespace +that generated it. + +**Note:** The link token is truly a *secret*. Anyone who has the +token can link to your namespace. Make sure that only those you +trust have access to it. + +First, use `skupper token create` in one namespace to generate the +token. Then, use `skupper link create` in the other to create a +link. 
+ +_**Console for west:**_ + +~~~ shell +skupper token create ~/secret.token +~~~ + +_Sample output:_ + +~~~ console +$ skupper token create ~/secret.token +Token written to ~/secret.token +~~~ + +_**Console for east:**_ + +~~~ shell +skupper link create ~/secret.token +~~~ + +_Sample output:_ + +~~~ console +$ skupper link create ~/secret.token +Site configured to link to https://10.105.193.154:8081/ed9c37f6-d78a-11ec-a8c7-04421a4c5042 (name=link1) +Check the status of the link using 'skupper link status'. +~~~ + +If your console sessions are on different machines, you may need +to use `sftp` or a similar tool to transfer the token securely. +By default, tokens expire after a single use or 15 minutes after +creation. + +## Step 8: Fail on demand + +_**Console for west:**_ + +~~~ shell +if [ -n "${SKEWER_FAIL}" ]; then expr 1 / 0; fi + +~~~ + +## Step 9: Deploy the frontend and backend services + +Use `kubectl create deployment` to deploy the frontend service +in `west` and the backend service in `east`. + +_**Console for west:**_ + +~~~ shell +kubectl create deployment frontend --image quay.io/skupper/hello-world-frontend +~~~ + +_Sample output:_ + +~~~ console +$ kubectl create deployment frontend --image quay.io/skupper/hello-world-frontend +deployment.apps/frontend created +~~~ + +_**Console for east:**_ + +~~~ shell +kubectl create deployment backend --image quay.io/skupper/hello-world-backend --replicas 3 +~~~ + +_Sample output:_ + +~~~ console +$ kubectl create deployment backend --image quay.io/skupper/hello-world-backend --replicas 3 +deployment.apps/backend created +~~~ + +## Step 10: Expose the backend service + +We now have two namespaces linked to form a Skupper network, but +no services are exposed on it. Skupper uses the `skupper +expose` command to select a service from one namespace for +exposure on all the linked namespaces. + +Use `skupper expose` to expose the backend service to the +frontend service. + +_**Console for east:**_ + +~~~ shell +skupper expose deployment/backend --port 8080 +~~~ + +_Sample output:_ + +~~~ console +$ skupper expose deployment/backend --port 8080 +deployment backend exposed as backend +~~~ + +## Step 11: Expose the frontend service + +We have established connectivity between the two namespaces and +made the backend in `east` available to the frontend in `west`. +Before we can test the application, we need external access to +the frontend. + +Use `kubectl expose` with `--type LoadBalancer` to open network +access to the frontend service. + +_**Console for west:**_ + +~~~ shell +kubectl expose deployment/frontend --port 8080 --type LoadBalancer +~~~ + +_Sample output:_ + +~~~ console +$ kubectl expose deployment/frontend --port 8080 --type LoadBalancer +service/frontend exposed +~~~ + +## Step 12: Test the application + +Now we're ready to try it out. Use `kubectl get service/frontend` +to look up the external IP of the frontend service. Then use +`curl` or a similar tool to request the `/api/health` endpoint at +that address. + +**Note:** The `` field in the following commands is a +placeholder. The actual value is an IP address. 
+ +_**Console for west:**_ + +~~~ shell +kubectl get service/frontend +curl http://:8080/api/health +~~~ + +_Sample output:_ + +~~~ console +$ kubectl get service/frontend +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +frontend LoadBalancer 10.103.232.28 8080:30407/TCP 15s + +$ curl http://:8080/api/health +OK +~~~ + +If everything is in order, you can now access the web interface by +navigating to `http://:8080/` in your browser. + +## Accessing the web console + +Skupper includes a web console you can use to view the application +network. To access it, use `skupper status` to look up the URL of +the web console. Then use `kubectl get +secret/skupper-console-users` to look up the console admin +password. + +**Note:** The `` and `` fields in the +following output are placeholders. The actual values are specific +to your environment. + +_**Console for west:**_ + +~~~ shell +skupper status +kubectl get secret/skupper-console-users -o jsonpath={.data.admin} | base64 -d +~~~ + +_Sample output:_ + +~~~ console +$ skupper status +Skupper is enabled for namespace "west" in interior mode. It is connected to 1 other site. It has 1 exposed service. +The site console url is: +The credentials for internal console-auth mode are held in secret: 'skupper-console-users' + +$ kubectl get secret/skupper-console-users -o jsonpath={.data.admin} | base64 -d + +~~~ + +Navigate to `` in your browser. When prompted, log +in as user `admin` and enter the password. + +## Cleaning up + +To remove Skupper and the other resources from this exercise, use +the following commands. + +_**Console for west:**_ + +~~~ shell +skupper delete +kubectl delete service/frontend +kubectl delete deployment/frontend +~~~ + +_**Console for east:**_ + +~~~ shell +skupper delete +kubectl delete deployment/backend +~~~ + +## Summary + +This example locates the frontend and backend services in different +namespaces, on different clusters. Ordinarily, this means that they +have no way to communicate unless they are exposed to the public +internet. + +Introducing Skupper into each namespace allows us to create a virtual +application network that can connect services in different clusters. +Any service exposed on the application network is represented as a +local service in all of the linked namespaces. + +The backend service is located in `east`, but the frontend service +in `west` can "see" it as if it were local. When the frontend +sends a request to the backend, Skupper forwards the request to the +namespace where the backend is running and routes the response back to +the frontend. + + + +## Next steps + +Custom next steps + +## About this example + +This example was produced using [Skewer][skewer], a library for +documenting and testing Skupper examples. + +[skewer]: https://github.com/skupperproject/skewer + +Skewer provides utility functions for generating the README and +running the example steps. Use the `./plano` command in the project +root to see what is available. + +To quickly stand up the example using Minikube, try the `./plano demo` +command. diff --git a/subrepos/skewer/test-example/images/entities.svg b/subrepos/skewer/test-example/images/entities.svg new file mode 100644 index 0000000..6a1ab87 --- /dev/null +++ b/subrepos/skewer/test-example/images/entities.svg @@ -0,0 +1,3 @@ + + +
[entities.svg labels: Frontend service and Skupper in Namespace "west" on Kubernetes cluster 1; Backend service and Skupper in Namespace "east" on Kubernetes cluster 2; Public network between the clusters]
diff --git a/subrepos/skewer/test-example/images/sequence.svg b/subrepos/skewer/test-example/images/sequence.svg new file mode 100644 index 0000000..20d27c1 --- /dev/null +++ b/subrepos/skewer/test-example/images/sequence.svg @@ -0,0 +1 @@ +westeastCurlFrontendSkupperSkupperBackendBackendGET /         GET /api/hello      GET /api/hello      GET /api/hello"Hello 1"      "Hello 1"      "Hello 1""Hello 1"          diff --git a/subrepos/skewer/test-example/images/sequence.txt b/subrepos/skewer/test-example/images/sequence.txt new file mode 100644 index 0000000..6d081ea --- /dev/null +++ b/subrepos/skewer/test-example/images/sequence.txt @@ -0,0 +1,22 @@ +participant Curl + +participantgroup #cce5ff eu-north +participant Frontend +participant "Skupper" as Skupper1 #lightgreen +end + +participantgroup #ffe6cc us-east +participant "Skupper" as Skupper2 #lightgreen +participant Backend #yellow +end + +abox over Skupper1 #yellow: Backend + +Curl->Frontend: GET / +Frontend->Skupper1: GET /api/hello +Skupper1->Skupper2: GET /api/hello +Skupper2->Backend: GET /api/hello +Skupper2<-Backend: "Hello 1" +Skupper1<-Skupper2: "Hello 1" +Frontend<-Skupper1: "Hello 1" +Curl<-Frontend: "Hello 1" diff --git a/subrepos/skewer/test-example/plano b/subrepos/skewer/test-example/plano new file mode 120000 index 0000000..0f4ec84 --- /dev/null +++ b/subrepos/skewer/test-example/plano @@ -0,0 +1 @@ +subrepos/skewer/plano \ No newline at end of file diff --git a/subrepos/skewer/test-example/python/skewer b/subrepos/skewer/test-example/python/skewer new file mode 120000 index 0000000..0785527 --- /dev/null +++ b/subrepos/skewer/test-example/python/skewer @@ -0,0 +1 @@ +../subrepos/skewer/python/skewer \ No newline at end of file diff --git a/subrepos/skewer/test-example/skewer.yaml b/subrepos/skewer/test-example/skewer.yaml new file mode 100644 index 0000000..93a180d --- /dev/null +++ b/subrepos/skewer/test-example/skewer.yaml @@ -0,0 +1,113 @@ +title: Skupper Hello World +subtitle: A minimal HTTP application deployed across Kubernetes clusters using Skupper +github_actions_url: https://github.com/skupperproject/skewer/actions/workflows/main.yaml +overview: | + This example is a very simple multi-service HTTP application that can + be deployed across multiple Kubernetes clusters using Skupper. + + It contains two services: + + * A backend service that exposes an `/api/hello` endpoint. It + returns greetings of the form `Hi, . I am + ()`. + + * A frontend service that sends greetings to the backend and + fetches new greetings in response. + + With Skupper, you can place the backend in one cluster and the + frontend in another and maintain connectivity between the two + services without exposing the backend to the public internet. + + +prerequisites: | + Custom prerequisites +sites: + west: + kubeconfig: ~/.kube/config-west + namespace: west + east: + kubeconfig: ~/.kube/config-east + namespace: east +steps: + - standard: install_the_skupper_command_line_tool + - standard: configure_separate_console_sessions + - standard: access_your_clusters + - standard: set_up_your_namespaces + - standard: install_skupper_in_your_namespaces + - standard: check_the_status_of_your_namespaces + - standard: link_your_namespaces + - title: Fail on demand + commands: + west: + - run: | + if [ -n "${SKEWER_FAIL}" ]; then expr 1 / 0; fi + - title: Deploy the frontend and backend services + preamble: | + Use `kubectl create deployment` to deploy the frontend service + in `west` and the backend service in `east`. 
+ commands: + west: + - run: kubectl create deployment frontend --image quay.io/skupper/hello-world-frontend + output: deployment.apps/frontend created + east: + - run: kubectl create deployment backend --image quay.io/skupper/hello-world-backend --replicas 3 + output: deployment.apps/backend created + - title: Expose the backend service + preamble: | + We now have two namespaces linked to form a Skupper network, but + no services are exposed on it. Skupper uses the `skupper + expose` command to select a service from one namespace for + exposure on all the linked namespaces. + + Use `skupper expose` to expose the backend service to the + frontend service. + commands: + east: + - await: deployment/backend + - run: skupper expose deployment/backend --port 8080 + output: deployment backend exposed as backend + - title: Expose the frontend service + preamble: | + We have established connectivity between the two namespaces and + made the backend in `east` available to the frontend in `west`. + Before we can test the application, we need external access to + the frontend. + + Use `kubectl expose` with `--type LoadBalancer` to open network + access to the frontend service. + commands: + west: + - await: deployment/frontend + - run: kubectl expose deployment/frontend --port 8080 --type LoadBalancer + output: service/frontend exposed + - standard: test_the_application + - standard: accessing_the_web_console + - standard: cleaning_up + commands: + west: + - run: skupper delete + - run: kubectl delete service/frontend + - run: kubectl delete deployment/frontend + east: + - run: skupper delete + - run: kubectl delete deployment/backend +summary: | + This example locates the frontend and backend services in different + namespaces, on different clusters. Ordinarily, this means that they + have no way to communicate unless they are exposed to the public + internet. + + Introducing Skupper into each namespace allows us to create a virtual + application network that can connect services in different clusters. + Any service exposed on the application network is represented as a + local service in all of the linked namespaces. + + The backend service is located in `east`, but the frontend service + in `west` can "see" it as if it were local. When the frontend + sends a request to the backend, Skupper forwards the request to the + namespace where the backend is running and routes the response back to + the frontend. 
+ + +next_steps: | + Custom next steps diff --git a/subrepos/skewer/test-example/subrepos/skewer b/subrepos/skewer/test-example/subrepos/skewer new file mode 120000 index 0000000..6581736 --- /dev/null +++ b/subrepos/skewer/test-example/subrepos/skewer @@ -0,0 +1 @@ +../../ \ No newline at end of file From cf54271920ee47a6f44234e23fba635051804656 Mon Sep 17 00:00:00 2001 From: Paul Wright Date: Thu, 25 May 2023 13:27:31 +0100 Subject: [PATCH 03/10] init --- .github/workflows/main.yaml | 41 +++++++++ .plano.py | 1 + __pycache__/.plano.cpython-311.pyc | Bin 0 -> 4470 bytes plano | 1 + python/skewer | 1 + skewer.yaml | 139 +++++++++++++++++++++++++++++ 6 files changed, 183 insertions(+) create mode 100644 .github/workflows/main.yaml create mode 120000 .plano.py create mode 100644 __pycache__/.plano.cpython-311.pyc create mode 120000 plano create mode 120000 python/skewer create mode 100644 skewer.yaml diff --git a/.github/workflows/main.yaml b/.github/workflows/main.yaml new file mode 100644 index 0000000..3266cb2 --- /dev/null +++ b/.github/workflows/main.yaml @@ -0,0 +1,41 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +name: main +on: + push: + pull_request: + schedule: + - cron: "0 0 * * 0" +jobs: + test: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-python@v4 + with: + python-version: "3.x" + - uses: manusa/actions-setup-minikube@v2.7.2 + with: + minikube version: "v1.28.0" + kubernetes version: "v1.25.4" + github token: ${{secrets.GITHUB_TOKEN}} + - run: curl https://skupper.io/install.sh | sh + - run: echo "$HOME/.local/bin" >> $GITHUB_PATH + - run: ./plano test --debug diff --git a/.plano.py b/.plano.py new file mode 120000 index 0000000..bf2f77c --- /dev/null +++ b/.plano.py @@ -0,0 +1 @@ +subrepos/skewer/config/.plano.py \ No newline at end of file diff --git a/__pycache__/.plano.cpython-311.pyc b/__pycache__/.plano.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1f7cc3a038481fa254eb43d58d10a3120a14f2ec GIT binary patch literal 4470 zcmai1O>7&-72YL>;-5s>vK_}p>@FxfwStn=zhql-{3At*f0RUW;7CQwtT-cbrRDBs zc4db@x2U00-^_AXB&E7b z?qK%4H*e;>dGC8Of9U9l2=II}r{h1j3&KCxhx_>JofrRv&KCj}u&4_61tMbKmaq%7 zr>>;1;KTkMSKgztPKz9Z(df|V08j&msBJ)v;oz3oq9X|OHXQnmh{IdH7G8*ti5wae z=Jek7$c;WmwOB5OkQWFW!Xa3&9dyh5#PIzFi-9243e8 z1?&f1sB}3Z!81iPLS>6cvy(R_?oB51SX!cbUb1pT!m=exiYA#Wq)2FTP0p))%zm_i zvLE(F&cM<-vvq}ON@ScTUkWeSy!#WWoSr8sLoTRAs$_FkiV{ONQ|3y+FbGYMRXJ~{ zB$2Z$BcbXtPAtl*tYs7}n@Sqc)04)U9dfnPC@6gmUh^z8o5H^%Lg3`~y`8HSl&Yds z+5OWnu;m(99~sM8dG#6?FqVUru?}=qMO%?5QODz^wWbm?M~D@da+EBM$JtWME2&fl zYs+Rbqv{2`q{@^eGkQKHFUzYbRarDsS;fi~787}yu3){WB{HU&8cOyhds{ilyrLz6 z8|PEA=_T;4MY2?})?fiSd8n^Hapyz*=D^}Y-$%1I=a>6F$V?C3oWAwJa)I7h&~Cjq z@BmY@xOR8KGDdFRE8Z%O;P)4_?A+Z?-o3jtJUTa(eW*Q{zCST!yf4qwkCfq}GJAV? 
zC_6shIBSOLrm4d=DB5@&T&u0+^@14(M=TLa2{m+8$-t5{J>in#sk+CwpPW83AnDn? zsN*&1Ck?hq(xSeaFqIPQ;T7mp4EqeEt4%WCltfX%R_=<_-`i`na^+Q*dwKK-E(H^= zNWGF=u=HlVhKw7I1_(57^L*5Y2cc3Ko;AKUmxa`KKP9YzWx5E-2!LXa!suL0E5<@>#(H zF7~hcF#0O63wob=x;Ok*=s;$tLE8Zpx2^k|v1N^|wA?tX9g__M z@=UfAT}v&Sx(4vsUfiL$C~w>gv1bMP?^ulWmT9B}UV^!C>6&D+mAPi)_*bV}7u zX><(ZGmF}V_xt1)?VYA<^B@6q_* zqVb)p&w`ccXf-;zIq@8YA0-~2tf2E%biRzv!*@B-`>VuGN2M!S?MlK2{41!hiu%g# zpI3wotjl}6{#+v8)t z%j*w`jg}M*Q!Xn{gu6{m<(krn`0an zQBEM8-rjgJSw{U8)L%vY&~g5bvzQidZ<#hWHv^+1k49N>UHqLmTL-nDi{8=5f#ahs zEJC>lGoh3jRDY;of+c1ugA!`09*bZUN=$tAvHvj6f1 z91K0G_wwb-qsRv|6D~Z$FS~mE&(L7-kK$mkDP378v-fF|B$HlK(%|If(EN-u#5)x&}*;20}l#7hrl3}`_F(*tN$?E z7cqjUZrg=?dCGEzKO4=rDMtddG@N9_M&GZq(@^VuD+tjaD)sR&{?LGGO1R&q6l=P$ zDBZ}g1Ar;9=v`9vqQrc|W$lnzSmf6vyUn?A*=-Kal!EmWg&lT>nWb=Eh6@I}rLk;4 z%etc3D5D!|@D*m4I_`BlXz9Guggl)EzOJ;Zja~!uKY`c02@UwaT{wPfdtvAGC!^2i z{}TP{4=cy!tHr{y8iG8otxh zPDena4F`#Q9-_!O!30c;Dh53P9DaD9ST@u7oW#Odr|3(bed`RrA3DA7!7kuHZ(+;8i{0a1*F(DB7>Ex$VTT``h zK|EHwE`mhO7Zdwy0yN$LMvyqmC!Vbd(0D9H2yqq?2iP=D. I am + ()`. + + * A frontend service that sends greetings to the backend and + fetches new greetings in response. + + With Skupper, you can place the backend in one cluster and the + frontend in another and maintain connectivity between the two + services without exposing the backend to the public internet. + + +sites: + west: + kubeconfig: ~/.kube/config-west + namespace: west + east: + kubeconfig: ~/.kube/config-east + namespace: east + east2: + kubeconfig: ~/.kube/config-east2 + namespace: east2 +steps: + - standard: install_the_skupper_command_line_tool + - standard: configure_separate_console_sessions + - standard: access_your_clusters + - standard: set_up_your_namespaces + - standard: install_skupper_in_your_namespaces + - standard: check_the_status_of_your_namespaces + - standard: link_your_namespaces + - title: Link east2 + preamble: | + Use `skupper token create` + in `west` and `skupper link create` in `east2`. + commands: + west: + - run: skupper token create /tmp/hw.yaml + output: token created + east2: + - run: skupper link create /tmp/hw.yaml + output: link created + - title: Deploy the frontend and backend services + preamble: | + Use `kubectl create deployment` to deploy the frontend service + in `west` and the backend service in both namespaces. + commands: + west: + - run: kubectl create deployment frontend --image quay.io/skupper/hello-world-frontend + output: deployment.apps/frontend created + east: + - run: kubectl create deployment backend --image quay.io/skupper/hello-world-backend --replicas 1 + output: deployment.apps/backend created + east2: + - run: kubectl create deployment backend --image quay.io/skupper/hello-world-backend --replicas 1 + output: deployment.apps/backend created + - title: Expose the backend service + preamble: | + We now have two namespaces linked to form a Skupper network, but + no services are exposed on it. Skupper uses the `skupper + expose` command to select a service from one namespace for + exposure on all the linked namespaces. + + **Note:** You can expose services that are not in the same + namespace where you installed Skupper as described in the + [Exposing services from a different namespace][different-ns] + documentation. + + [different-ns]: https://skupper.io/docs/cli/index.html#exposing-services-from-different-ns + + Use `skupper expose` to expose the backend service to the + frontend service. 
+ commands: + west: + - run: skupper service create backend 8080 --protocol tcp + output: service created + east: + - await: deployment/backend + - run: skupper service bind backend deployment backend + output: service bound + east2: + - await: deployment/backend + - run: skupper service bind backend deployment backend + output: service bound + - title: Expose the frontend service + preamble: | + We have established connectivity between the two namespaces and + made the backend in `east` available to the frontend in `west`. + Before we can test the application, we need external access to + the frontend. + + Use `kubectl expose` with `--type LoadBalancer` to open network + access to the frontend service. + commands: + west: + - await: deployment/frontend + - run: kubectl expose deployment/frontend --port 8080 --type LoadBalancer + output: service/frontend exposed + - standard: test_the_application + - standard: accessing_the_web_console + - standard: cleaning_up + commands: + west: + - run: skupper delete + - run: kubectl delete service/frontend + - run: kubectl delete deployment/frontend + east: + - run: skupper delete + - run: kubectl delete deployment/backend + east2: + - run: skupper delete + - run: kubectl delete deployment/backend + +summary: | + This example locates the frontend and backend services in different + namespaces, on different clusters. Ordinarily, this means that they + have no way to communicate unless they are exposed to the public + internet. + + Introducing Skupper into each namespace allows us to create a virtual + application network that can connect services in different clusters. + Any service exposed on the application network is represented as a + local service in all of the linked namespaces. + + The backend service is located in `east`, but the frontend service + in `west` can "see" it as if it were local. When the frontend + sends a request to the backend, Skupper forwards the request to the + namespace where the backend is running and routes the response back to + the frontend. + + From 5aa665deda9dd86e4bb3f13abad00e67ddecc385 Mon Sep 17 00:00:00 2001 From: Paul Wright Date: Thu, 25 May 2023 19:26:43 +0100 Subject: [PATCH 04/10] update config --- README.md | 650 +++++++++++++++++++++++++++++++++++++++++----------- skewer.yaml | 182 ++++++++------- 2 files changed, 614 insertions(+), 218 deletions(-) diff --git a/README.md b/README.md index 277402f..5da8d23 100644 --- a/README.md +++ b/README.md @@ -1,198 +1,588 @@ -# Deploying multiple http services for anycast access across cluster +# Skupper HTTP load balancing -This tutorial demonstrates how to deploy a set of http servers across multiple clusters and observe anycast application routing over a Virtual Application Network. +[![main](https://github.com/skupperproject/skupper-example-hello-world/actions/workflows/main.yaml/badge.svg)](https://github.com/skupperproject/skupper-example-hello-world/actions/workflows/main.yaml) -In this tutorial, you will deploy http servers to both a public and a private cluster. You will also create http clients that will access the http servers via the same address. You will observe how the VAN supports anycast application addressing by balancing client requests across the https servers on both the public and private cluster. 
+#### Deploying multiple http services for anycast access across cluster + +This example is part of a [suite of examples][examples] showing the +different ways you can use [Skupper][website] to connect services +across cloud providers, data centers, and edge sites. + +[website]: https://skupper.io/ +[examples]: https://skupper.io/examples/index.html -To complete this tutorial, do the following: +#### Contents +* [Overview](#overview) * [Prerequisites](#prerequisites) -* [Step 1: Set up the demo](#step-1-set-up-the-demo) -* [Step 2: Deploy the Virtual Application Network](#step-2-deploy-the-virtual-application-network) -* [Step 3: Deploy the HTTP service](#step-3-deploy-the-http-service) -* [Step 4: Create Skupper service for the Virtual Application Network](#step-4-create-skupper-service-for-the-virtual-application-network) -* [Step 5: Bind the Skupper service to the deployment target on the Virtual Application Network](#step-5-bind-the-skupper-service-to-the-deployment-target-on-the-virtual-application-network) -* [Step 6: Deploy HTTP client](#step-6-deploy-http-client) -* [Step 7: Review HTTP client metrics](#step-7-review-http-client-metrics) +* [Step 1: Install the Skupper command-line tool](#step-1-install-the-skupper-command-line-tool) +* [Step 2: Configure separate console sessions](#step-2-configure-separate-console-sessions) +* [Step 3: Access your clusters](#step-3-access-your-clusters) +* [Step 4: Set up your namespaces](#step-4-set-up-your-namespaces) +* [Step 5: Install Skupper in your namespaces](#step-5-install-skupper-in-your-namespaces) +* [Step 6: Check the status of your namespaces](#step-6-check-the-status-of-your-namespaces) +* [Step 7: Link your namespaces](#step-7-link-your-namespaces) +* [Step 8: Link the private clusters](#step-8-link-the-private-clusters) +* [Step 9: Deploy the HTTP service](#step-9-deploy-the-http-service) +* [Step 10: Expose the HTTP service](#step-10-expose-the-http-service) +* [Step 11: Bind the service to the deployment](#step-11-bind-the-service-to-the-deployment) +* [Step 12: Deploy the HTTP clients](#step-12-deploy-the-http-clients) +* [Step 13: Review client logs](#step-13-review-client-logs) +* [Accessing the web console](#accessing-the-web-console) * [Cleaning up](#cleaning-up) -* [Next steps](#next-steps) +* [Summary](#summary) +* [About this example](#about-this-example) + +## Overview + +This tutorial demonstrates how to deploy a set of http servers across multiple clusters and observe anycast application routing over a Virtual Application Network. + +In this tutorial, you will deploy http servers to both a public and a private cluster. You will also create http clients that will access the http servers via the same address. You will observe how the VAN supports anycast application addressing by balancing client requests across the https servers on both the public and private cluster. 
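The end state can be sketched briefly. Once the namespaces are linked and the `httpsvc` service is created and bound (Steps 10 and 11 below), a client pod in any linked namespace addresses the same service, and Skupper balances the requests across the server pods in both clusters. The service name, port, and request path below are the ones used later in this example; the command is illustrative only.

~~~ shell
# Illustrative sketch; run from a pod in any linked namespace after the
# httpsvc service has been created and bound in Steps 10 and 11.
curl http://httpsvc:8080/request
~~~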
## Prerequisites -* The `kubectl` command-line tool, version 1.15 or later ([installation guide](https://kubernetes.io/docs/tasks/tools/install-kubectl/)) -* The `skupper` command-line tool, the latest version ([installation guide](https://skupper.io/start/index.html#step-1-install-the-skupper-command-line-tool-in-your-environment)) +* The `kubectl` command-line tool, version 1.15 or later + ([installation guide][install-kubectl]) + +* Access to at least one Kubernetes cluster, from [any provider you + choose][kube-providers] + +[install-kubectl]: https://kubernetes.io/docs/tasks/tools/install-kubectl/ +[kube-providers]: https://skupper.io/start/kubernetes.html + +## Step 1: Install the Skupper command-line tool + +The `skupper` command-line tool is the entrypoint for installing +and configuring Skupper. You need to install the `skupper` +command only once for each development environment. + +On Linux or Mac, you can use the install script (inspect it +[here][install-script]) to download and extract the command: + +~~~ shell +curl https://skupper.io/install.sh | sh +~~~ + +The script installs the command under your home directory. It +prompts you to add the command to your path if necessary. + +For Windows and other installation options, see [Installing +Skupper][install-docs]. + +[install-script]: https://github.com/skupperproject/skupper-website/blob/main/docs/install.sh +[install-docs]: https://skupper.io/install/index.html + +## Step 2: Configure separate console sessions + +Skupper is designed for use with multiple namespaces, usually on +different clusters. The `skupper` command uses your +[kubeconfig][kubeconfig] and current context to select the +namespace where it operates. + +[kubeconfig]: https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/ + +Your kubeconfig is stored in a file in your home directory. The +`skupper` and `kubectl` commands use the `KUBECONFIG` environment +variable to locate it. + +A single kubeconfig supports only one active context per user. +Since you will be using multiple contexts at once in this +exercise, you need to create distinct kubeconfigs. + +Start a console session for each of your namespaces. Set the +`KUBECONFIG` environment variable to a different path in each +session. + +_**Console for public1:**_ + +~~~ shell +export KUBECONFIG=~/.kube/config-public1 +~~~ + +_**Console for public2:**_ + +~~~ shell +export KUBECONFIG=~/.kube/config-public2 +~~~ + +_**Console for private1:**_ + +~~~ shell +export KUBECONFIG=~/.kube/config-private1 +~~~ + +_**Console for private2:**_ + +~~~ shell +export KUBECONFIG=~/.kube/config-private2 +~~~ + +## Step 3: Access your clusters + +The procedure for accessing a Kubernetes cluster varies by +provider. [Find the instructions for your chosen +provider][kube-providers] and use them to authenticate and +configure access for each console session. + +[kube-providers]: https://skupper.io/start/kubernetes.html + +## Step 4: Set up your namespaces + +Use `kubectl create namespace` to create the namespaces you wish +to use (or use existing namespaces). Use `kubectl config +set-context` to set the current namespace for each session. 
+ +_**Console for public1:**_ + +~~~ shell +kubectl create namespace public1 +kubectl config set-context --current --namespace public1 +~~~ + +_**Console for public2:**_ + +~~~ shell +kubectl create namespace public2 +kubectl config set-context --current --namespace public2 +~~~ + +_**Console for private1:**_ + +~~~ shell +kubectl create namespace private1 +kubectl config set-context --current --namespace private1 +~~~ + +_**Console for private2:**_ + +~~~ shell +kubectl create namespace private2 +kubectl config set-context --current --namespace private2 +~~~ + +## Step 5: Install Skupper in your namespaces + +The `skupper init` command installs the Skupper router and service +controller in the current namespace. Run the `skupper init` command +in each namespace. + +**Note:** If you are using Minikube, [you need to start `minikube +tunnel`][minikube-tunnel] before you install Skupper. + +[minikube-tunnel]: https://skupper.io/start/minikube.html#running-minikube-tunnel + +_**Console for public1:**_ + +~~~ shell +skupper init --enable-console --enable-flow-collector +~~~ + +_**Console for public2:**_ + +~~~ shell +skupper init +~~~ + +_**Console for private1:**_ + +~~~ shell +skupper init +~~~ + +_**Console for private2:**_ + +~~~ shell +skupper init +~~~ + +_Sample output:_ + +~~~ console +$ skupper init +Waiting for LoadBalancer IP or hostname... +Skupper is now installed in namespace ''. Use 'skupper status' to get more information. +~~~ -The basis for the demonstration is to depict the operation of multiple http server deployment in both a private and public cluster and http client access to the servers from any of the namespaces (public and private) on the Virtal Application Network. As an example, the cluster deployment might be comprised of: +## Step 6: Check the status of your namespaces -* Two "private cloud" cluster running on your local machine or in a data center -* Two public cloud clusters running in public cloud providers +Use `skupper status` in each console to check that Skupper is +installed. -While the detailed steps are not included here, this demonstration can alternatively be performed with four separate namespaces on a single cluster. +_**Console for public1:**_ -## Step 1: Set up the demo +~~~ shell +skupper status +~~~ -1. On your local machine, make a directory for this tutorial and clone the example repo: +_**Console for public2:**_ - ```bash - mkdir http-demo - cd http-demo - git clone https://github.com/skupperproject/skupper-example-http-load-balancing.git - ``` +~~~ shell +skupper status +~~~ -2. Prepare the target clusters. +_**Console for private1:**_ - 1. On your local machine, log in to each cluster in a separate terminal session. - 2. In each cluster, create a namespace to use for the demo. - 3. In each cluster, set the kubectl config context to use the demo namespace [(see kubectl cheat sheet)](https://kubernetes.io/docs/reference/kubectl/cheatsheet/) +~~~ shell +skupper status +~~~ -## Step 2: Deploy the Virtual Application Network +_**Console for private2:**_ -On each cluster, define the virtual application network and the connectivity for the peer clusters. +~~~ shell +skupper status +~~~ -1. 
In the terminal for the first public cluster, deploy the **public1** application router and create three connection tokens for linking from the **public2** cluster, the **private1** cluster and the **private2** cluster: +_Sample output:_ - ```bash - skupper init --site-name public1 - skupper token create private1-to-public1-token.yaml - skupper token create private2-to-public1-token.yaml - skupper token create public2-to-public1-token.yaml - ``` +~~~ console +Skupper is enabled for namespace "" in interior mode. It is connected to 1 other site. It has 1 exposed service. +The site console url is: +The credentials for internal console-auth mode are held in secret: 'skupper-console-users' +~~~ -2. In the terminal for the second public cluster, deploy the **public2** application router, create two connection tokens for linking from the **private1** and **private2** clusters, and link to the **public1** cluster: +As you move through the steps below, you can use `skupper status` at +any time to check your progress. - ```bash - skupper init --site-name public2 - skupper token create private1-to-public2-token.yaml - skupper token create private2-to-public2-token.yaml - skupper link create public2-to-public1-token.yaml - ``` +## Step 7: Link your namespaces -3. In the terminal for the first private cluster, deploy the **private1** application router and define its links to the **public1** and **public2** clusters +Creating a link requires use of two `skupper` commands in +conjunction, `skupper token create` and `skupper link create`. - ```bash - skupper init --site-name private1 - skupper link create private1-to-public1-token.yaml - skupper link create private1-to-public2-token.yaml - ``` +The `skupper token create` command generates a secret token that +signifies permission to create a link. The token also carries the +link details. Then, in a remote namespace, The `skupper link +create` command uses the token to create a link to the namespace +that generated it. -4. In the terminal for the second private cluster, deploy the **private2** application router and define its links to the **public1** and **public2** clusters +**Note:** The link token is truly a *secret*. Anyone who has the +token can link to your namespace. Make sure that only those you +trust have access to it. - ```bash - skupper init --site-name private2 - skupper link create private2-to-public1-token.yaml - skupper link create private2-to-public2-token.yaml - ``` +First, use `skupper token create` in one namespace to generate the +token. Then, use `skupper link create` in the other to create a +link. -## Step 3: Deploy the HTTP service +_**Console for public1:**_ + +~~~ shell +skupper token create ~/secret.token +~~~ + +_Sample output:_ + +~~~ console +$ skupper token create ~/secret.token +Token written to ~/secret.token +~~~ + +_**Console for public2:**_ + +~~~ shell +skupper link create ~/secret.token +~~~ + +_Sample output:_ + +~~~ console +$ skupper link create ~/secret.token +Site configured to link to https://10.105.193.154:8081/ed9c37f6-d78a-11ec-a8c7-04421a4c5042 (name=link1) +Check the status of the link using 'skupper link status'. +~~~ + +If your console sessions are on different machines, you may need +to use `sftp` or a similar tool to transfer the token securely. +By default, tokens expire after a single use or 15 minutes after +creation. 
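Once the token has been used, you can confirm the result from the namespace that created the link. The commands below are a brief sketch using the checks mentioned in the sample output above.

~~~ shell
# In the console for public2, where `skupper link create` was run,
# verify the state of the link to public1
skupper link status

# `skupper status` in any namespace also reports how many other sites
# it is connected to
skupper status
~~~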
+ +## Step 8: Link the private clusters + +Use `skupper token create` +and create links + +_**Console for public1:**_ + +~~~ shell +skupper token create /tmp/public1.yaml --uses 2 +~~~ + +_Sample output:_ + +~~~ console +$ skupper token create /tmp/public1.yaml --uses 2 +token created +~~~ + +_**Console for private1:**_ + +~~~ shell +skupper link create /tmp/public1.yaml +~~~ + +_Sample output:_ + +~~~ console +$ skupper link create /tmp/public1.yaml +link created +~~~ + +_**Console for private2:**_ + +~~~ shell +skupper link create /tmp/public1.yaml +~~~ + +_Sample output:_ + +~~~ console +$ skupper link create /tmp/public1.yaml +link created +~~~ + +## Step 9: Deploy the HTTP service After creating the application router network, deploy the HTTP services. The **private1** and **public1** clusters will be used to deploy the HTTP servers and the **public2** and **private2** clusters will be used to enable client http communications to the servers. -1. In the terminal for the **public1** cluster, deploy the following: +_**Console for public1:**_ + +~~~ shell +kubectl apply -f ./server.yaml +~~~ + +_Sample output:_ + +~~~ console +$ kubectl apply -f ./server.yaml +created +~~~ + +_**Console for private1:**_ + +~~~ shell +kubectl apply -f ./server.yaml +~~~ + +_Sample output:_ + +~~~ console +$ kubectl apply -f ./server.yaml +created +~~~ - ```bash - kubectl apply -f ~/http-demo/skupper-example-http-load-balancing/server.yaml - ``` +## Step 10: Expose the HTTP service -2. In the terminal for the **private1** cluster, deploy the following: +Use `skupper create` to create a service. - ```bash - kubectl apply -f ~/http-demo/skupper-example-http-load-balancing/server.yaml - ``` +_**Console for public1:**_ -## Step 4: Create Skupper service for the Virtual Application Network +~~~ shell +skupper service create httpsvc 8080 --protocol tcp +~~~ -1. In the terminal for the **public1** cluster, create the httpsvc service: +_Sample output:_ - ```bash - skupper service create httpsvc 8080 --mapping http - ``` +~~~ console +$ skupper service create httpsvc 8080 --protocol tcp +service created +~~~ -2. In each of the cluster terminals, verify the service created is present +## Step 11: Bind the service to the deployment - ```bash - skupper service status - ``` +Bind services to deployments -## Step 5: Bind the Skupper service to the deployment target on the Virtual Application Network +_**Console for public1:**_ -1. In the terminal for the **public1** cluster, bind the httpsvc to the http-server deployment: +~~~ shell +skupper service bind httpsvc deployment http-server +~~~ - ```bash - skupper service bind httpsvc deployment http-server - ``` +_Sample output:_ -2. In the terminal for the **private1** cluster, bind the httpsvc to the http-server deployment: +~~~ console +$ skupper service bind httpsvc deployment http-server +bind +~~~ - ```bash - skupper service bind httpsvc deployment http-server - ``` +_**Console for private1:**_ -## Step 6: Deploy HTTP client +~~~ shell +skupper service bind httpsvc deployment http-server +~~~ -1. In the terminal for the **public2** cluster, deploy the following: +_Sample output:_ - ```bash - kubectl apply -f ~/http-demo/skupper-example-http-load-balancing/client.yaml - ``` +~~~ console +$ skupper service bind httpsvc deployment http-server +bind +~~~ -2. 
In the terminal for the **private2** cluster, deploy the following: +## Step 12: Deploy the HTTP clients - ```bash - kubectl apply -f ~/http-demo/skupper-example-http-load-balancing/client.yaml - ``` +Deploy clients -## Step 7: Review HTTP client metrics +_**Console for public2:**_ -The deployed http clients issue concurrent requests to the httpsvc. The http client -monitors which of the http server pods deployed on the **public1** and **private1** clusters -served the request and calculates the rates per server-pod. +~~~ shell +kubectl apply -f ./client.yaml +~~~ -1. In the terminal for the **public2** cluster, review the logs generated by the http client: +_Sample output:_ - ```bash - kubectl logs $(kubectl get pod -l application=http-client -o=jsonpath='{.items[0].metadata.name}') - ``` +~~~ console +$ kubectl apply -f ./client.yaml +bind +~~~ -2. In the terminal for the **private2** cluster, review the logs generated by the http client: +_**Console for private2:**_ - ```bash - kubectl logs $(kubectl get pod -l application=http-client -o=jsonpath='{.items[0].metadata.name}') - ``` +~~~ shell +kubectl apply -f ./client.yaml +~~~ -## Cleaning Up +_Sample output:_ -Restore your cluster environment by returning the resources created in the demonstration. On each cluster, delete the demo resources and the virtual application network: +~~~ console +$ kubectl apply -f ./client.yaml +bind +~~~ -1. In the terminal for the **public1** cluster, delete the resources: +## Step 13: Review client logs - ```bash - $ kubectl delete -f ~/http-demo/skupper-example-http-load-balancing/server.yaml - $ skupper delete - ``` +Write client logs to /tmp -2. In the terminal for the **public2** cluster, delete the resources: +_**Console for public2:**_ - ```bash - $ kubectl delete -f ~/http-demo/skupper-example-http-load-balancing/client.yaml - $ skupper delete - ``` +~~~ shell +kubectl logs $(kubectl get pod -l application=http-client -o=jsonpath='{.items[0].metadata.name}') +~~~ -3. In the terminal for the **private1** cluster, delete the resources: +_Sample output:_ - ```bash - $ kubectl delete -f ~/http-demo/skupper-example-http-load-balancing/server.yaml - $ skupper delete - ``` +~~~ console +$ kubectl logs $(kubectl get pod -l application=http-client -o=jsonpath='{.items[0].metadata.name}') +Service Name: HTTPSVC +Service Host: 10.105.108.176 +Service Port: 8080 +Configured concurrency: 50 +Query URL: http://10.105.108.176:8080/request -4. 
In the terminal for the **private2** cluster, delete the resources: +======== Rates per server-pod ======== +http-server-774567c64f-n2qt9: 75.5 +http-server-774567c64f-qw9kw: 84.5 +http-server-774567c64f-2mm88: 87 +http-server-774567c64f-mxfhx: 73 +~~~ - ```bash - $ kubectl delete -f ~/http-demo/skupper-example-http-load-balancing/client.yaml - $ skupper delete - ``` +_**Console for private2:**_ + +~~~ shell +kubectl logs $(kubectl get pod -l application=http-client -o=jsonpath='{.items[0].metadata.name}') +~~~ + +_Sample output:_ + +~~~ console +$ kubectl logs $(kubectl get pod -l application=http-client -o=jsonpath='{.items[0].metadata.name}') +Service Name: HTTPSVC +Service Host: 10.105.108.176 +Service Port: 8080 +Configured concurrency: 50 +Query URL: http://10.105.108.176:8080/request + +======== Rates per server-pod ======== +http-server-774567c64f-n2qt9: 75.5 +http-server-774567c64f-qw9kw: 84.5 +http-server-774567c64f-2mm88: 87 +http-server-774567c64f-mxfhx: 73 +~~~ + +## Accessing the web console + +Skupper includes a web console you can use to view the application +network. To access it, use `skupper status` to look up the URL of +the web console. Then use `kubectl get +secret/skupper-console-users` to look up the console admin +password. + +**Note:** The `` and `` fields in the +following output are placeholders. The actual values are specific +to your environment. + +_**Console for public1:**_ + +~~~ shell +skupper status +kubectl get secret/skupper-console-users -o jsonpath={.data.admin} | base64 -d +~~~ + +_Sample output:_ + +~~~ console +$ skupper status +Skupper is enabled for namespace "public1" in interior mode. It is connected to 1 other site. It has 1 exposed service. +The site console url is: +The credentials for internal console-auth mode are held in secret: 'skupper-console-users' + +$ kubectl get secret/skupper-console-users -o jsonpath={.data.admin} | base64 -d + +~~~ + +Navigate to `` in your browser. When prompted, log +in as user `admin` and enter the password. + +## Cleaning up + +To remove Skupper and the other resources from this exercise, use +the following commands. + +_**Console for public1:**_ + +~~~ shell +skupper delete +kubectl delete service/frontend +kubectl delete deployment/frontend +~~~ + +_**Console for private1:**_ + +~~~ shell +skupper delete +kubectl delete deployment/backend +~~~ + +## Summary + +This example locates the frontend and backend services in different +namespaces, on different clusters. Ordinarily, this means that they +have no way to communicate unless they are exposed to the public +internet. + +Introducing Skupper into each namespace allows us to create a virtual +application network that can connect services in different clusters. +Any service exposed on the application network is represented as a +local service in all of the linked namespaces. + +The backend service is located in `east`, but the frontend service +in `west` can "see" it as if it were local. When the frontend +sends a request to the backend, Skupper forwards the request to the +namespace where the backend is running and routes the response back to +the frontend. 
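Any service exposed this way can be checked from a linked namespace using nothing more than its local name. A minimal sketch of such a check for this example (not one of the scripted steps; it assumes the `httpsvc` service and the `/request` path shown in the client logs above, plus the publicly available `curlimages/curl` image):

~~~ shell
# From any linked namespace, resolve the Skupper-exposed service by name.
# The response may be served by an http-server pod in either cluster.
kubectl run curl-check --rm -i --restart=Never --image=curlimages/curl -- \
  curl -s http://httpsvc:8080/request
~~~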
+ + ## Next steps - - [Try the Bookinfo example for distributing application http services](https://github.com/skupperproject/skupper-example-bookinfo) - - [Try the Hipster Shop example for distributing application microservices](https://github.com/skupperproject/skupper-example-microservices) - - [Find more examples](https://skupper.io/examples/) +Check out the other [examples][examples] on the Skupper website. + +## About this example + +This example was produced using [Skewer][skewer], a library for +documenting and testing Skupper examples. + +[skewer]: https://github.com/skupperproject/skewer + +Skewer provides utility functions for generating the README and +running the example steps. Use the `./plano` command in the project +root to see what is available. + +To quickly stand up the example using Minikube, try the `./plano demo` +command. diff --git a/skewer.yaml b/skewer.yaml index 5709801..80b86c9 100644 --- a/skewer.yaml +++ b/skewer.yaml @@ -1,34 +1,23 @@ -title: Skupper Hello World -subtitle: A minimal HTTP application deployed across Kubernetes clusters using Skupper +title: Skupper HTTP load balancing +subtitle: Deploying multiple http services for anycast access across cluster github_actions_url: https://github.com/skupperproject/skupper-example-hello-world/actions/workflows/main.yaml overview: | - This example is a very simple multi-service HTTP application - deployed across Kubernetes clusters using Skupper. + This tutorial demonstrates how to deploy a set of http servers across multiple clusters and observe anycast application routing over a Virtual Application Network. - It contains two services: - - * A backend service that exposes an `/api/hello` endpoint. It - returns greetings of the form `Hi, . I am - ()`. - - * A frontend service that sends greetings to the backend and - fetches new greetings in response. - - With Skupper, you can place the backend in one cluster and the - frontend in another and maintain connectivity between the two - services without exposing the backend to the public internet. - - + In this tutorial, you will deploy http servers to both a public and a private cluster. You will also create http clients that will access the http servers via the same address. You will observe how the VAN supports anycast application addressing by balancing client requests across the https servers on both the public and private cluster. sites: - west: - kubeconfig: ~/.kube/config-west - namespace: west - east: - kubeconfig: ~/.kube/config-east - namespace: east - east2: - kubeconfig: ~/.kube/config-east2 - namespace: east2 + public1: + kubeconfig: ~/.kube/config-public1 + namespace: public1 + public2: + kubeconfig: ~/.kube/config-public2 + namespace: public2 + private1: + kubeconfig: ~/.kube/config-private1 + namespace: private1 + private2: + kubeconfig: ~/.kube/config-private2 + namespace: private2 steps: - standard: install_the_skupper_command_line_tool - standard: configure_separate_console_sessions @@ -37,85 +26,102 @@ steps: - standard: install_skupper_in_your_namespaces - standard: check_the_status_of_your_namespaces - standard: link_your_namespaces - - title: Link east2 + - title: Link the private clusters preamble: | Use `skupper token create` - in `west` and `skupper link create` in `east2`. 
+ and create links commands: - west: - - run: skupper token create /tmp/hw.yaml + public1: + - run: skupper token create /tmp/public1.yaml --uses 2 output: token created - east2: - - run: skupper link create /tmp/hw.yaml + private1: + - run: skupper link create /tmp/public1.yaml + output: link created + private2: + - run: skupper link create /tmp/public1.yaml output: link created - - title: Deploy the frontend and backend services + - title: Deploy the HTTP service preamble: | - Use `kubectl create deployment` to deploy the frontend service - in `west` and the backend service in both namespaces. + After creating the application router network, deploy the HTTP services. The **private1** and **public1** clusters will be used to deploy the HTTP servers and the **public2** and **private2** clusters will be used to enable client http communications to the servers. commands: - west: - - run: kubectl create deployment frontend --image quay.io/skupper/hello-world-frontend - output: deployment.apps/frontend created - east: - - run: kubectl create deployment backend --image quay.io/skupper/hello-world-backend --replicas 1 - output: deployment.apps/backend created - east2: - - run: kubectl create deployment backend --image quay.io/skupper/hello-world-backend --replicas 1 - output: deployment.apps/backend created - - title: Expose the backend service + public1: + - run: kubectl apply -f ./server.yaml + output: created + private1: + - run: kubectl apply -f ./server.yaml + output: created + - title: Expose the HTTP service preamble: | - We now have two namespaces linked to form a Skupper network, but - no services are exposed on it. Skupper uses the `skupper - expose` command to select a service from one namespace for - exposure on all the linked namespaces. - - **Note:** You can expose services that are not in the same - namespace where you installed Skupper as described in the - [Exposing services from a different namespace][different-ns] - documentation. - - [different-ns]: https://skupper.io/docs/cli/index.html#exposing-services-from-different-ns - - Use `skupper expose` to expose the backend service to the - frontend service. + Use `skupper create` to create a service. commands: - west: - - run: skupper service create backend 8080 --protocol tcp + public1: + - run: skupper service create httpsvc 8080 --protocol tcp output: service created - east: - - await: deployment/backend - - run: skupper service bind backend deployment backend - output: service bound - east2: - - await: deployment/backend - - run: skupper service bind backend deployment backend - output: service bound - - title: Expose the frontend service + - title: Bind the service to the deployment preamble: | - We have established connectivity between the two namespaces and - made the backend in `east` available to the frontend in `west`. - Before we can test the application, we need external access to - the frontend. - - Use `kubectl expose` with `--type LoadBalancer` to open network - access to the frontend service. 
+ Bind services to deployments commands: - west: - - await: deployment/frontend - - run: kubectl expose deployment/frontend --port 8080 --type LoadBalancer - output: service/frontend exposed - - standard: test_the_application + public1: + - await: deployment/http-server + - run: skupper service bind httpsvc deployment http-server + output: bind + private1: + - await: deployment/http-server + - run: skupper service bind httpsvc deployment http-server + output: bind + - title: Deploy the HTTP clients + preamble: | + Deploy clients + commands: + public2: + - run: kubectl apply -f ./client.yaml + output: bind + private2: + - run: kubectl apply -f ./client.yaml + output: bind + - title: Review client logs + preamble: | + Write client logs to /tmp + commands: + public2: + - await: deployment/http-client + - run: kubectl logs $(kubectl get pod -l application=http-client -o=jsonpath='{.items[0].metadata.name}') + output: | + Service Name: HTTPSVC + Service Host: 10.105.108.176 + Service Port: 8080 + Configured concurrency: 50 + Query URL: http://10.105.108.176:8080/request + + ======== Rates per server-pod ======== + http-server-774567c64f-n2qt9: 75.5 + http-server-774567c64f-qw9kw: 84.5 + http-server-774567c64f-2mm88: 87 + http-server-774567c64f-mxfhx: 73 + private2: + - await: deployment/http-client + - run: kubectl logs $(kubectl get pod -l application=http-client -o=jsonpath='{.items[0].metadata.name}') + output: | + Service Name: HTTPSVC + Service Host: 10.105.108.176 + Service Port: 8080 + Configured concurrency: 50 + Query URL: http://10.105.108.176:8080/request + + ======== Rates per server-pod ======== + http-server-774567c64f-n2qt9: 75.5 + http-server-774567c64f-qw9kw: 84.5 + http-server-774567c64f-2mm88: 87 + http-server-774567c64f-mxfhx: 73 + - standard: accessing_the_web_console - standard: cleaning_up commands: - west: + public1: - run: skupper delete - run: kubectl delete service/frontend - run: kubectl delete deployment/frontend - east: - - run: skupper delete - - run: kubectl delete deployment/backend - east2: + private1: - run: skupper delete - run: kubectl delete deployment/backend From 0c12b13eae914c3e9f4b29c612c5485ed5089905 Mon Sep 17 00:00:00 2001 From: Paul Wright Date: Mon, 29 May 2023 16:00:25 +0100 Subject: [PATCH 05/10] update text --- skewer.yaml | 105 +++++++++++++++++++++++++++++++--------------------- 1 file changed, 63 insertions(+), 42 deletions(-) diff --git a/skewer.yaml b/skewer.yaml index 80b86c9..05297ce 100644 --- a/skewer.yaml +++ b/skewer.yaml @@ -25,63 +25,91 @@ steps: - standard: set_up_your_namespaces - standard: install_skupper_in_your_namespaces - standard: check_the_status_of_your_namespaces - - standard: link_your_namespaces - - title: Link the private clusters + - title: Link your namespaces preamble: | - Use `skupper token create` - and create links + Creating a link requires use of two `skupper` commands in + conjunction, `skupper token create` and `skupper link create`. + + The `skupper token create` command generates a secret token that + signifies permission to create a link. The token also carries the + link details. Then, in a remote namespace, The `skupper link + create` command uses the token to create a link to the namespace + that generated it. + + **Note:** The link token is truly a *secret*. Anyone who has the + token can link to your namespace. Make sure that only those you + trust have access to it. + + First, use `skupper token create` in one namespace to generate the + token. 
Then, use `skupper link create` in the other namespaces to create a + link. commands: public1: - run: skupper token create /tmp/public1.yaml --uses 2 - output: token created + output: Token written to ~/secret.token + public2: + - run: skupper link create /tmp/public1.yaml + output: | + Site configured to link to https://10.105.193.154:8081/ed9c37f6-d78a-11ec-a8c7-04421a4c5042 (name=link1) + Check the status of the link using 'skupper link status'. + - run: skupper link status --wait 60 + apply: test private1: - run: skupper link create /tmp/public1.yaml - output: link created + output: | + Site configured to link to https://10.105.193.154:8081/ed9c37f6-d78a-11ec-a8c7-04421a4c5042 (name=link1) + Check the status of the link using 'skupper link status'. private2: - run: skupper link create /tmp/public1.yaml - output: link created - - title: Deploy the HTTP service + output: | + Site configured to link to https://10.105.193.154:8081/ed9c37f6-d78a-11ec-a8c7-04421a4c5042 (name=link1) + Check the status of the link using 'skupper link status'. + - title: Deploy the HTTP servers preamble: | - After creating the application router network, deploy the HTTP services. The **private1** and **public1** clusters will be used to deploy the HTTP servers and the **public2** and **private2** clusters will be used to enable client http communications to the servers. + In the **private1** and **public1** clusters, use the `kubectl apply` command + to install the servers. commands: public1: - run: kubectl apply -f ./server.yaml - output: created + output: deployment.apps/http-server created private1: - run: kubectl apply -f ./server.yaml - output: created - - title: Expose the HTTP service + output: deployment.apps/http-server created + - title: Expose the HTTP servers preamble: | - Use `skupper create` to create a service. + Use `skupper create` to create a service that is accessible from any site. commands: public1: - - run: skupper service create httpsvc 8080 --protocol tcp - output: service created + - run: skupper service create httpsvc 8080 --protocol http + output: - title: Bind the service to the deployment preamble: | - Bind services to deployments + Bind the new service to the HTTP server deployments. commands: public1: - await: deployment/http-server - run: skupper service bind httpsvc deployment http-server - output: bind + output: private1: - await: deployment/http-server - run: skupper service bind httpsvc deployment http-server output: bind - title: Deploy the HTTP clients preamble: | - Deploy clients + In the **private2** and **public2** clusters, use the `kubectl apply` command + to install the clients. commands: public2: - run: kubectl apply -f ./client.yaml - output: bind + output: deployment.apps/http-client created private2: - run: kubectl apply -f ./client.yaml - output: bind - - title: Review client logs + output: deployment.apps/http-client created + - title: Review the client logs preamble: | - Write client logs to /tmp + The client pods contain logs showing which server reponded to requests. + Use the `kubectl logs` command to inspect these logs and see how the traffic + was balanced. 
commands: public2: - await: deployment/http-client @@ -119,27 +147,20 @@ steps: commands: public1: - run: skupper delete - - run: kubectl delete service/frontend - - run: kubectl delete deployment/frontend + - run: kubectl delete -f ./server.yaml private1: - run: skupper delete - - run: kubectl delete deployment/backend - -summary: | - This example locates the frontend and backend services in different - namespaces, on different clusters. Ordinarily, this means that they - have no way to communicate unless they are exposed to the public - internet. - - Introducing Skupper into each namespace allows us to create a virtual - application network that can connect services in different clusters. - Any service exposed on the application network is represented as a - local service in all of the linked namespaces. + - run: kubectl delete -f ./server.yaml + public2: + - run: skupper delete + - run: kubectl delete -f ./client.yaml + private2: + - run: skupper delete + - run: kubectl delete -f ./client.yaml - The backend service is located in `east`, but the frontend service - in `west` can "see" it as if it were local. When the frontend - sends a request to the backend, Skupper forwards the request to the - namespace where the backend is running and routes the response back to - the frontend. - +summary: | + This example shows how you can deploy HTTP servers in private + and public clusters. Using Skupper you can then call those + servers from private and public clusters and achieve load + balancing for the requests. From 58211b718e0a126a2cb5b2849dd91aa562b9b408 Mon Sep 17 00:00:00 2001 From: Paul Wright Date: Mon, 29 May 2023 18:22:32 +0100 Subject: [PATCH 06/10] workaround no output from service create/bind cli --- README.md | 141 +++++++++++++++++++--------------------------------- skewer.yaml | 5 +- 2 files changed, 52 insertions(+), 94 deletions(-) diff --git a/README.md b/README.md index 5da8d23..115b93c 100644 --- a/README.md +++ b/README.md @@ -22,12 +22,11 @@ across cloud providers, data centers, and edge sites. * [Step 5: Install Skupper in your namespaces](#step-5-install-skupper-in-your-namespaces) * [Step 6: Check the status of your namespaces](#step-6-check-the-status-of-your-namespaces) * [Step 7: Link your namespaces](#step-7-link-your-namespaces) -* [Step 8: Link the private clusters](#step-8-link-the-private-clusters) -* [Step 9: Deploy the HTTP service](#step-9-deploy-the-http-service) -* [Step 10: Expose the HTTP service](#step-10-expose-the-http-service) -* [Step 11: Bind the service to the deployment](#step-11-bind-the-service-to-the-deployment) -* [Step 12: Deploy the HTTP clients](#step-12-deploy-the-http-clients) -* [Step 13: Review client logs](#step-13-review-client-logs) +* [Step 8: Deploy the HTTP servers](#step-8-deploy-the-http-servers) +* [Step 9: Expose the HTTP servers](#step-9-expose-the-http-servers) +* [Step 10: Bind the service to the deployment](#step-10-bind-the-service-to-the-deployment) +* [Step 11: Deploy the HTTP clients](#step-11-deploy-the-http-clients) +* [Step 12: Review the client logs](#step-12-review-the-client-logs) * [Accessing the web console](#accessing-the-web-console) * [Cleaning up](#cleaning-up) * [Summary](#summary) @@ -259,59 +258,36 @@ token can link to your namespace. Make sure that only those you trust have access to it. First, use `skupper token create` in one namespace to generate the -token. Then, use `skupper link create` in the other to create a +token. Then, use `skupper link create` in the other namespaces to create a link. 
_**Console for public1:**_ ~~~ shell -skupper token create ~/secret.token +skupper token create /tmp/public1.yaml --uses 3 ~~~ _Sample output:_ ~~~ console -$ skupper token create ~/secret.token +$ skupper token create /tmp/public1.yaml --uses 3 Token written to ~/secret.token ~~~ _**Console for public2:**_ ~~~ shell -skupper link create ~/secret.token +skupper link create /tmp/public1.yaml ~~~ _Sample output:_ ~~~ console -$ skupper link create ~/secret.token +$ skupper link create /tmp/public1.yaml Site configured to link to https://10.105.193.154:8081/ed9c37f6-d78a-11ec-a8c7-04421a4c5042 (name=link1) Check the status of the link using 'skupper link status'. ~~~ -If your console sessions are on different machines, you may need -to use `sftp` or a similar tool to transfer the token securely. -By default, tokens expire after a single use or 15 minutes after -creation. - -## Step 8: Link the private clusters - -Use `skupper token create` -and create links - -_**Console for public1:**_ - -~~~ shell -skupper token create /tmp/public1.yaml --uses 2 -~~~ - -_Sample output:_ - -~~~ console -$ skupper token create /tmp/public1.yaml --uses 2 -token created -~~~ - _**Console for private1:**_ ~~~ shell @@ -322,7 +298,8 @@ _Sample output:_ ~~~ console $ skupper link create /tmp/public1.yaml -link created +Site configured to link to https://10.105.193.154:8081/ed9c37f6-d78a-11ec-a8c7-04421a4c5042 (name=link1) +Check the status of the link using 'skupper link status'. ~~~ _**Console for private2:**_ @@ -335,12 +312,14 @@ _Sample output:_ ~~~ console $ skupper link create /tmp/public1.yaml -link created +Site configured to link to https://10.105.193.154:8081/ed9c37f6-d78a-11ec-a8c7-04421a4c5042 (name=link1) +Check the status of the link using 'skupper link status'. ~~~ -## Step 9: Deploy the HTTP service +## Step 8: Deploy the HTTP servers -After creating the application router network, deploy the HTTP services. The **private1** and **public1** clusters will be used to deploy the HTTP servers and the **public2** and **private2** clusters will be used to enable client http communications to the servers. +In the **private1** and **public1** clusters, use the `kubectl apply` command +to install the servers. _**Console for public1:**_ @@ -352,7 +331,7 @@ _Sample output:_ ~~~ console $ kubectl apply -f ./server.yaml -created +deployment.apps/http-server created ~~~ _**Console for private1:**_ @@ -365,29 +344,22 @@ _Sample output:_ ~~~ console $ kubectl apply -f ./server.yaml -created +deployment.apps/http-server created ~~~ -## Step 10: Expose the HTTP service +## Step 9: Expose the HTTP servers -Use `skupper create` to create a service. +Use `skupper create` to create a service that is accessible from any site. _**Console for public1:**_ ~~~ shell -skupper service create httpsvc 8080 --protocol tcp +skupper service create httpsvc 8080 --protocol http ~~~ -_Sample output:_ - -~~~ console -$ skupper service create httpsvc 8080 --protocol tcp -service created -~~~ +## Step 10: Bind the service to the deployment -## Step 11: Bind the service to the deployment - -Bind services to deployments +Bind the new service to the HTTP server deployments. 
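After the bind commands below have been run in each namespace, the binding can be confirmed with the `skupper service status` command mentioned earlier in this document; the exact output varies by Skupper version:

~~~ shell
# List the services Skupper exposes in this namespace and their bound targets.
skupper service status
~~~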
_**Console for public1:**_ @@ -395,29 +367,16 @@ _**Console for public1:**_ skupper service bind httpsvc deployment http-server ~~~ -_Sample output:_ - -~~~ console -$ skupper service bind httpsvc deployment http-server -bind -~~~ - _**Console for private1:**_ ~~~ shell skupper service bind httpsvc deployment http-server ~~~ -_Sample output:_ - -~~~ console -$ skupper service bind httpsvc deployment http-server -bind -~~~ - -## Step 12: Deploy the HTTP clients +## Step 11: Deploy the HTTP clients -Deploy clients +In the **private2** and **public2** clusters, use the `kubectl apply` command +to install the clients. _**Console for public2:**_ @@ -429,7 +388,7 @@ _Sample output:_ ~~~ console $ kubectl apply -f ./client.yaml -bind +deployment.apps/http-client created ~~~ _**Console for private2:**_ @@ -442,12 +401,14 @@ _Sample output:_ ~~~ console $ kubectl apply -f ./client.yaml -bind +deployment.apps/http-client created ~~~ -## Step 13: Review client logs +## Step 12: Review the client logs -Write client logs to /tmp +The client pods contain logs showing which server reponded to requests. +Use the `kubectl logs` command to inspect these logs and see how the traffic +was balanced. _**Console for public2:**_ @@ -538,36 +499,36 @@ _**Console for public1:**_ ~~~ shell skupper delete -kubectl delete service/frontend -kubectl delete deployment/frontend +kubectl delete -f ./server.yaml ~~~ _**Console for private1:**_ ~~~ shell skupper delete -kubectl delete deployment/backend +kubectl delete -f ./server.yaml ~~~ -## Summary +_**Console for public2:**_ -This example locates the frontend and backend services in different -namespaces, on different clusters. Ordinarily, this means that they -have no way to communicate unless they are exposed to the public -internet. +~~~ shell +skupper delete +kubectl delete -f ./client.yaml +~~~ -Introducing Skupper into each namespace allows us to create a virtual -application network that can connect services in different clusters. -Any service exposed on the application network is represented as a -local service in all of the linked namespaces. +_**Console for private2:**_ -The backend service is located in `east`, but the frontend service -in `west` can "see" it as if it were local. When the frontend -sends a request to the backend, Skupper forwards the request to the -namespace where the backend is running and routes the response back to -the frontend. +~~~ shell +skupper delete +kubectl delete -f ./client.yaml +~~~ + +## Summary - +This example shows how you can deploy HTTP servers in private +and public clusters. Using Skupper you can then call those +servers from private and public clusters and achieve load +balancing for the requests. ## Next steps diff --git a/skewer.yaml b/skewer.yaml index 05297ce..59572ff 100644 --- a/skewer.yaml +++ b/skewer.yaml @@ -45,7 +45,7 @@ steps: link. commands: public1: - - run: skupper token create /tmp/public1.yaml --uses 2 + - run: skupper token create /tmp/public1.yaml --uses 3 output: Token written to ~/secret.token public2: - run: skupper link create /tmp/public1.yaml @@ -81,7 +81,6 @@ steps: commands: public1: - run: skupper service create httpsvc 8080 --protocol http - output: - title: Bind the service to the deployment preamble: | Bind the new service to the HTTP server deployments. 
@@ -89,11 +88,9 @@ steps: public1: - await: deployment/http-server - run: skupper service bind httpsvc deployment http-server - output: private1: - await: deployment/http-server - run: skupper service bind httpsvc deployment http-server - output: bind - title: Deploy the HTTP clients preamble: | In the **private2** and **public2** clusters, use the `kubectl apply` command From 174333dfd1418520bf5e8634d2712f1235595dc5 Mon Sep 17 00:00:00 2001 From: Paul Wright Date: Tue, 30 May 2023 19:04:02 +0100 Subject: [PATCH 07/10] change protocol to tcp --- skewer.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/skewer.yaml b/skewer.yaml index 59572ff..fb0af17 100644 --- a/skewer.yaml +++ b/skewer.yaml @@ -80,7 +80,7 @@ steps: Use `skupper create` to create a service that is accessible from any site. commands: public1: - - run: skupper service create httpsvc 8080 --protocol http + - run: skupper service create httpsvc 8080 - title: Bind the service to the deployment preamble: | Bind the new service to the HTTP server deployments. From 262fa3a61057ec8119268c261ff81b45d9c99162 Mon Sep 17 00:00:00 2001 From: Paul Wright Date: Tue, 30 May 2023 19:06:14 +0100 Subject: [PATCH 08/10] update readme --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 115b93c..0bf4c3f 100644 --- a/README.md +++ b/README.md @@ -354,7 +354,7 @@ Use `skupper create` to create a service that is accessible from any site. _**Console for public1:**_ ~~~ shell -skupper service create httpsvc 8080 --protocol http +skupper service create httpsvc 8080 ~~~ ## Step 10: Bind the service to the deployment From b92f00253517e56e351f3843c84a200ce435206f Mon Sep 17 00:00:00 2001 From: Paul Wright Date: Wed, 31 May 2023 13:07:17 +0100 Subject: [PATCH 09/10] update text --- README.md | 6 +++--- skewer.yaml | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index 0bf4c3f..8a3e0d3 100644 --- a/README.md +++ b/README.md @@ -2,7 +2,7 @@ [![main](https://github.com/skupperproject/skupper-example-hello-world/actions/workflows/main.yaml/badge.svg)](https://github.com/skupperproject/skupper-example-hello-world/actions/workflows/main.yaml) -#### Deploying multiple http services for anycast access across cluster +#### Deploying multiple HTTP services and load balancing across clusters This example is part of a [suite of examples][examples] showing the different ways you can use [Skupper][website] to connect services @@ -34,9 +34,9 @@ across cloud providers, data centers, and edge sites. ## Overview -This tutorial demonstrates how to deploy a set of http servers across multiple clusters and observe anycast application routing over a Virtual Application Network. +This tutorial demonstrates how to deploy a set of http servers across multiple clusters and observe load balancing over a Virtual Application Network. -In this tutorial, you will deploy http servers to both a public and a private cluster. You will also create http clients that will access the http servers via the same address. You will observe how the VAN supports anycast application addressing by balancing client requests across the https servers on both the public and private cluster. +In this tutorial, you will deploy http servers to both a public and a private cluster. You will also create HTTP clients that will access the HTTP servers via the same address. 
You will observe how the VAN supports balancing client requests across the HTTP servers on both the public and private cluster. ## Prerequisites diff --git a/skewer.yaml b/skewer.yaml index fb0af17..18ac0c4 100644 --- a/skewer.yaml +++ b/skewer.yaml @@ -1,10 +1,10 @@ title: Skupper HTTP load balancing -subtitle: Deploying multiple http services for anycast access across cluster +subtitle: Deploying multiple HTTP services and load balancing across clusters github_actions_url: https://github.com/skupperproject/skupper-example-hello-world/actions/workflows/main.yaml overview: | - This tutorial demonstrates how to deploy a set of http servers across multiple clusters and observe anycast application routing over a Virtual Application Network. + This tutorial demonstrates how to deploy a set of http servers across multiple clusters and observe load balancing over a Virtual Application Network. - In this tutorial, you will deploy http servers to both a public and a private cluster. You will also create http clients that will access the http servers via the same address. You will observe how the VAN supports anycast application addressing by balancing client requests across the https servers on both the public and private cluster. + In this tutorial, you will deploy http servers to both a public and a private cluster. You will also create HTTP clients that will access the HTTP servers via the same address. You will observe how the VAN supports balancing client requests across the HTTP servers on both the public and private cluster. sites: public1: kubeconfig: ~/.kube/config-public1 From a702317e49768f9583449e048e1185a2c9c6bb89 Mon Sep 17 00:00:00 2001 From: Paul Wright Date: Thu, 1 Jun 2023 14:41:48 +0100 Subject: [PATCH 10/10] fix typo --- README.md | 2 +- skewer.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 8a3e0d3..f8ca66a 100644 --- a/README.md +++ b/README.md @@ -406,7 +406,7 @@ deployment.apps/http-client created ## Step 12: Review the client logs -The client pods contain logs showing which server reponded to requests. +The client pods contain logs showing which server responded to requests. Use the `kubectl logs` command to inspect these logs and see how the traffic was balanced. diff --git a/skewer.yaml b/skewer.yaml index 18ac0c4..52fc69e 100644 --- a/skewer.yaml +++ b/skewer.yaml @@ -104,7 +104,7 @@ steps: output: deployment.apps/http-client created - title: Review the client logs preamble: | - The client pods contain logs showing which server reponded to requests. + The client pods contain logs showing which server responded to requests. Use the `kubectl logs` command to inspect these logs and see how the traffic was balanced. commands:
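For a quicker look at the same information, the client logs can also be followed directly from the deployment, without the jsonpath pod lookup used above. A minimal alternative, assuming the `http-client` deployment name used throughout this example:

~~~ shell
# Stream the per-server-pod rates as the client reports them.
kubectl logs --follow deployment/http-client
~~~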