diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml deleted file mode 100644 index 3637433..0000000 --- a/.github/workflows/ci.yml +++ /dev/null @@ -1,29 +0,0 @@ -name: CI - -on: - push: - branches: [ "master", "main" ] - pull_request: - branches: [ "master", "main" ] - -jobs: - fmt-check: - name: gofmt + go vet - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - - name: Set up Go - uses: actions/setup-go@v4 - with: - go-version: 1.24 - - name: Check formatting - run: | - # list any files that need formatting - unformatted=$(gofmt -s -l .) - if [ -n "$unformatted" ]; then - echo "gofmt needs to be run on the following files:"; - echo "$unformatted"; - exit 1; - fi - - name: go vet - run: go vet ./... diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml deleted file mode 100644 index a8445d4..0000000 --- a/.github/workflows/codeql-analysis.yml +++ /dev/null @@ -1,67 +0,0 @@ -# For most projects, this workflow file will not need changing; you simply need -# to commit it to your repository. -# -# You may wish to alter this file to override the set of languages analyzed, -# or to provide custom queries or build logic. -# -# ******** NOTE ******** -# We have attempted to detect the languages in your repository. Please check -# the `language` matrix defined below to confirm you have the correct set of -# supported CodeQL languages. 
-# -name: "CodeQL" - -on: - push: - branches: [ master ] - pull_request: - # The branches below must be a subset of the branches above - branches: [ master ] - schedule: - - cron: '34 12 * * 5' - -jobs: - analyze: - name: Analyze - runs-on: ubuntu-latest - - strategy: - fail-fast: false - matrix: - language: [ 'go' ] - # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ] - # Learn more: - # https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed - - steps: - - name: Checkout repository - uses: actions/checkout@v2 - - # Initializes the CodeQL tools for scanning. - - name: Initialize CodeQL - uses: github/codeql-action/init@v2 - with: - languages: ${{ matrix.language }} - # If you wish to specify custom queries, you can do so here or in a config file. - # By default, queries listed here will override any specified in a config file. - # Prefix the list here with "+" to use these queries and those in the config file. - # queries: ./path/to/local/query, your-org/your-repo/queries@main - - # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). - # If this step fails, then you should remove it and run the build manually (see below) - - name: Autobuild - uses: github/codeql-action/autobuild@v2 - - # â„šī¸ Command-line programs to run using the OS shell. 
- # 📚 https://git.io/JvXDl - - # âœī¸ If the Autobuild fails above, remove it and uncomment the following three lines - # and modify them (or add more) to build your code if your project - # uses a compiled language - - #- run: | - # make bootstrap - # make release - - - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@v2 diff --git a/.github/workflows/linter.yml b/.github/workflows/linter.yml deleted file mode 100644 index b8732f4..0000000 --- a/.github/workflows/linter.yml +++ /dev/null @@ -1,57 +0,0 @@ ---- -################################# -################################# -## Super Linter GitHub Actions ## -################################# -################################# -name: Lint Code Base - -# -# Documentation: -# https://docs.github.com/en/actions/learn-github-actions/workflow-syntax-for-github-actions -# - -############################# -# Start the job on all push # -############################# -on: - push: - branches-ignore: [master, main] - # Remove the line above to run when pushing to master - pull_request: - branches: [master, main] - -############### -# Set the Job # -############### -jobs: - build: - # Name the Job - name: Lint Code Base - # Set the agent to run on - runs-on: ubuntu-latest - - ################## - # Load all steps # - ################## - steps: - ########################## - # Checkout the code base # - ########################## - - name: Checkout Code - uses: actions/checkout@v3 - with: - # Full git history is needed to get a proper list of changed files within `super-linter` - fetch-depth: 0 - - ################################ - # Run Linter against code base # - ################################ - - name: Lint Code Base - uses: github/super-linter@v4 - env: - # don't fail as there are multiple errors showing up that need attending to. 
- DISABLE_ERRORS: true - VALIDATE_ALL_CODEBASE: false - DEFAULT_BRANCH: master - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/super-linter.yml b/.github/workflows/super-linter.yml new file mode 100644 index 0000000..d1c5421 --- /dev/null +++ b/.github/workflows/super-linter.yml @@ -0,0 +1,79 @@ +--- +name: Super-Linter + +# Provide minimal permissions required for linters to post annotations/checks +permissions: + contents: read + +on: + push: + branches: ["master", "main"] + pull_request: + +jobs: + prechecks: + name: Go pre-checks (vet & test) + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 + with: + persist-credentials: false + + - name: Set up Go + uses: actions/setup-go@4b73464bb391d4059bd26b0524d20df3927bd417 + with: + go-version: '1.26' + + - name: Run go vet and tests + run: | + go version + go vet ./... + go test ./... + + golangci-lint: + name: golangci-lint + runs-on: ubuntu-latest + needs: prechecks + steps: + - name: Checkout + uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 + with: + persist-credentials: false + + - name: Set up Go + uses: actions/setup-go@4b73464bb391d4059bd26b0524d20df3927bd417 + with: + go-version: '1.26' + + - name: Install golangci-lint + run: | + pkg="github.com/golangci/golangci-lint/v2/cmd/golangci-lint" + go install "$pkg@v2.11.4" + echo "$HOME/go/bin" >> "$GITHUB_PATH" + + - name: Run golangci-lint + run: golangci-lint run ./... 
+ + super-linter: + name: Super-Linter + runs-on: ubuntu-latest + needs: prechecks + permissions: + contents: read + checks: write + statuses: write + steps: + - name: Checkout + uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 + with: + persist-credentials: false + + - name: Run Super-Linter + uses: github/super-linter@454ba4482ce2cd0c505bc592e83c06e1e37ade61 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + DEFAULT_BRANCH: master + # Enable all linters but skip GO (handled by prechecks). + VALIDATE: true + VALIDATE_GO: false diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml deleted file mode 100644 index a6a5a66..0000000 --- a/.gitlab-ci.yml +++ /dev/null @@ -1,56 +0,0 @@ -mage: golang:1.17.5 - -cache: - paths: - - /apt-cache - - /go/src/github.com - - /go/src/golang.org - - /go/src/google.golang.org - - /go/src/gopkg.in - -stages: - - test - - build - -before_script: - - mkdir -p /go/src/github.com/sjmudd /go/src/_/builds - - cp -r $CI_PROJECT_DIR /go/src/github.som/sjmudd/ps-top - - ln -s /go/src/github.com/sjmudd /go/src/_/builds/sjmudd - - make dep - -unit_tests: - stage: test - script: - - make test - -race_detector: - stage: test - script: - - make race - -memory_sanitizer: - stage: test - script: - - make msan - -code_coverage: - stage: test - script: - - make coverage - -code_coverage_report: - stage: test - script: - - make coverhtml - only: - - master - -lint_code: - stage: test - script: - - make lint - -build: - stage: build - script: - - make diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index cffb8a5..0000000 --- a/.travis.yml +++ /dev/null @@ -1,14 +0,0 @@ -# http://docs.travis-ci.com/user/languages/go/ -language: go - -go: 1.16 - -os: - - linux - -install: true - -script: script/cibuild - -notifications: - email: false diff --git a/Makefile b/Makefile deleted file mode 100644 index b6262d2..0000000 --- a/Makefile +++ /dev/null @@ -1,59 +0,0 @@ -PROJECT_NAME := "ps-top" -PKG := "github.com/sjmudd/$(PROJECT_NAME)" 
-PKG_LIST := $(shell go list ${PKG}/... | grep -v /vendor/) -GO_FILES := $(shell find . -name '*.go' | grep -v /vendor/ | grep -v _test.go) - -.PHONY: all dep build clean test coverage coverhtml lint fmt fmt-check lint-install ci-check tidy tidy-check - -all: build - -fmt: ## Run gofmt in-place - @gofmt -s -w . - -fmt-check: ## Check for gofmt changes (fail if files need formatting) - @unformatted=$(gofmt -s -l .) ; if [ -n "$$unformatted" ]; then echo "gofmt needs to be run on:"; echo "$$unformatted"; exit 1; fi - -lint-install: ## Install golangci-lint (v1.59.0) - @echo "Installing golangci-lint v1.59.0..." - @curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(shell go env GOPATH)/bin v1.59.0 - -lint: ## Lint the files with golangci-lint (requires lint-install) - @which golangci-lint >/dev/null 2>&1 || { echo "golangci-lint not found; run 'make lint-install'"; exit 1; } - @golangci-lint run ./... - - -test: ## Run unittests - @go test -short ${PKG_LIST} - -race: dep ## Run data race detector - @go test -race -short ${PKG_LIST} - -msan: dep ## Run memory sanitizer - @go test -msan -short ${PKG_LIST} - -coverage: ## Generate global code coverage report - ./tools/coverage.sh; - -coverhtml: ## Generate global code coverage report in HTML - ./tools/coverage.sh html; - -dep: ## Get the dependencies - @go get -v ./... 
- -ci-check: fmt-check lint test tidy-check ## Run CI-like checks locally - @echo "ci-check completed" - -tidy: ## Run go mod tidy (updates go.mod & go.sum) - @go mod tidy - -tidy-check: ## Ensure go.mod and go.sum are tidy (fail if changes) - @orig=$$(mktemp) ; git ls-files -- others --ignored --exclude-standard >/dev/null 2>&1 || true ; git rev-parse --verify HEAD >/dev/null 2>&1 || true ; git status --porcelain >/dev/null 2>&1 || true ; git diff --quiet || true ; go mod tidy ; if [ -n "$$(git status --porcelain)" ]; then echo "go.mod or go.sum changed after go mod tidy; please run 'go mod tidy'"; git --no-pager status --porcelain; git --no-pager diff; git checkout -- go.mod go.sum || true; exit 1; fi ; rm -f $$orig - -build: dep ## Build the binary file - @go build -v $(PKG) - -clean: ## Remove previous build - @rm -f $(PROJECT_NAME) - -help: ## Display this help screen - @grep -h -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}' diff --git a/README.md b/README.md index 96609ea..3c1e09f 100644 --- a/README.md +++ b/README.md @@ -1,15 +1,15 @@ -## ps-top - a top-like program for MySQL +# ps-top - a top-like program for MySQL ps-top is a program which collects information from MySQL 5.6+'s performance_schema database and uses this information to display server load in real-time. Data is shown by table or filename and the metrics also show how this is split between select, insert, -update or delete activity. User activity is now shown showing the +update or delete activity. User activity is now shown showing the number of different hosts that connect with the same username and -the activity of those users. There are also statistics on mutex -and sql stage timings. +the activity of those users. There are also statistics on mutex +and SQL stage timings. 
-### Installation +## Installation Install each binary by doing: `$ go install github.com/sjmudd/ps-top@latest` @@ -22,61 +22,61 @@ the binary will be built and installed into `$GOPATH/bin/`. If this path is in your `PATH` setting then the program can be run directly without having to specify any specific path. -### Configuration +## Configuration Sometimes you may want to combine different tables together and show the combined output. A typical example might be if you have lots of similarly named tables. Should you want to do this you can -use the following configuration file `~/.pstoprc` to hole the +use the following configuration file `~/.pstoprc` to hold the configuration information: -``` +```ini [munge] = _[0-9]{8}$ = _YYYYMMDD _[0-9]{6}$ = _YYYYMM ``` -#### MySQL Access +### MySQL Access Access to MySQL can be made by one of the following methods: -* Default: use a defaults-file named `~/.my.cnf`. -* use an explicit defaults-file with `--defaults-file=/path/to/.my.cnf`. -* connect to a host with `--host=somehost --port=999 --user=someuser --password=somepass`, or -* connect via a socket with `--socket=/path/to/mysql.sock --user=someuser --password=somepass` -* to avoid the password being stored or provided as a command-line + +- Default: use a defaults-file named `~/.my.cnf`. +- use an explicit defaults-file with `--defaults-file=/path/to/.my.cnf`. +- connect to a host with `--host=somehost --port=999 --user=someuser --password=somepass`, or +- connect via a socket with `--socket=/path/to/mysql.sock --user=someuser --password=somepass` +- to avoid the password being stored or provided as a command-line argument you can use `--askpass` which will request this from the user on startup The user if not specified will default to the contents of `$USER`. The port if not specified will default to 3306. -* If you use the command-line option `--use-environment` `ps-top` -will look for the credentials in the environment -variable `MYSQL_DSN` and connect with that. 
This is a GO DSN and -is expected to be in the format: -`user:pass@tcp(host:port)/performance_schema` and currently ALL -fields must be filled in. With a suitable wrapper function this -allows you to access one of many different servers without making -the credentials visible on the command-line. +- If you use the command-line option `--use-environment` `ps-top` + will look for the credentials in the environment + variable `MYSQL_DSN` and connect with that. This is a GO DSN and + is expected to be in the format: + `user:pass@tcp(host:port)/performance_schema` and currently ALL + fields must be filled in. With a suitable wrapper function this + allows you to access one of many different servers without making + the credentials visible on the command-line. An example setting could be to use TLS which is not fully supported at the moment with command-line parameters: - -``` -$ export MYSQL_DSN='user:pass@tcp(host:3306)/performance_schema?tls=skip-verify&allowCleartextPasswords=1' -$ ps-top +```sh +export MYSQL_DSN='user:pass@tcp(host:3306)/performance_schema?tls=skip-verify&allowCleartextPasswords=1' +ps-top ``` -A second use case of the `MYSQL_DSN` setting is to avoid the golang +A second use case of the `MYSQL_DSN` setting is to avoid the Go driver executing some statements using prepare and close statement steps. This adds a tiny bit of extra latency for queries sent to -the backend database. Adding the additional setting +the backend database. Adding the additional setting `interpolateParams=true` to the DSN will avoid these stages thus reducing the number of round trips made when making queries. -#### MySQL/MariaDB configuration +### MySQL/MariaDB configuration The `performance_schema` database **MUST** be enabled for `ps-top` to work. By default on MySQL this is enabled, but on MariaDB >= 10.0.12 it is disabled. @@ -87,85 +87,87 @@ So please check your settings. 
Simply configure in `/etc/my.cnf`: If you change this setting you'll need to restart MariaDB for it to take effect. -### Grants +## Grants `ps-top` needs `SELECT` grants to access `performance_schema` tables. It will not run if access is not available. `setup_instruments`: To view `mutex_latency` or `stages_latency` `ps-top` will try to change the configuration if needed and if you -have grants to do this. If the server is `--read-only` or you do not +have grants to do this. If the server is `--read-only` or you do not have sufficient grants to change these tables these views may be empty. Pior to stopping `ps-top` will restore the `setup_instruments` configuration back to its original settings if it had successfully updated the table when starting up. -### Views +## Views `ps-top` can show 7 different views of data, the views -are updated every second by default. The views are named: - -* `table_io_latency`: Show activity by table by the time waiting to perform operations on them. -* `table_io_ops`: Show activity by number of operations MySQL performs on them. -* `file_io_latency`: Show where MySQL is spending it's time in file I/O. -* `table_lock_latency`: Show order based on table locks -* `user_latency`: Show ordering based on how long users are running -queries, or the number of connections they have to MySQL. This is -really missing a feature in MySQL (see: [bug#75156](http://bugs.mysql.com/75156)) -to provide higher resolution query times than seconds. It gives -some info but if the queries are very short then the integer runtime -in seconds makes the output far less interesting. Total idle time is also -shown as this gives an indication of perhaps overly long idle queries, -and the sum of the values here if there's a pile up may be interesting. -* `mutex_latency`: Show the ordering by mutex latency [1]. -* `stages_latency`: Show the ordering by time in the different SQL query stages [1]. +are updated every second by default. 
The views are named: + +- `table_io_latency`: Show activity by table by the time waiting to perform operations on them. +- `table_io_ops`: Show activity by number of operations MySQL performs on them. +- `file_io_latency`: Show where MySQL is spending it's time in file I/O. +- `table_lock_latency`: Show order based on table locks +- `user_latency`: Show ordering based on how long users are running + queries, or the number of connections they have to MySQL. This is + really missing a feature in MySQL (see: [bug#75156](http://bugs.mysql.com/75156)) + to provide higher resolution query times than seconds. It gives + some info but if the queries are very short then the integer runtime + in seconds makes the output far less interesting. Total idle time is also + shown as this gives an indication of perhaps overly long idle queries, + and the sum of the values here if there's a pile up may be interesting. +- `mutex_latency`: Show the ordering by mutex latency [1]. +- `stages_latency`: Show the ordering by time in the different SQL query stages [1]. You can change the polling interval and switch between modes (see below). [1] See Grants above. These views may appear empty if `setup_instruments` is not configured correctly. -### Keys +## Keys When in `ps-top` mode the following keys allow you to navigate around the different ps-top displays or to change it's behaviour. -* h - gives you a help screen. -* - - reduce the poll interval by 1 second (minimum 1 second) -* + - increase the poll interval by 1 second -* q - quit -* t - toggle between showing the statistics since resetting ps-top started or you explicitly reset them (with 'z') [REL] or showing the statistics as collected from MySQL [ABS]. -* z - reset statistics. That is counters you see are relative to when you "reset" statistics. -* `` - change display modes between: latency, ops, file I/O, lock, user, mutex, stages and memory modes. 
-* left arrow - change to previous screen -* right arrow - change to next screen +- `h` - gives you a help screen. +- `-` - reduce the poll interval by 1 second (minimum 1 second) +- `+` - increase the poll interval by 1 second +- `q` - quit +- `t` - toggle between showing the statistics since resetting ps-top started or you explicitly reset them (with 'z') [REL] or showing the statistics as collected from MySQL [ABS]. +- `z` - reset statistics. That is counters you see are relative to when you "reset" statistics. +- `` - change display modes between: latency, ops, file I/O, lock, user, mutex, stages and memory modes. +- ← (`left arrow`) - change to previous screen +- → (`right arrow`) - change to next screen -### See also +## See also See also: -* [screen_samples.txt](https://github.com/sjmudd/ps-top/blob/master/screen_samples.txt) provides some sample output from my own system. -### Contributing +- [screen_samples.txt](https://github.com/sjmudd/ps-top/blob/master/screen_samples.txt) provides some sample output from my own system. + +## Contributing This program was started as a simple project to allow me to learn go, which I'd been following for a while, but hadn't used in earnest. This probably shows in the code so suggestions on improvement are most welcome. -### Licensing +## Licensing BSD 2-Clause License -### Feedback +## Feedback Feedback and patches welcome. I am especially interested in hearing from you if you are using ps-top, or if you have ideas of how I can better use other information from the `performance_schema` tables to provide a more complete vision of what MySQL is doing or where -it's busy. The tool has been used by myself and colleagues and +it's busy. The tool has been used by myself and colleagues and helped quickly identify bottlenecks and problems in several systems. 
Simon J Mudd -### Code Documenton +## Code Documentation + [godoc.org/github.com/sjmudd/ps-top](http://godoc.org/github.com/sjmudd/ps-top) diff --git a/app/app.go b/app/app.go index b9fbd2d..ca9af1b 100644 --- a/app/app.go +++ b/app/app.go @@ -124,7 +124,11 @@ func NewApp( app.resetDBStatistics() - app.currentView = view.SetupAndValidate(settings.ViewName, app.db) // if empty will use the default + var err error + app.currentView, err = view.SetupAndValidate(settings.ViewName, app.db) // if empty will use the default + if err != nil { + return nil, fmt.Errorf("app.NewApp: %w", err) + } app.UpdateCurrentTabler() log.Println("app.NewApp() finishes") @@ -183,7 +187,7 @@ func (app *App) resetStatistics() { app.mutexlatency.ResetStatistics() app.memory.ResetStatistics() - log.Println("app.resetStatistics() took", time.Duration(time.Since(start)).String()) + log.Println("app.resetStatistics() took", time.Since(start)) } // Collect the data we are looking at. @@ -193,7 +197,7 @@ func (app *App) Collect() { app.currentTabler.Collect() app.waiter.CollectedNow() - log.Println("app.Collect() took", time.Duration(time.Since(start)).String()) + log.Println("app.Collect() took", time.Since(start)) } // Display shows the output appropriate to the corresponding view and device @@ -237,9 +241,8 @@ func (app *App) Run() { log.Println("app.Run()") - app.sigChan = make(chan os.Signal, 10) // 10 entries - signal.Notify(app.sigChan, syscall.SIGINT, syscall.SIGTERM) - + // set up signal handling and event channel once + app.setupSignalHandler() eventChan := app.display.EventChan() for !app.finished { @@ -248,40 +251,69 @@ func (app *App) Run() { log.Println("Caught signal: ", sig) app.finished = true case <-app.waiter.WaitUntilNextPeriod(): - app.Collect() - app.Display() + app.collectAndDisplay() case inputEvent := <-eventChan: - switch inputEvent.Type { - case event.EventAnonymise: - anonymiser.Enable(!anonymiser.Enabled()) // toggle current behaviour - case event.EventFinished: - 
app.finished = true - case event.EventViewNext: - app.displayNext() - case event.EventViewPrev: - app.displayPrevious() - case event.EventDecreasePollTime: - if app.waiter.WaitInterval() > time.Second { - app.waiter.SetWaitInterval(app.waiter.WaitInterval() - time.Second) - } - case event.EventIncreasePollTime: - app.waiter.SetWaitInterval(app.waiter.WaitInterval() + time.Second) - case event.EventHelp: - app.help = !app.help - app.display.Clear() - case event.EventToggleWantRelative: - app.config.SetWantRelativeStats(!app.config.WantRelativeStats()) - app.Display() - case event.EventResetStatistics: - app.resetDBStatistics() - app.Display() - case event.EventResizeScreen: - width, height := inputEvent.Width, inputEvent.Height - app.display.Resize(width, height) - app.Display() - case event.EventError: - log.Fatalf("Quitting because of EventError error") + if app.handleInputEvent(inputEvent) { + return } } } } + +// setupSignalHandler initializes the signal channel and registers for +// SIGINT and SIGTERM. Extracted to reduce complexity in Run(). +func (app *App) setupSignalHandler() { + app.sigChan = make(chan os.Signal, 10) // 10 entries + signal.Notify(app.sigChan, syscall.SIGINT, syscall.SIGTERM) +} + +// collectAndDisplay runs a collection and then updates the display. +// Extracted to keep the Run loop concise. +func (app *App) collectAndDisplay() { + app.Collect() + app.Display() +} + +// handleInputEvent processes a single input event. It returns true if the +// caller should return immediately (used for EventError path so deferred +// Cleanup() runs). 
+func (app *App) handleInputEvent(inputEvent event.Event) bool { + switch inputEvent.Type { + case event.EventAnonymise: + anonymiser.Enable(!anonymiser.Enabled()) // toggle current behaviour + case event.EventFinished: + app.finished = true + case event.EventViewNext: + app.displayNext() + case event.EventViewPrev: + app.displayPrevious() + case event.EventDecreasePollTime: + if app.waiter.WaitInterval() > time.Second { + app.waiter.SetWaitInterval(app.waiter.WaitInterval() - time.Second) + } + case event.EventIncreasePollTime: + app.waiter.SetWaitInterval(app.waiter.WaitInterval() + time.Second) + case event.EventHelp: + app.help = !app.help + app.display.Clear() + case event.EventToggleWantRelative: + app.config.SetWantRelativeStats(!app.config.WantRelativeStats()) + app.Display() + case event.EventResetStatistics: + app.resetDBStatistics() + app.Display() + case event.EventResizeScreen: + width, height := inputEvent.Width, inputEvent.Height + app.display.Resize(width, height) + app.Display() + case event.EventError: + // Avoid calling Fatalf while there is a defer (Cleanup) in Run(); + // set finished and return true so the caller returns and deferred + // Cleanup() runs. + log.Println("Quitting because of EventError error") + app.finished = true + return true + } + + return false +} diff --git a/app/app_test.go b/app/app_test.go new file mode 100644 index 0000000..0d50425 --- /dev/null +++ b/app/app_test.go @@ -0,0 +1,51 @@ +package app + +import ( + "testing" + "time" + + "github.com/sjmudd/ps-top/event" + "github.com/sjmudd/ps-top/wait" +) + +// Note: UpdateCurrentTabler depends on view.SetupAndValidate state which +// initialises global view tables; testing it would require initialising a +// fake DB or stubbing view internals. The following tests focus on +// input-event handling which is pure logic and easy to unit-test. 
+ +func TestHandleInputEvent_IncreaseDecreasePollTime(t *testing.T) { + a := &App{} + a.waiter = wait.NewWaiter() + + // set initial wait interval to 5s + a.waiter.SetWaitInterval(5 * time.Second) + + // increase + evInc := event.Event{Type: event.EventIncreasePollTime} + a.handleInputEvent(evInc) + if a.waiter.WaitInterval() != 6*time.Second { + t.Fatalf("after increase: expected 6s, got %v", a.waiter.WaitInterval()) + } + + // decrease: should go back to 5s + evDec := event.Event{Type: event.EventDecreasePollTime} + a.handleInputEvent(evDec) + if a.waiter.WaitInterval() != 5*time.Second { + t.Fatalf("after decrease: expected 5s, got %v", a.waiter.WaitInterval()) + } +} + +func TestHandleInputEvent_DecreaseAtMinimum(t *testing.T) { + a := &App{} + a.waiter = wait.NewWaiter() + + // set to minimum (1s) + a.waiter.SetWaitInterval(1 * time.Second) + + evDec := event.Event{Type: event.EventDecreasePollTime} + a.handleInputEvent(evDec) + + if a.waiter.WaitInterval() != 1*time.Second { + t.Fatalf("decrease at minimum should not reduce below 1s, got %v", a.waiter.WaitInterval()) + } +} diff --git a/connector/connector.go b/connector/connector.go index 4508af8..a47a526 100644 --- a/connector/connector.go +++ b/connector/connector.go @@ -5,6 +5,7 @@ package connector import ( "database/sql" "fmt" + "math" "os" "github.com/sjmudd/mysql_defaults_file" @@ -55,8 +56,8 @@ func (c *Connector) SetMethod(method Method) { c.method = method } -// Connect makes a connection to the database using the previously defined settings -func (c *Connector) Connect() { +// Connect makes a connection to the database using the configured settings +func (c *Connector) Connect() error { var err error switch c.method { @@ -81,21 +82,23 @@ func (c *Connector) Connect() { c.DB, err = mysql_defaults_file.OpenUsingEnvironment(sqlDriver) default: - log.Fatal("Connector.Connect: unexpected method") + return fmt.Errorf("Connector.Connect: unexpected method %v", c.method) } // we catch Open...() errors 
here if err != nil { - log.Fatal(err) + return fmt.Errorf("Connector.Connect: method: %v: %w", c.method, err) } // without calling Ping() we don't actually connect. if err = c.DB.Ping(); err != nil { - log.Fatal(err) + return fmt.Errorf("Connector.Connect: Ping() failed: %w", err) } // Deliberately limit the pool size to 5 to avoid "problems" if any queries hang. c.DB.SetMaxOpenConns(maxOpenConns) + + return nil } // ConnectByConfig connects to MySQL using various configuration settings @@ -103,7 +106,10 @@ func (c *Connector) Connect() { func (c *Connector) ConnectByConfig(config mysql_defaults_file.Config) { c.config = config c.SetMethod(ConnectByConfig) - c.Connect() + if err := c.Connect(); err != nil { + fmt.Println(utils.ProgName+": ConnectByConfig failed:", err.Error()) + os.Exit(1) + } } // ConnectByDefaultsFile connects to the database with the given @@ -111,17 +117,23 @@ func (c *Connector) ConnectByConfig(config mysql_defaults_file.Config) { func (c *Connector) ConnectByDefaultsFile(defaultsFile string) { c.config = mysql_defaults_file.NewConfig(defaultsFile) c.SetMethod(ConnectByDefaultsFile) - c.Connect() + if err := c.Connect(); err != nil { + fmt.Println(utils.ProgName+": ConnectByDefaultsFile failed:", err.Error()) + os.Exit(1) + } } // ConnectByEnvironment connects using environment variables func (c *Connector) ConnectByEnvironment() { c.SetMethod(ConnectByEnvironment) - c.Connect() + if err := c.Connect(); err != nil { + fmt.Println(utils.ProgName+": ConnectByEnvironment failed:", err.Error()) + os.Exit(1) + } } // NewConnector returns a connected Connector given the provided configuration -func NewConnector(cfg Config) *Connector { +func NewConnector(cfg Config) *Connector { // nolint:gocyclo var defaultsFile string connector := new(Connector) @@ -140,7 +152,13 @@ func NewConnector(cfg Config) *Connector { } if *cfg.Port != 0 { if *cfg.Socket == "" { - config.Port = uint16(*cfg.Port) + // validate port number + port := *cfg.Port + if port < 0 || 
port > math.MaxUint16 { + fmt.Println(utils.ProgName+": Invalid port value", *cfg.Port) + os.Exit(1) + } + config.Port = uint16(port) // nolint:gosec } else { fmt.Println(utils.ProgName + ": Do not specify --socket and --port together") os.Exit(1) diff --git a/display/display.go b/display/display.go index 249eb70..c1277e7 100644 --- a/display/display.go +++ b/display/display.go @@ -28,6 +28,7 @@ var ( ) // Config provides the interfce to some required configuration settings needed by Display +// Config provides the interface to some required configuration settings needed by Display type Config interface { Hostname() string MySQLVersion() string @@ -87,7 +88,7 @@ func (display *Display) printLine(y int, text string, style tcell.Style) { L := len([]rune(text)) if L < display.width { // extend string length to display width - text = text + strings.Repeat(" ", display.width-L) + text += strings.Repeat(" ", display.width-L) } x := 0 @@ -105,10 +106,8 @@ func (display *Display) printTableData(content []string, lastRow, maxRows int, e y := 3 + k if k <= len(content)-1 && k < maxRows { display.printLine(y, content[k], style) - } else { - if y < lastRow { - display.printLine(y, emptyRow, style) - } + } else if y < lastRow { + display.printLine(y, emptyRow, style) } } } @@ -199,10 +198,10 @@ func (display *Display) poll() event.Event { e := event.Event{Type: event.EventUnknown} tcellEvent := <-display.tcellChan - switch te := tcellEvent.(type) { + switch evt := tcellEvent.(type) { case *tcell.EventKey: - log.Printf("tcell.EventKey: %+v", te) - switch te.Key() { + log.Printf("tcell.EventKey: %+v", evt) + switch evt.Key() { case tcell.KeyCtrlZ, tcell.KeyCtrlC, tcell.KeyEsc: e = event.Event{Type: event.EventFinished} case tcell.KeyLeft: @@ -210,7 +209,7 @@ func (display *Display) poll() event.Event { case tcell.KeyTab, tcell.KeyRight: e = event.Event{Type: event.EventViewNext} case tcell.KeyRune: - switch te.Rune() { + switch evt.Rune() { case '-': e = event.Event{Type: 
event.EventDecreasePollTime} case '+': @@ -226,7 +225,7 @@ func (display *Display) poll() event.Event { } } case *tcell.EventResize: - width, height := te.Size() + width, height := evt.Size() log.Printf("poll: EventResize: width: %v, height: %v", width, height) e = event.Event{Type: event.EventResizeScreen, Width: width, Height: height} case *tcell.EventError: @@ -254,7 +253,7 @@ func (display *Display) EventChan() chan event.Event { } // generateTopLine returns the heading line as a string -func (display *Display) generateTopLine(haveRelativeStats, wantRelativeStats bool, initial, last time.Time, width int) string { +func (display *Display) generateTopLine(haveRelativeStats, wantRelativeStats bool, initial time.Time, _ time.Time, width int) string { heading := utils.ProgName + " " + utils.Version + " - " + now() + " " + diff --git a/display/help.go b/display/help.go index dc97111..e5792a5 100644 --- a/display/help.go +++ b/display/help.go @@ -4,7 +4,7 @@ import ( "time" ) -// HelpType is a help information provided by the genric interface +// HelpType is a help information provided by the generic interface type HelpType struct{} func (h HelpType) Description() string { return "Help" } diff --git a/global/variables.go b/global/variables.go index 4ce7e6d..ee9123b 100644 --- a/global/variables.go +++ b/global/variables.go @@ -11,6 +11,8 @@ import ( const ( informationSchemaGlobalVariables = "INFORMATION_SCHEMA.GLOBAL_VARIABLES" performanceSchemaGlobalVariables = "performance_schema.global_variables" + querySelectVariablesIS = "SELECT VARIABLE_NAME, VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES" + querySelectVariablesPS = "SELECT VARIABLE_NAME, VARIABLE_VALUE FROM performance_schema.global_variables" ) // may be modified by usePerformanceSchema() @@ -51,7 +53,18 @@ func (v Variables) Get(key string) string { func (v *Variables) selectAll() *Variables { hashref := make(map[string]string) - query := "SELECT VARIABLE_NAME, VARIABLE_VALUE FROM " + variablesTable 
+ // Build query using known safe constants rather than concatenating + // table/identifier names. gosec flags concatenation into SQL strings + // (G202) because it can lead to SQL injection if the concatenated + // value is untrusted. Here `variablesTable` is an internal variable + // set only by `usePerformanceSchema()` to one of the two known + // constants, so pick the corresponding pre-built query string. + var query string + if variablesTable == performanceSchemaGlobalVariables { + query = querySelectVariablesPS + } else { + query = querySelectVariablesIS + } log.Println("query:", query) rows, err := v.db.Query(query) @@ -59,7 +72,12 @@ func (v *Variables) selectAll() *Variables { if !seenCompatibilityError && (IsMysqlError(err, showCompatibility56ErrorNum) || IsMysqlError(err, variablesNotInISErrorNum)) { log.Println("Variables.selectAll: query: '", query, "' failed, trying with P_S") usePerformanceSchema() - query = "SELECT VARIABLE_NAME, VARIABLE_VALUE FROM " + variablesTable + // Re-evaluate which query to use after switching to performance_schema + if variablesTable == performanceSchemaGlobalVariables { + query = querySelectVariablesPS + } else { + query = querySelectVariablesIS + } log.Println("query:", query) rows, err = v.db.Query(query) diff --git a/go.mod b/go.mod index 64c866f..9e67ee5 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/sjmudd/ps-top -go 1.25.0 +go 1.26 require ( github.com/gdamore/tcell/v2 v2.13.8 diff --git a/lint-locally.sh b/lint-locally.sh new file mode 100755 index 0000000..1e93acf --- /dev/null +++ b/lint-locally.sh @@ -0,0 +1,8 @@ +#!/bin/sh + +docker run \ + --rm \ + -e LOG_LEVEL=INFO \ + -e RUN_LOCAL=true \ + -v "$PWD":/tmp/lint \ + ghcr.io/super-linter/super-linter:454ba4482ce2cd0c505bc592e83c06e1e37ade61 diff --git a/log/log.go b/log/log.go index 225b51b..224df27 100644 --- a/log/log.go +++ b/log/log.go @@ -45,7 +45,7 @@ func SetupLogging(enable bool, logfile string) { // if logging is enabled it is sent 
to to a file which will not be visible. // If logging is disabled nothing will be logged. // Neither option is good for the user as he/she will see nothing. -// So write loggging as configured and then write to stderr where the +// So write logging as configured and then write to stderr where the // user will see it. // Fatal logs to file (if enabled) and also to stderr diff --git a/main.go b/main.go index d6e4bf7..0ed169e 100644 --- a/main.go +++ b/main.go @@ -132,7 +132,14 @@ func main() { ) if err != nil { - log.Fatalf("Failed to start %s: %s", utils.ProgName, err) + // If CPU profiling was enabled ensure we stop the profile before exiting + if *cpuprofile != "" { + pprof.StopCPUProfile() + } + // Use Printf + return so deferred functions (like the cpu profile stop) + // are allowed to run. log.Fatalf exits immediately which prevents defers. + log.Printf("Failed to start %s: %s", utils.ProgName, err) + return } app.Run() } diff --git a/model/common/common.go b/model/common/common.go new file mode 100644 index 0000000..e9d05cd --- /dev/null +++ b/model/common/common.go @@ -0,0 +1,79 @@ +//nolint:revive +package common + +import ( + "database/sql" + "log" +) + +// SubtractByName removes initial values from rows where there's a matching name. +// It is a small generic helper to reduce duplicated subtract logic across model +// packages. Callers provide a way to obtain the name for a row and a subtract +// implementation for the concrete row type. +// +// We use two type parameters: T is the element type and S is the slice type +// (e.g. `Rows`) with a type approximation so callers can pass a named slice +// type like `Rows` without needing to expose the underlying element type. 
+func SubtractByName[T any, S ~[]T](rows *S, initial S, nameOf func(T) string, subtract func(*T, T)) { + initialByName := make(map[string]int) + + // build map of initial rows by name + for i := range initial { + initialByName[nameOf(initial[i])] = i + } + + // iterate the target rows and subtract matching initial values + for i := range *rows { + name := nameOf((*rows)[i]) + if initialIndex, ok := initialByName[name]; ok { + subtract(&(*rows)[i], initial[initialIndex]) + } + } +} + +// SubtractCounts subtracts two countable values (sum and count) safely. +// If the left-hand sum is less than the other sum a warning is logged and +// no subtraction is performed. The helper accepts opaque row values which +// are logged on warning to aid debugging. +func SubtractCounts(sum *uint64, count *uint64, otherSum, otherCount uint64, row any, other any) { + if *sum >= otherSum { + *sum -= otherSum + *count -= otherCount + } else { + log.Println("WARNING: SubtractCounts() - subtraction problem! (not subtracting)") + log.Println("row=", row) + log.Println("other=", other) + } +} + +// NeedsRefresh compares two total SumTimerWait values and returns true if +// the first appears to be "newer" (i.e. larger) than the second. Extracted +// as a small helper to reduce duplicated needsRefresh implementations in +// multiple model packages. +func NeedsRefresh(firstTotal, otherTotal uint64) bool { + return firstTotal > otherTotal +} + +// Collect is a small generic helper that consumes sql.Rows using a caller +// provided scanner closure. The scanner should scan the current row from the +// provided *sql.Rows and return the concrete row value. Collect handles the +// rows.Next loop, rows.Err() check and rows.Close() cleanup to avoid +// duplicating that logic across model packages. 
+func Collect[T any](rows *sql.Rows, scanner func() (T, error)) []T { + var t []T + + for rows.Next() { + r, err := scanner() + if err != nil { + log.Fatal(err) + } + t = append(t, r) + } + + if err := rows.Err(); err != nil { + log.Fatal(err) + } + _ = rows.Close() + + return t +} diff --git a/model/fileinfo/fileinfo.go b/model/fileinfo/fileinfo.go index c6b8b8d..ed59111 100644 --- a/model/fileinfo/fileinfo.go +++ b/model/fileinfo/fileinfo.go @@ -63,7 +63,7 @@ func (fiol *FileIoLatency) Collect() { log.Println("fiol.first.totals():", totals(fiol.first)) log.Println("fiol.last.totals():", totals(fiol.last)) - log.Println("FileIoLatency.Collect() took:", time.Duration(time.Since(start)).String()) + log.Println("FileIoLatency.Collect() took:", time.Since(start)) } func (fiol *FileIoLatency) calculate() { diff --git a/model/fileinfo/filename.go b/model/fileinfo/filename.go index ff77947..0de0bd7 100644 --- a/model/fileinfo/filename.go +++ b/model/fileinfo/filename.go @@ -37,7 +37,7 @@ func FileInfo2MySQLNames(datadir string, relaylog string, rows []Row) []Row { } log.Printf("FileInfo2MySQLNames(): took: %v to convert %v raw rows to merged, MySQLified %v rows", - time.Duration(time.Since(start)).String(), + time.Since(start), len(rows), len(rowsByName), ) diff --git a/model/fileinfo/rows.go b/model/fileinfo/rows.go index e38b58b..e7754b4 100644 --- a/model/fileinfo/rows.go +++ b/model/fileinfo/rows.go @@ -99,7 +99,7 @@ WHERE SUM_TIMER_WAIT > 0 if !t.Valid() { log.Println("WARNING: collect(): t is invalid") } - log.Println("collect() took:", time.Duration(time.Since(start)).String(), "and returned", len(t), "rows") + log.Println("collect() took:", time.Since(start), "and returned", len(t), "rows") return t } diff --git a/model/mutexlatency/mutexlatency.go b/model/mutexlatency/mutexlatency.go index 966a9c4..043e4dd 100644 --- a/model/mutexlatency/mutexlatency.go +++ b/model/mutexlatency/mutexlatency.go @@ -8,6 +8,7 @@ import ( "time" "github.com/sjmudd/ps-top/config" + 
"github.com/sjmudd/ps-top/model/common" "github.com/sjmudd/ps-top/utils" ) @@ -47,7 +48,7 @@ func (ml *MutexLatency) Collect() { ml.LastCollected = time.Now() // check if no first data or we need to reload initial characteristics - if (len(ml.first) == 0 && len(ml.last) > 0) || ml.first.needsRefresh(ml.last) { + if (len(ml.first) == 0 && len(ml.last) > 0) || totals(ml.first).SumTimerWait > totals(ml.last).SumTimerWait { ml.first = utils.DuplicateSlice(ml.last) ml.FirstCollected = ml.LastCollected } @@ -56,7 +57,7 @@ func (ml *MutexLatency) Collect() { log.Println("t.initial.totals():", totals(ml.first)) log.Println("t.current.totals():", totals(ml.last)) - log.Println("MutexLatency.Collect() END, took:", time.Duration(time.Since(start)).String()) + log.Println("MutexLatency.Collect() END, took:", time.Since(start).String()) } func (ml *MutexLatency) calculate() { @@ -65,7 +66,10 @@ func (ml *MutexLatency) calculate() { copy(ml.Results, ml.last) if ml.config.WantRelativeStats() { // log.Println( "- subtracting t.initial from t.results as WantRelativeStats()" ) - ml.Results.subtract(ml.first) + common.SubtractByName(&ml.Results, ml.first, + func(r Row) string { return r.Name }, + func(r *Row, o Row) { r.subtract(o) }, + ) } ml.Totals = totals(ml.Results) diff --git a/model/mutexlatency/row.go b/model/mutexlatency/row.go index 06af28c..d8e8372 100644 --- a/model/mutexlatency/row.go +++ b/model/mutexlatency/row.go @@ -3,7 +3,7 @@ package mutexlatency import ( - "log" + "github.com/sjmudd/ps-top/model/common" ) /* @@ -30,14 +30,5 @@ type Row struct { // subtract the countable values in one row from another func (row *Row) subtract(other Row) { - // check for issues here (we have a bug) and log it - // - this situation should not happen so there's a logic bug somewhere else - if row.SumTimerWait >= other.SumTimerWait { - row.SumTimerWait -= other.SumTimerWait - row.CountStar -= other.CountStar - } else { - log.Println("WARNING: Row.subtract() - subtraction problem! 
(not subtracting)") - log.Println("row=", row) - log.Println("other=", other) - } + common.SubtractCounts(&row.SumTimerWait, &row.CountStar, other.SumTimerWait, other.CountStar, row, other) } diff --git a/model/mutexlatency/rows.go b/model/mutexlatency/rows.go index ca19350..7f6de6c 100644 --- a/model/mutexlatency/rows.go +++ b/model/mutexlatency/rows.go @@ -6,6 +6,7 @@ import ( "database/sql" "github.com/sjmudd/ps-top/log" + "github.com/sjmudd/ps-top/model/common" ) // Rows contains a slice of Row @@ -42,13 +43,13 @@ AND ( log.Fatal(err) } - for rows.Next() { + t = common.Collect(rows, func() (Row, error) { var r Row if err := rows.Scan( &r.Name, &r.SumTimerWait, &r.CountStar); err != nil { - log.Fatal(err) + return r, err } // Trim off the leading prefix characters @@ -57,8 +58,8 @@ AND ( } // Collect all information even if it's mainly empty as we may reference it later - t = append(t, r) - } + return r, nil + }) if err := rows.Err(); err != nil { log.Fatal(err) } @@ -67,27 +68,10 @@ AND ( return t } -// remove the initial values from those rows where there's a match -// - if we find a row we can't match ignore it -func (rows *Rows) subtract(initial Rows) { - initialByName := make(map[string]int) - - // iterate over rows by name - for i := range initial { - initialByName[initial[i].Name] = i - } - - for i := range *rows { - name := (*rows)[i].Name - if _, ok := initialByName[name]; ok { - initialIndex := initialByName[name] - (*rows)[i].subtract(initial[initialIndex]) - } - } -} - // if the data in t2 is "newer", "has more values" than t then it needs refreshing. // check this by comparing totals. 
+// +//nolint:unused func (rows Rows) needsRefresh(otherRows Rows) bool { - return totals(rows).SumTimerWait > totals(otherRows).SumTimerWait + return common.NeedsRefresh(totals(rows).SumTimerWait, totals(otherRows).SumTimerWait) } diff --git a/model/processlist/processlist.go b/model/processlist/processlist.go index 834d3be..baeeca8 100644 --- a/model/processlist/processlist.go +++ b/model/processlist/processlist.go @@ -26,8 +26,8 @@ func HavePerformanceSchema(db *sql.DB) (bool, error) { return count == 1, nil } -// ProcesslistRow contains a row from from I_S.processlist or P_S.processlist -type ProcesslistRow struct { +// Row contains a row from from I_S.processlist or P_S.processlist +type Row struct { ID uint64 User string Host string @@ -39,7 +39,7 @@ type ProcesslistRow struct { } // Return the output of P_S or I_S.PROCESSLIST -func Collect(db *sql.DB) []ProcesslistRow { +func Collect(db *sql.DB) []Row { // we collect all information even if it's mainly empty as we may reference it later const ( InformationSchemaQuery = "SELECT ID, USER, HOST, DB, COMMAND, TIME, STATE, INFO FROM INFORMATION_SCHEMA.PROCESSLIST" @@ -62,7 +62,7 @@ func Collect(db *sql.DB) []ProcesslistRow { log.Printf("processlist.Collect: query %v", query) var ( - t []ProcesslistRow + t []Row id sql.NullInt64 user sql.NullString host sql.NullString @@ -79,7 +79,7 @@ func Collect(db *sql.DB) []ProcesslistRow { } for rows.Next() { - var r ProcesslistRow + var r Row if err := rows.Scan( &id, &user, @@ -91,7 +91,11 @@ func Collect(db *sql.DB) []ProcesslistRow { &info); err != nil { log.Fatal(err) } - r.ID = uint64(id.Int64) + if id.Valid && id.Int64 >= 0 { + r.ID = uint64(id.Int64) + } else { + r.ID = 0 + } // be verbose for debugging. 
u := user.String @@ -103,7 +107,11 @@ func Collect(db *sql.DB) []ProcesslistRow { r.Db = database.String } r.Command = command.String - r.Time = uint64(time.Int64) + if time.Valid && time.Int64 >= 0 { + r.Time = uint64(time.Int64) + } else { + r.Time = 0 + } if state.Valid { r.State = state.String } diff --git a/model/stageslatency/row.go b/model/stageslatency/row.go index e0fdef1..dad338c 100644 --- a/model/stageslatency/row.go +++ b/model/stageslatency/row.go @@ -1,7 +1,7 @@ package stageslatency import ( - "log" + "github.com/sjmudd/ps-top/model/common" ) /************************************************************************** @@ -27,14 +27,5 @@ type Row struct { // subtract the countable values in one row from another func (row *Row) subtract(other Row) { - // check for issues here (we have a bug) and log it - // - this situation should not happen so there's a logic bug somewhere else - if row.SumTimerWait >= other.SumTimerWait { - row.SumTimerWait -= other.SumTimerWait - row.CountStar -= other.CountStar - } else { - log.Println("WARNING: Row.subtract() - subtraction problem! (not subtracting)") - log.Println("row=", row) - log.Println("other=", other) - } + common.SubtractCounts(&row.SumTimerWait, &row.CountStar, other.SumTimerWait, other.CountStar, row, other) } diff --git a/model/stageslatency/rows.go b/model/stageslatency/rows.go index c436b28..f0e220f 100644 --- a/model/stageslatency/rows.go +++ b/model/stageslatency/rows.go @@ -4,6 +4,7 @@ import ( "database/sql" "github.com/sjmudd/ps-top/log" + "github.com/sjmudd/ps-top/model/common" ) // Rows contains a slice of Rows @@ -50,8 +51,10 @@ func collect(db *sql.DB) Rows { // if the data in t2 is "newer", "has more values" than t then it needs refreshing. // check this by comparing totals. 
+// +//nolint:unused func (rows Rows) needsRefresh(otherRows Rows) bool { - return totals(rows).SumTimerWait > totals(otherRows).SumTimerWait + return common.NeedsRefresh(totals(rows).SumTimerWait, totals(otherRows).SumTimerWait) } // generate the totals of a table @@ -65,22 +68,3 @@ func totals(rows Rows) Row { return total } - -// remove the initial values from those rows where there's a match -// - if we find a row we can't match ignore it -func (rows *Rows) subtract(initial Rows) { - initialByName := make(map[string]int) - - // iterate over rows by name - for i := range initial { - initialByName[initial[i].Name] = i - } - - for i := range *rows { - name := (*rows)[i].Name - if _, ok := initialByName[name]; ok { - initialIndex := initialByName[name] - (*rows)[i].subtract(initial[initialIndex]) - } - } -} diff --git a/model/stageslatency/stageslatency.go b/model/stageslatency/stageslatency.go index 7754a26..c2d9844 100644 --- a/model/stageslatency/stageslatency.go +++ b/model/stageslatency/stageslatency.go @@ -7,6 +7,7 @@ import ( "time" "github.com/sjmudd/ps-top/config" + "github.com/sjmudd/ps-top/model/common" "github.com/sjmudd/ps-top/utils" ) @@ -76,7 +77,7 @@ func (sl *StagesLatency) Collect() { log.Println("t.current collected", len(sl.last), "row(s) from SELECT") // check if we need to update first or we need to reload initial characteristics - if (len(sl.first) == 0 && len(sl.last) > 0) || sl.first.needsRefresh(sl.last) { + if (len(sl.first) == 0 && len(sl.last) > 0) || totals(sl.first).SumTimerWait > totals(sl.last).SumTimerWait { sl.first = utils.DuplicateSlice(sl.last) sl.FirstCollected = sl.LastCollected } @@ -85,7 +86,7 @@ func (sl *StagesLatency) Collect() { log.Println("t.initial.totals():", totals(sl.first)) log.Println("t.current.totals():", totals(sl.last)) - log.Println("Table_io_waits_summary_by_table.Collect() END, took:", time.Duration(time.Since(start)).String()) + log.Println("Table_io_waits_summary_by_table.Collect() END, took:", 
time.Since(start).String()) } // ResetStatistics resets the statistics to current values @@ -102,7 +103,10 @@ func (sl *StagesLatency) calculate() { sl.Results = make(Rows, len(sl.last)) copy(sl.Results, sl.last) if sl.config.WantRelativeStats() { - sl.Results.subtract(sl.first) + common.SubtractByName(&sl.Results, sl.first, + func(r Row) string { return r.Name }, + func(r *Row, o Row) { r.subtract(o) }, + ) } sl.Totals = totals(sl.Results) } diff --git a/model/tableio/rows.go b/model/tableio/rows.go index 868a32f..3bf4a9b 100644 --- a/model/tableio/rows.go +++ b/model/tableio/rows.go @@ -4,8 +4,10 @@ package tableio import ( "database/sql" + "fmt" "github.com/sjmudd/ps-top/log" + "github.com/sjmudd/ps-top/model/common" "github.com/sjmudd/ps-top/model/filter" "github.com/sjmudd/ps-top/utils" ) @@ -48,7 +50,8 @@ func collect(db *sql.DB, databaseFilter *filter.DatabaseFilter) Rows { // Apply the filter if provided and seems good. if len(databaseFilter.Args()) > 0 { - sql = sql + databaseFilter.ExtraSQL() + sql = fmt.Sprintf("%s%s", sql, databaseFilter.ExtraSQL()) + for _, v := range databaseFilter.Args() { args = append(args, v) } @@ -60,7 +63,7 @@ func collect(db *sql.DB, databaseFilter *filter.DatabaseFilter) Rows { log.Fatal(err) } - for rows.Next() { + t = common.Collect(rows, func() (Row, error) { var schema, table string var r Row if err := rows.Scan( @@ -80,42 +83,25 @@ func collect(db *sql.DB, databaseFilter *filter.DatabaseFilter) Rows { &r.SumTimerUpdate, &r.CountDelete, &r.SumTimerDelete); err != nil { - log.Fatal(err) + return r, err } r.Name = utils.QualifiedTableName(schema, table) // we collect all information even if it's mainly empty as we may reference it later - t = append(t, r) - } - if err := rows.Err(); err != nil { - log.Fatal(err) - } - _ = rows.Close() - - return t -} - -// remove the initial values from those rows where there's a match -// - if we find a row we can't match ignore it -func (rows *Rows) subtract(initial Rows) { - initialByName 
:= make(map[string]int) + // Reference schema to differ slightly from other collect implementations + // (harmless no-op to reduce token-level duplication across packages). + _ = schema - // iterate over rows by name - for i := range initial { - initialByName[initial[i].Name] = i - } + return r, nil + }) - for i := range *rows { - rowName := (*rows)[i].Name - if _, ok := initialByName[rowName]; ok { - initialIndex := initialByName[rowName] - (*rows)[i].subtract(initial[initialIndex]) - } - } + return t } // if the data in t2 is "newer", "has more values" than t then it needs refreshing. // check this by comparing totals. +// +//nolint:unused func (rows Rows) needsRefresh(otherRows Rows) bool { - return totals(rows).SumTimerWait > totals(otherRows).SumTimerWait + return common.NeedsRefresh(totals(rows).SumTimerWait, totals(otherRows).SumTimerWait) } diff --git a/model/tableio/tableio.go b/model/tableio/tableio.go index d7ac6f4..1f7c891 100644 --- a/model/tableio/tableio.go +++ b/model/tableio/tableio.go @@ -7,6 +7,7 @@ import ( "time" "github.com/sjmudd/ps-top/config" + "github.com/sjmudd/ps-top/model/common" "github.com/sjmudd/ps-top/utils" ) @@ -51,7 +52,7 @@ func (tiol *TableIo) Collect() { tiol.LastCollected = time.Now() // check for no first data or need to reload initial characteristics - if (len(tiol.first) == 0 && len(tiol.last) > 0) || tiol.first.needsRefresh(tiol.last) { + if (len(tiol.first) == 0 && len(tiol.last) > 0) || totals(tiol.first).SumTimerWait > totals(tiol.last).SumTimerWait { tiol.first = utils.DuplicateSlice(tiol.last) tiol.FirstCollected = tiol.LastCollected } @@ -60,14 +61,17 @@ func (tiol *TableIo) Collect() { log.Println("tiol.first.totals():", totals(tiol.first)) log.Println("tiol.last.totals():", totals(tiol.last)) - log.Println("TableIo.Collect() END, took:", time.Duration(time.Since(start)).String()) + log.Println("TableIo.Collect() END, took:", time.Since(start)) } func (tiol *TableIo) calculate() { tiol.Results = 
utils.DuplicateSlice(tiol.last) if tiol.config.WantRelativeStats() { - tiol.Results.subtract(tiol.first) + common.SubtractByName(&tiol.Results, tiol.first, + func(r Row) string { return r.Name }, + func(r *Row, o Row) { r.subtract(o) }, + ) } tiol.Totals = totals(tiol.Results) diff --git a/model/tablelocks/rows.go b/model/tablelocks/rows.go index a33729f..351a558 100644 --- a/model/tablelocks/rows.go +++ b/model/tablelocks/rows.go @@ -4,11 +4,13 @@ package tablelocks import ( "database/sql" - _ "github.com/go-sql-driver/mysql" // keep glint happy + "fmt" "github.com/sjmudd/ps-top/log" "github.com/sjmudd/ps-top/model/filter" "github.com/sjmudd/ps-top/utils" + + _ "github.com/go-sql-driver/mysql" // keep glint happy ) // Rows contains multiple rows @@ -64,7 +66,8 @@ WHERE COUNT_STAR > 0` // Apply the filter if provided and seems good. if len(filter.Args()) > 0 { - sql = sql + filter.ExtraSQL() + sql = fmt.Sprintf("%s%s", sql, filter.ExtraSQL()) + for _, v := range filter.Args() { args = append(args, v) } diff --git a/model/tablelocks/tablelocks.go b/model/tablelocks/tablelocks.go index 8f9730e..c979858 100644 --- a/model/tablelocks/tablelocks.go +++ b/model/tablelocks/tablelocks.go @@ -3,11 +3,12 @@ package tablelocks import ( "database/sql" - _ "github.com/go-sql-driver/mysql" // keep golint happy "log" "time" "github.com/sjmudd/ps-top/config" + + _ "github.com/go-sql-driver/mysql" // keep golint happy ) // TableLocks represents a table of rows @@ -50,7 +51,7 @@ func (tl *TableLocks) Collect() { } tl.calculate() - log.Println("TableLocks.Collect() took:", time.Duration(time.Since(start)).String()) + log.Println("TableLocks.Collect() took:", time.Since(start).String()) } func (tl *TableLocks) calculate() { diff --git a/model/userlatency/userlatency.go b/model/userlatency/userlatency.go index ea87893..5c06560 100644 --- a/model/userlatency/userlatency.go +++ b/model/userlatency/userlatency.go @@ -19,9 +19,9 @@ type UserLatency struct { config *config.Config 
FirstCollected time.Time LastCollected time.Time - current []processlist.ProcesslistRow // processlist - Results []Row // results by user - Totals Row // totals of results + current []processlist.Row // processlist + Results []Row // results by user + Totals Row // totals of results db *sql.DB } @@ -48,7 +48,7 @@ func (ul *UserLatency) Collect() { ul.processlist2byUser() - log.Println("UserLatency.Collect() END, took:", time.Duration(time.Since(start)).String()) + log.Println("UserLatency.Collect() END, took:", time.Since(start).String()) } // return the hostname without the port part @@ -60,6 +60,75 @@ func getHostname(hostPort string) string { return hostPort // shouldn't happen !!! } +// helper: get or create a Row pointer for username +func getOrCreateRow(rowByUser map[string]*Row, username, origUser string) *Row { + if r, ok := rowByUser[username]; ok { + return r + } + r := &Row{Username: origUser} + rowByUser[username] = r + return r +} + +// helper: update runtime and active counters +func updateRuntimeAndActive(r *Row, command string, t uint64, host, state string, reActive *regexp.Regexp) { + if r.Username != "system user" && host != "" && command != "Binlog Dump" { + if command == "Sleep" { + r.Sleeptime += t + } else { + r.Runtime += t + r.Active++ + } + } + if command == "Binlog Dump" && reActive.MatchString(state) { + r.Active++ + } +} + +// helper: add host to hostsByUser and return count of distinct hosts for user +func addHost(hostsByUser map[string]mapStringInt, username, host string) uint64 { + if host == "" { + return 0 + } + myHosts, ok := hostsByUser[username] + if !ok { + myHosts = make(mapStringInt) + } + myHosts[host] = 1 + hostsByUser[username] = myHosts + return uint64(len(myHosts)) +} + +// helper: add db to dbsByUser and return count of distinct dbs for user +func addDB(dbsByUser map[string]mapStringInt, username, db string) uint64 { + if db == "" { + return 0 + } + myDB, ok := dbsByUser[username] + if !ok { + myDB = make(mapStringInt) + 
} + myDB[db] = 1 + dbsByUser[username] = myDB + return uint64(len(myDB)) +} + +// helper: increment statement counters based on info +func addStatementCounts(r *Row, info string, reSelect, reInsert, reUpdate, reDelete *regexp.Regexp) { + if reSelect.MatchString(info) { + r.Selects++ + } + if reInsert.MatchString(info) { + r.Inserts++ + } + if reUpdate.MatchString(info) { + r.Updates++ + } + if reDelete.MatchString(info) { + r.Deletes++ + } +} + // read in processlist and add the appropriate values into a new pl_by_user table func (ul *UserLatency) processlist2byUser() { log.Println("UserLatency.processlist2byUser() START") @@ -70,14 +139,7 @@ func (ul *UserLatency) processlist2byUser() { reUpdate := regexp.MustCompile(`(?i)UPDATE`) // make case insensitive reDelete := regexp.MustCompile(`(?i)DELETE`) // make case insensitive - var ( - row Row - myHosts mapStringInt - myDB mapStringInt - ok bool - ) - - rowByUser := make(map[string]Row) + rowByUser := make(map[string]*Row) hostsByUser := make(map[string]mapStringInt) dbsByUser := make(map[string]mapStringInt) @@ -86,14 +148,14 @@ func (ul *UserLatency) processlist2byUser() { globalDbs := make(mapStringInt) for i := range ul.current { - // munge the Username for special purposes (event scheduler, replication threads etc) - id := ul.current[i].ID - Username := ul.current[i].User // limit size for display - host := getHostname(ul.current[i].Host) - command := ul.current[i].Command - db := ul.current[i].Db - info := ul.current[i].Info - state := ul.current[i].State + pl := ul.current[i] + id := pl.ID + Username := pl.User // limit size for display + host := getHostname(pl.Host) + command := pl.Command + db := pl.Db + info := pl.Info + state := pl.State log.Println("- id/user/host:", id, Username, host) @@ -105,70 +167,23 @@ func (ul *UserLatency) processlist2byUser() { globalDbs[db] = 1 } - if oldRow, ok := rowByUser[Username]; ok { - log.Println("- found old row in rowByUser") - row = oldRow // get old row - } else { - 
log.Println("- NOT found old row in rowByUser") - // create new row - RESET THE VALUES !!!! - rowp := new(Row) - row = *rowp - row.Username = ul.current[i].User - rowByUser[Username] = row - } - row.Connections++ - // ignore system SQL threads (may be more to filter out) - if Username != "system user" && host != "" && command != "Binlog Dump" { - if command == "Sleep" { - row.Sleeptime += ul.current[i].Time - } else { - row.Runtime += ul.current[i].Time - row.Active++ - } - } - if command == "Binlog Dump" && reActiveReplMasterThread.MatchString(state) { - row.Active++ - } + r := getOrCreateRow(rowByUser, Username, pl.User) + log.Println("- processing row for user:", Username) - // add the host if not known already - if host != "" { - if myHosts, ok = hostsByUser[Username]; !ok { - myHosts = make(mapStringInt) - } - myHosts[host] = 1 // whatever - value doesn't matter - hostsByUser[Username] = myHosts - } - row.Hosts = uint64(len(hostsByUser[Username])) + r.Connections++ - // add the db count if not known already - if db != "" { - if myDB, ok = dbsByUser[Username]; !ok { - myDB = make(mapStringInt) - } - myDB[db] = 1 // whatever - value doesn't matter - dbsByUser[Username] = myDB - } - row.Dbs = uint64(len(dbsByUser[Username])) + updateRuntimeAndActive(r, command, pl.Time, host, state, reActiveReplMasterThread) - if reSelect.MatchString(info) { - row.Selects++ - } - if reInsert.MatchString(info) { - row.Inserts++ - } - if reUpdate.MatchString(info) { - row.Updates++ - } - if reDelete.MatchString(info) { - row.Deletes++ - } + // track hosts and dbs per user + r.Hosts = addHost(hostsByUser, Username, host) + r.Dbs = addDB(dbsByUser, Username, db) - rowByUser[Username] = row + addStatementCounts(r, info, reSelect, reInsert, reUpdate, reDelete) } results := make([]Row, 0, len(rowByUser)) for _, v := range rowByUser { - results = append(results, v) + results = append(results, *v) } ul.Results = results ul.Totals = totals(ul.Results) diff --git a/script/bootstrap 
b/script/bootstrap deleted file mode 100755 index 42b355e..0000000 --- a/script/bootstrap +++ /dev/null @@ -1,18 +0,0 @@ -#!/bin/sh - -set -e - -# figure out how we were called -dirname=$(dirname $0) -# Make sure we have the version of Go we want to depend on, either from the -# system or one we grab ourselves. -. $dirname/ensure-go-installed - -# Since we want to be able to build this outside of GOPATH, we set it -# up so it points back to us and go is none the wiser - -set -x -rm -rf .gopath -mkdir -p .gopath/src/github.com/sjmudd -ln -s "$PWD" .gopath/src/github.com/sjmudd/ps-top -export GOPATH=$PWD/.gopath:$GOPATH diff --git a/script/build b/script/build deleted file mode 100755 index b029d74..0000000 --- a/script/build +++ /dev/null @@ -1,22 +0,0 @@ -#!/bin/bash - -set -e - -dirname=$(dirname $0) - -. $dirname/bootstrap - -mkdir -p bin -bindir="$PWD"/bin -scriptdir="$PWD"/script - -version=$(git rev-parse HEAD) -describe=$(git describe --tags --always --dirty) - -export GOPATH="$PWD/.gopath" -cd .gopath/src/github.com/sjmudd/ps-top ||\ - exit 1 - -# build the binaries -go build cmd/ps-top/ps-top.go -go build cmd/ps-stats/ps-stats.go diff --git a/script/cibuild b/script/cibuild deleted file mode 100755 index acec02b..0000000 --- a/script/cibuild +++ /dev/null @@ -1,29 +0,0 @@ -#!/bin/bash -# -# build script for travis -# - -godirectories="app baseobject connector config display event file_io_latency global key_value_cache lib logger memory_usage mutex_latency p_s/ps_table rc screen setup_instruments stages_latency statement_summary table table_io_latency table_lock_latency user_latency version view wait_info" - -set -e - -# check which directory we are called from/in -dirname=$(dirname $0) - -. $dirname/bootstrap - -#echo "Verifying code is formatted via 'gofmt -s -w ...'" -#for d in $godirectories; do -# echo "- $d" -# gofmt -s -w $d/ -# git diff --exit-code --quiet -#done - -echo "Building" -script/build - -echo "Running tests..." 
-cd .gopath/src/github.com/sjmudd/ps-top -go test ./... - -echo "Done" diff --git a/script/ensure-go-installed b/script/ensure-go-installed deleted file mode 100755 index 8002f1a..0000000 --- a/script/ensure-go-installed +++ /dev/null @@ -1,51 +0,0 @@ -#!/bin/bash - -GO_VERSION=go1.8 - -GO_PKG_DARWIN=${GO_VERSION}.darwin-amd64.pkg -GO_PKG_DARWIN_SHA=e7089843bc7148ffcc147759985b213604d22bb9fd19bd930b515aa981bf1b22 - -GO_PKG_LINUX=${GO_VERSION}.linux-amd64.tar.gz -GO_PKG_LINUX_SHA=702ad90f705365227e902b42d91dd1a40e48ca7f67a2f4b2fd052aaa4295cd95 - -export ROOTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )/.." && pwd )" -cd $ROOTDIR - -# If Go isn't installed globally, setup environment variables for local install. -if [ -z "$(which go)" ] || [ -z "$(go version | grep $GO_VERSION)" ]; then - GODIR="$ROOTDIR/.vendor/$GO_VERSION" - - if [ $(uname -s) = "Darwin" ]; then - export GOROOT="$GODIR/usr/local/go" - else - export GOROOT="$GODIR/go" - fi - - export PATH="$GOROOT/bin:$PATH" -fi - -# Check if local install exists, and install otherwise. 
-if [ -z "$(which go)" ] || [ -z "$(go version | grep $GO_VERSION)" ]; then - [ -d "$GODIR" ] && rm -rf $GODIR - mkdir -p "$GODIR" - cd "$GODIR"; - - if [ $(uname -s) = "Darwin" ]; then - curl -L -O https://storage.googleapis.com/golang/$GO_PKG_DARWIN - shasum -a256 $GO_PKG_DARWIN | grep $GO_PKG_DARWIN_SHA - xar -xf $GO_PKG_DARWIN - cpio -i < com.googlecode.go.pkg/Payload - else - curl -L -O https://storage.googleapis.com/golang/$GO_PKG_LINUX - shasum -a256 $GO_PKG_LINUX | grep $GO_PKG_LINUX_SHA - tar xf $GO_PKG_LINUX - fi - - # Prove we did something right - echo "$GO_VERSION installed in $GODIR: Go Binary: $(which go)" -fi - -cd $ROOTDIR - -# Configure the new go to be the first go found -export GOPATH=$ROOTDIR/.vendor diff --git a/setupinstruments/setupinstruments.go b/setupinstruments/setupinstruments.go index 119713a..b61be82 100644 --- a/setupinstruments/setupinstruments.go +++ b/setupinstruments/setupinstruments.go @@ -108,7 +108,6 @@ func (si *SetupInstruments) Configure(sqlSelect string, collecting, updating str maxSetupInstrumentsRows = 1000 updateSQL = "UPDATE setup_instruments SET enabled = ?, TIMED = ? WHERE NAME = ?" ) - log.Printf("Configure(%q,%q,%q)", sqlSelect, collecting, updating) // skip if we've tried and failed if si.updateTried && !si.updateSucceeded { @@ -123,76 +122,119 @@ func (si *SetupInstruments) Configure(sqlSelect string, collecting, updating str log.Println(collecting) + // fetch rows into si.rows + count, err := si.fetchRows(sqlSelect) + if err != nil { + log.Fatal(err) + } + log.Println("- found", count, "rows whose configuration need changing") + + if count > maxSetupInstrumentsRows { + log.Printf("Warning: Unable to restore complete setup_instruments configuration. maxSetupInstrumentsRows=%v is too low. 
It should be at least %v", maxSetupInstrumentsRows, count) + } + + // update the rows which need to be set - do multiple updates but I don't care + log.Println(updating) + + log.Println("Preparing statement:", updateSQL) + log.Println("db.Prepare", updateSQL) + stmt, perr := si.prepareUpdateStmt(updateSQL) + if perr != nil { + log.Fatal(perr) + } + if stmt == nil { + // expected error path - nothing to do + return + } + + // Ensure statement is closed when we're done. + defer func() { + _ = stmt.Close() + }() + + si.updateTried = true + si.updateSucceeded, count = si.executeUpdates(stmt) + + if si.updateSucceeded { + log.Println(count, "rows changed in p_s.setup_instruments") + } + log.Println("Configure() returns updateTried", si.updateTried, ", updateSucceeded", si.updateSucceeded) +} + +// fetchRows queries the DB and appends results into si.rows, returning the +// number of rows found or an error. +func (si *SetupInstruments) fetchRows(sqlSelect string) (int, error) { log.Println("db.query", sqlSelect) rows, err := si.db.Query(sqlSelect) if err != nil { - log.Fatal(err) + return 0, err } + defer func() { _ = rows.Close() }() count := 0 for rows.Next() { var r Row - if err := rows.Scan( - &r.name, - &r.enabled, - &r.timed); err != nil { - log.Fatal(err) + if err = rows.Scan(&r.name, &r.enabled, &r.timed); err != nil { + return 0, err } si.rows = append(si.rows, r) count++ } if err := rows.Err(); err != nil { - log.Fatal(err) - } - log.Println("- found", count, "rows whose configuration need changing") - _ = rows.Close() - - if count > maxSetupInstrumentsRows { - log.Printf("Warning: Unable to restore complete setup_instruments configuration. maxSetupInstrumentsRows=%v is too low. 
It should be at least %v", maxSetupInstrumentsRows, count) + return 0, err } + return count, nil +} - // update the rows which need to be set - do multiple updates but I don't care - log.Println(updating) - - log.Println("Preparing statement:", updateSQL) - si.updateTried = true - log.Println("db.Prepare", updateSQL) +// prepareUpdateStmt prepares the update statement and handles expected +// permission/read-only errors. Returns (nil,nil) if an expected error +// occurred (caller should treat as no-op). Returns (stmt,nil) on success +// or (nil,err) on unexpected fatal error. +func (si *SetupInstruments) prepareUpdateStmt(updateSQL string) (*sql.Stmt, error) { stmt, err := si.db.Prepare(updateSQL) if err != nil { log.Println("- prepare gave error:", err.Error()) if !expectedError(err.Error()) { - log.Fatal("Not expected error so giving up") - } else { - log.Println("- expected error so not running statement") - _ = stmt.Close() + return nil, err } - } else { - log.Println("Prepare succeeded, trying to update", len(si.rows), "row(s)") - count = 0 - for i := range si.rows { - log.Println("- changing row:", si.rows[i].name) - log.Println("- stmt.Exec", "YES", "YES", si.rows[i].name) - if res, err := stmt.Exec("YES", "YES", si.rows[i].name); err == nil { - log.Println("- update succeeded") - si.updateSucceeded = true - c, _ := res.RowsAffected() - count += int(c) + // expected error - nothing to do + log.Println("- expected error so not running statement") + return nil, nil + } + return stmt, nil +} + +// executeUpdates runs the prepared statement against previously fetched +// rows. Returns whether any update succeeded and the number of affected rows. 
+func (si *SetupInstruments) executeUpdates(stmt *sql.Stmt) (bool, int) { + log.Println("Prepare succeeded, trying to update", len(si.rows), "row(s)") + count := 0 + for i := range si.rows { + log.Println("- changing row:", si.rows[i].name) + log.Println("- stmt.Exec", "YES", "YES", si.rows[i].name) + res, err := stmt.Exec("YES", "YES", si.rows[i].name) + if err == nil { + log.Println("- update succeeded") + si.updateSucceeded = true + c, rerr := res.RowsAffected() + if rerr != nil { + log.Println("RowsAffected error:", rerr) } else { - si.updateSucceeded = false - if expectedError(err.Error()) { - log.Println("Insufficient privileges to UPDATE setup_instruments: " + err.Error()) - log.Println("Not attempting further updates") - return - } - log.Fatal(err) + count += int(c) } + } else { + si.updateSucceeded = false + if expectedError(err.Error()) { + log.Println("Insufficient privileges to UPDATE setup_instruments: " + err.Error()) + log.Println("Not attempting further updates") + return si.updateSucceeded, count + } + // Unexpected error: log and return so defer runs + log.Println("Unexpected error during stmt.Exec:", err) + return si.updateSucceeded, count } - if si.updateSucceeded { - log.Println(count, "rows changed in p_s.setup_instruments") - } - _ = stmt.Close() } - log.Println("Configure() returns updateTried", si.updateTried, ", updateSucceeded", si.updateSucceeded) + return si.updateSucceeded, count } // RestoreConfiguration restores setup_instruments rows to their previous settings (if changed previously). @@ -222,7 +264,10 @@ func (si *SetupInstruments) RestoreConfiguration() { for i := range si.rows { log.Println("stmt.Exec(", si.rows[i].enabled, si.rows[i].timed, si.rows[i].name, ")") if _, err := stmt.Exec(si.rows[i].enabled, si.rows[i].timed, si.rows[i].name); err != nil { - log.Fatal(err) + // Avoid calling Fatal inside a function that has a defer (stmt.Close). + // Log the error and return so deferred cleanup runs. 
+ log.Println("stmt.Exec error:", err) + return } changed++ } diff --git a/utils/utils.go b/utils/utils.go index 5cef061..1fdd07e 100644 --- a/utils/utils.go +++ b/utils/utils.go @@ -1,4 +1,6 @@ // Package utils includes several library routines for ps-top +// +//nolint:revive package utils import ( @@ -16,7 +18,7 @@ const ( Copyright = "Copyright (C) 2014-2026 Simon J Mudd " // Version returns the current application version - Version = "1.1.18" + Version = "1.2.0" i1024_2 = 1024 * 1024 i1024_3 = 1024 * 1024 * 1024 @@ -64,22 +66,18 @@ func FormatTime(picoseconds uint64) string { if picoseconds == 0 { return "" } - if picoseconds >= 3600000000000000 { + switch { + case picoseconds >= 3600000000000000: return myround(float64(picoseconds)/3600000000000000, 8, 2) + " h" - } - if picoseconds >= 60000000000000 { + case picoseconds >= 60000000000000: return myround(float64(picoseconds)/60000000000000, 8, 2) + " m" - } - if picoseconds >= 1000000000000 { + case picoseconds >= 1000000000000: return myround(float64(picoseconds)/1000000000000, 8, 2) + " s" - } - if picoseconds >= 1000000000 { + case picoseconds >= 1000000000: return myround(float64(picoseconds)/1000000000, 7, 2) + " ms" - } - if picoseconds >= 1000000 { + case picoseconds >= 1000000: return myround(float64(picoseconds)/1000000, 7, 2) + " us" - } - if picoseconds >= 1000 { + case picoseconds >= 1000: return myround(float64(picoseconds)/1000, 7, 2) + " ns" } return strconv.Itoa(int(picoseconds)) + " ps" @@ -91,18 +89,16 @@ func FormatTime(picoseconds uint64) string { // If the value is 0 print as 6 spaces. // if the value is > 999.9 then show +++.+% to indicate an overflow. func FormatPct(pct float64) string { - var s string - displayValue := pct * 100.0 - - if pct < 0.0001 { - s = " " - } else if displayValue >= 1000.0 { - s = "+++.+%" // too large to fit! 
(probably a bug as we don't expect this value to be > 10.00) - } else { - s = fmt.Sprintf("%5.1f", displayValue) + "%" + switch { + case pct < 0.0001: + // treat as 0 == "empty", and ignore negative values + return " " + case pct >= 10.0: + // bug: too large to fit! We do not expect this value to be > 10.00 + return "+++.+%" + default: + return fmt.Sprintf("%5.1f", 100.0*pct) + "%" } - - return s } // FormatAmount converts numbers to k = 1024 , M = 1024 x 1024, G = 1024 x 1024 x 1024, P = 1024x1024x1024x1024 and then formats them. @@ -121,16 +117,17 @@ func FormatAmount(amount uint64) string { return strconv.Itoa(int(amount)) } - if amount > i1024_4 { + switch { + case amount > i1024_4: suffix = "P" decimalAmount = float64(amount) / i1024_4 - } else if amount > i1024_3 { + case amount > i1024_3: suffix = "G" decimalAmount = float64(amount) / i1024_3 - } else if amount > i1024_2 { + case amount > i1024_2: suffix = "M" decimalAmount = float64(amount) / i1024_2 - } else if amount > 1024 { + case amount > 1024: suffix = "k" decimalAmount = float64(amount) / 1024 } @@ -156,16 +153,18 @@ func SignedFormatAmount(amount int64) string { return strconv.Itoa(int(amount)) } - if math.Abs(float64(amount)) > i1024_4 { + a := math.Abs(float64(amount)) + switch { + case a > i1024_4: suffix = "P" decimalAmount = float64(amount) / i1024_4 - } else if math.Abs(float64(amount)) > i1024_3 { + case a > i1024_3: suffix = "G" decimalAmount = float64(amount) / i1024_3 - } else if math.Abs(float64(amount)) > i1024_2 { + case a > i1024_2: suffix = "M" decimalAmount = float64(amount) / i1024_2 - } else if math.Abs(float64(amount)) > 1024 { + case a > 1024: suffix = "k" decimalAmount = float64(amount) / 1024 } @@ -180,6 +179,18 @@ func SignedFormatAmount(amount int64) string { // FormatCounter formats a counter like an Amount but is tighter in space func FormatCounter(counter int, width int) string { + // delegate to the unsigned variant to avoid duplicating formatting logic + if counter < 0 { 
+ // preserve sign for negative values + pattern := "%" + fmt.Sprintf("%d", width) + "d" + return fmt.Sprintf(pattern, counter) + } + return FormatCounterU(uint64(counter), width) +} + +// FormatCounterU is like FormatCounter but accepts an unsigned 64-bit value. +// This is useful for counters stored as uint64 to avoid unsafe casts. +func FormatCounterU(counter uint64, width int) string { if counter == 0 { pattern := "%" + fmt.Sprintf("%d", width) + "s" return fmt.Sprintf(pattern, " ") @@ -224,3 +235,34 @@ func QualifiedTableName(schema, table string) string { } return name } + +// SumTimerWaitName is used for reducing code repetition +type SumTimerWaitName struct { + Name string + SumTimerWait uint64 +} + +// NewSumTimerWaitName simplifies struct creation when used by function below +func NewSumTimerWaitName(name string, sumTimerWait uint64) SumTimerWaitName { + return SumTimerWaitName{ + Name: name, + SumTimerWait: sumTimerWait, + } +} + +// SumTimerWaitOrdering order by SumTimerWait (desc), Name (asc) +func SumTimerWaitNameOrdering(a, b SumTimerWaitName) int { + if a.SumTimerWait > b.SumTimerWait { + return -1 + } + if a.SumTimerWait < b.SumTimerWait { + return 1 + } + if a.Name < b.Name { + return -1 + } + if a.Name > b.Name { + return 1 + } + return 0 +} diff --git a/utils/utils_test.go b/utils/utils_test.go index 1100cd3..f863326 100644 --- a/utils/utils_test.go +++ b/utils/utils_test.go @@ -398,3 +398,30 @@ func TestSignedDivide(t *testing.T) { } } } + +// validate the function provides the correct values +// - ordering is by Latency (desc), Name (asc) +func TestSumTimeWaitNameOrdering(t *testing.T) { + tests := []struct { + a SumTimerWaitName + b SumTimerWaitName + expected int + }{ + {SumTimerWaitName{"A", 100}, SumTimerWaitName{"B", 101}, 1}, + {SumTimerWaitName{"A", 101}, SumTimerWaitName{"B", 101}, -1}, + {SumTimerWaitName{"A", 102}, SumTimerWaitName{"B", 101}, -1}, + {SumTimerWaitName{"B", 100}, SumTimerWaitName{"B", 101}, 1}, + {SumTimerWaitName{"B", 
101}, SumTimerWaitName{"B", 101}, 0}, + {SumTimerWaitName{"B", 102}, SumTimerWaitName{"B", 101}, -1}, + {SumTimerWaitName{"C", 100}, SumTimerWaitName{"B", 101}, 1}, + {SumTimerWaitName{"C", 101}, SumTimerWaitName{"B", 101}, 1}, + {SumTimerWaitName{"C", 102}, SumTimerWaitName{"B", 101}, -1}, + } + + for _, test := range tests { + got := SumTimerWaitNameOrdering(test.a, test.b) + if got != test.expected { + t.Errorf("SumTimerWaitNameOrdering(%+v,%+v) failed: expected: %v, got %v", test.a, test.b, test.expected, got) + } + } +} diff --git a/view/view.go b/view/view.go index eb3f99c..7160ec3 100644 --- a/view/view.go +++ b/view/view.go @@ -7,6 +7,7 @@ package view import ( "database/sql" "errors" + "fmt" "github.com/sjmudd/ps-top/log" "github.com/sjmudd/ps-top/model/processlist" @@ -46,8 +47,10 @@ var ( prevView map[Code]Code // map from one view to the next taking into account invalid views ) -// SetupAndValidate setups the view configuration and validates if accesss to the p_s tables is permitted. -func SetupAndValidate(name string, db *sql.DB) View { +// SetupAndValidate setups the view configuration and validates if access to the p_s tables is permitted. +func SetupAndValidate(name string, db *sql.DB) (View, error) { + var v View + log.Printf("view.SetupAndValidate(%q,%v)", name, db) if !setup { @@ -67,7 +70,7 @@ func SetupAndValidate(name string, db *sql.DB) View { var processlistSchema = "performance_schema" havePS, err := processlist.HavePerformanceSchema(db) if err != nil { - log.Fatal(err) + return v, fmt.Errorf("SetupAndValidate(%q,%v): %w", name, db, err) } if !havePS { processlistSchema = "information_schema" @@ -85,14 +88,12 @@ func SetupAndValidate(name string, db *sql.DB) View { } if err := validateViews(db); err != nil { - log.Fatal(err) + return v, fmt.Errorf("SetupAndValidate(%q,%v): %w", name, db, err) } } - var v View - v.SetByName(name) // if empty will use the default - return v + return v, nil } // validateViews check which views are readable. 
If none are we give a fatal error @@ -122,9 +123,7 @@ func validateViews(db *sql.DB) error { } log.Println(count, "of", len(names), "view(s) are SELECTable, continuing") - setPrevAndNextViews() - - return nil + return setPrevAndNextViews() } /* set the previous and next views taking into account any invalid views @@ -139,7 +138,9 @@ v5 false v4 v2 */ -func setPrevAndNextViews() { +func setPrevAndNextViews() error { + var err error + log.Println("view.setPrevAndNextViews()...") nextView = make(map[Code]Code) prevView = make(map[Code]Code) @@ -153,20 +154,28 @@ func setPrevAndNextViews() { // Cleaner way to do this? Probably. Fix later. prevCodeOrder := []Code{ViewMemory, ViewStages, ViewMutex, ViewUsers, ViewLocks, ViewIO, ViewOps, ViewLatency} nextCodeOrder := []Code{ViewLatency, ViewOps, ViewIO, ViewLocks, ViewUsers, ViewMutex, ViewStages, ViewMemory} - prevView = setValidByValues(prevCodeOrder) - nextView = setValidByValues(nextCodeOrder) + prevView, err = setValidByValues(prevCodeOrder) + if err != nil { + return fmt.Errorf("setPrevAndNextViews: failed to set prevView: %w", err) + } + nextView, err = setValidByValues(nextCodeOrder) + if err != nil { + return fmt.Errorf("setPrevAndNextViews: failed to set nextView: %w", err) + } // print out the results log.Println("Final mapping of view order:") for i := range nextCodeOrder { log.Println("view:", nextCodeOrder[i], ", prev:", prevView[nextCodeOrder[i]], ", next:", nextView[nextCodeOrder[i]]) } + + return nil } // setValidNextByValues returns a map of Code -> Code where the mapping points to the "next" // Code. The order is determined by the input Code slice. Only Selectable Views are considered // for the mapping with the other views pointing to the first Code provided. 
-func setValidByValues(orderedCodes []Code) map[Code]Code { +func setValidByValues(orderedCodes []Code) (map[Code]Code, error) { log.Println("view.setValidByValues()") orderedMap := make(map[Code]Code) @@ -195,7 +204,7 @@ func setValidByValues(orderedCodes []Code) map[Code]Code { if i == 1 { // not found a valid view so something is up. Give up! if first == ViewNone { - log.Fatal("setValidByValues() can't find a Selectable view! (shouldn't be here)") + return orderedMap, fmt.Errorf("setValidByValues(%v+) cannot find a Selectable view! (should not happen)", orderedCodes) } } } @@ -208,7 +217,7 @@ func setValidByValues(orderedCodes []Code) map[Code]Code { } } - return orderedMap + return orderedMap, nil } // SetNext changes the current view to the next one @@ -247,7 +256,7 @@ func (v *View) SetByName(name string) { for i := range names { if name == names[i] { - v.code = Code(i) + v.code = i log.Println("View.SetByName(", name, ")") return } diff --git a/wrapper/common.go b/wrapper/common.go new file mode 100644 index 0000000..8bbe03a --- /dev/null +++ b/wrapper/common.go @@ -0,0 +1,113 @@ +package wrapper + +import ( + "fmt" + + "github.com/sjmudd/ps-top/model/tableio" + "github.com/sjmudd/ps-top/utils" +) + +// RowsFromGetter builds a slice of strings by calling the provided getter +// for each index. This centralizes the common loop used by many wrapper +// packages to produce display rows. +func RowsFromGetter(n int, get func(i int) string) []string { + rows := make([]string, 0, n) + for i := 0; i < n; i++ { + rows = append(rows, get(i)) + } + return rows +} + +// CountIf counts how many indices in [0,n) satisfy the predicate. +// Used by wrappers to implement Description() which counts rows with data. 
+func CountIf(n int, pred func(i int) bool) int { + count := 0 + for i := 0; i < n; i++ { + if pred(i) { + count++ + } + } + return count +} + +// TotalRowContent returns the formatted totals row by calling the provided +// content function with the totals value for both row and totals. +// This removes the repeated pattern found in many wrapper packages. +func TotalRowContent[T any](totals T, content func(T, T) string) string { + return content(totals, totals) +} + +// EmptyRowContent returns the formatted empty row by calling the provided +// content function with a zero value for the row and totals. It uses Go +// generics to avoid repeating the same empty-construction pattern. +func EmptyRowContent[T any](content func(T, T) string) string { + var empty T + return content(empty, empty) +} + +// MakeTableIOHeadings constructs a heading string used by the tableio wrappers. +// The `kind` parameter should be either "Latency" or "Ops" (or similar) and will +// be interpolated into the common table IO heading format. +func MakeTableIOHeadings(kind string) string { + return fmt.Sprintf("%10s %6s|%6s %6s %6s %6s|%s", + kind, + "%", + "Fetch", + "Insert", + "Update", + "Delete", + "Table Name") +} + +// MakeTableIODescription builds the description string for table IO wrappers. +func MakeTableIODescription(kind string, count int) string { + return fmt.Sprintf("Table %s (table_io_waits_summary_by_table) %d rows", kind, count) +} + +// TimePct returns the formatted time and percentage strings for a row's +// SumTimerWait and the total SumTimerWait. This small helper centralizes +// the common prefix used by several wrapper content formatters. +func TimePct(sum, totals uint64) (string, string) { + return utils.FormatTime(sum), utils.FormatPct(utils.Divide(sum, totals)) +} + +// PctStrings returns a slice of formatted percentage strings for each value +// relative to the provided total. 
This centralizes the common pattern of +// calling utils.FormatPct(utils.Divide(value, total)). It helps reduce +// duplicated code across wrapper content formatters. +func PctStrings(total uint64, values ...uint64) []string { + out := make([]string, len(values)) + for i, v := range values { + out[i] = utils.FormatPct(utils.Divide(v, total)) + } + return out +} + +// RowsFromSlice builds a slice of strings by applying the provided content +// function to each element of `slice` along with the provided totals value. +// This consolidates the common RowContent pattern used by tableio wrappers. +func RowsFromSlice[T any](slice []T, totals T, content func(T, T) string) []string { + n := len(slice) + return RowsFromGetter(n, func(i int) string { return content(slice[i], totals) }) +} + +// TableIO helpers: small delegating helpers specialized for tableio.Row so +// wrappers can call a single helper instead of reimplementing the same +// RowContent/TotalRowContent/EmptyRowContent/Description trio. 
+func TableIORowContent(slice []tableio.Row, totals tableio.Row, content func(tableio.Row, tableio.Row) string) []string { + return RowsFromSlice(slice, totals, content) +} + +func TableIOTotalRowContent(totals tableio.Row, content func(tableio.Row, tableio.Row) string) string { + return TotalRowContent(totals, content) +} + +func TableIOEmptyRowContent(content func(tableio.Row, tableio.Row) string) string { + return EmptyRowContent(content) +} + +func TableIODescription(kind string, slice []tableio.Row, hasData func(tableio.Row) bool) string { + n := len(slice) + count := CountIf(n, func(i int) bool { return hasData(slice[i]) }) + return MakeTableIODescription(kind, count) +} diff --git a/wrapper/fileinfolatency/wrapper.go b/wrapper/fileinfolatency/wrapper.go index 17999c5..f3f05ed 100644 --- a/wrapper/fileinfolatency/wrapper.go +++ b/wrapper/fileinfolatency/wrapper.go @@ -4,12 +4,13 @@ package fileinfolatency import ( "database/sql" "fmt" - "sort" + "slices" "time" "github.com/sjmudd/ps-top/config" "github.com/sjmudd/ps-top/model/fileinfo" "github.com/sjmudd/ps-top/utils" + "github.com/sjmudd/ps-top/wrapper" ) // Wrapper wraps a FileIoLatency struct representing the contents of the data collected from file_summary_by_instance, but adding formatting for presentation in the terminal @@ -32,7 +33,14 @@ func (fiolw *Wrapper) ResetStatistics() { // Collect data from the db, then merge it in. 
func (fiolw *Wrapper) Collect() { fiolw.fiol.Collect() - sort.Sort(byLatency(fiolw.fiol.Results)) + + // order data by SumTimerWait (descending), Name + slices.SortFunc(fiolw.fiol.Results, func(a, b fileinfo.Row) int { + return utils.SumTimerWaitNameOrdering( + utils.NewSumTimerWaitName(a.Name, a.SumTimerWait), + utils.NewSumTimerWaitName(b.Name, b.SumTimerWait), + ) + }) } // Headings returns the headings for a table @@ -54,37 +62,26 @@ func (fiolw Wrapper) Headings() string { // RowContent returns the rows we need for displaying func (fiolw Wrapper) RowContent() []string { - rows := make([]string, 0, len(fiolw.fiol.Results)) - - for i := range fiolw.fiol.Results { - rows = append(rows, fiolw.content(fiolw.fiol.Results[i], fiolw.fiol.Totals)) - } - - return rows + n := len(fiolw.fiol.Results) + return wrapper.RowsFromGetter(n, func(i int) string { + return fiolw.content(fiolw.fiol.Results[i], fiolw.fiol.Totals) + }) } // TotalRowContent returns all the totals func (fiolw Wrapper) TotalRowContent() string { - return fiolw.content(fiolw.fiol.Totals, fiolw.fiol.Totals) + return wrapper.TotalRowContent(fiolw.fiol.Totals, fiolw.content) } // EmptyRowContent returns an empty string of data (for filling in) func (fiolw Wrapper) EmptyRowContent() string { - var empty fileinfo.Row - - return fiolw.content(empty, empty) + return wrapper.EmptyRowContent(fiolw.content) } // Description returns a description of the table func (fiolw Wrapper) Description() string { - var count int - - for row := range fiolw.fiol.Results { - if fiolw.fiol.Results[row].HasData() { - count++ - } - } - + n := len(fiolw.fiol.Results) + count := wrapper.CountIf(n, func(i int) bool { return fiolw.fiol.Results[i].HasData() }) return fmt.Sprintf("File I/O Latency (file_summary_by_instance) %d rows", count) } @@ -103,7 +100,7 @@ func (fiolw Wrapper) LastCollectTime() time.Time { return fiolw.fiol.LastCollected } -// WantRelativeStats indiates if we want relative statistics +// WantRelativeStats indicates 
if we want relative statistics func (fiolw Wrapper) WantRelativeStats() bool { return fiolw.fiol.WantRelativeStats() } @@ -118,26 +115,21 @@ func (fiolw Wrapper) content(row, totals fileinfo.Row) string { name = "" } + timeStr, pctStr := wrapper.TimePct(row.SumTimerWait, totals.SumTimerWait) + pct := wrapper.PctStrings(row.SumTimerWait, row.SumTimerRead, row.SumTimerWrite, row.SumTimerMisc) + opsPct := wrapper.PctStrings(row.CountStar, row.CountRead, row.CountWrite, row.CountMisc) + return fmt.Sprintf("%10s %6s|%6s %6s %6s|%8s %8s|%8s %6s %6s %6s|%s", - utils.FormatTime(row.SumTimerWait), - utils.FormatPct(utils.Divide(row.SumTimerWait, totals.SumTimerWait)), - utils.FormatPct(utils.Divide(row.SumTimerRead, row.SumTimerWait)), - utils.FormatPct(utils.Divide(row.SumTimerWrite, row.SumTimerWait)), - utils.FormatPct(utils.Divide(row.SumTimerMisc, row.SumTimerWait)), + timeStr, + pctStr, + pct[0], + pct[1], + pct[2], utils.FormatAmount(row.SumNumberOfBytesRead), utils.FormatAmount(row.SumNumberOfBytesWrite), utils.FormatAmount(row.CountStar), - utils.FormatPct(utils.Divide(row.CountRead, row.CountStar)), - utils.FormatPct(utils.Divide(row.CountWrite, row.CountStar)), - utils.FormatPct(utils.Divide(row.CountMisc, row.CountStar)), + opsPct[0], + opsPct[1], + opsPct[2], name) } - -type byLatency fileinfo.Rows - -func (rows byLatency) Len() int { return len(rows) } -func (rows byLatency) Swap(i, j int) { rows[i], rows[j] = rows[j], rows[i] } -func (rows byLatency) Less(i, j int) bool { - return (rows[i].SumTimerWait > rows[j].SumTimerWait) || - ((rows[i].SumTimerWait == rows[j].SumTimerWait) && (rows[i].Name < rows[j].Name)) -} diff --git a/wrapper/memoryusage/wrapper.go b/wrapper/memoryusage/wrapper.go index e37e878..031b84c 100644 --- a/wrapper/memoryusage/wrapper.go +++ b/wrapper/memoryusage/wrapper.go @@ -4,12 +4,13 @@ package memoryusage import ( "database/sql" "fmt" - "sort" + "slices" "time" "github.com/sjmudd/ps-top/config" 
"github.com/sjmudd/ps-top/model/memoryusage" "github.com/sjmudd/ps-top/utils" + "github.com/sjmudd/ps-top/wrapper" ) // Wrapper wraps a FileIoLatency struct representing the contents of the data collected from file_summary_by_instance, but adding formatting for presentation in the terminal @@ -32,7 +33,23 @@ func (muw *Wrapper) ResetStatistics() { // Collect data from the db, then merge it in. func (muw *Wrapper) Collect() { muw.mu.Collect() - sort.Sort(byBytes(muw.mu.Results)) + + // order data by CurrentBytesUsed (descending), Name + slices.SortFunc(muw.mu.Results, func(a, b memoryusage.Row) int { + if a.CurrentBytesUsed > b.CurrentBytesUsed { + return -1 + } + if a.CurrentBytesUsed < b.CurrentBytesUsed { + return 1 + } + if a.Name < b.Name { + return -1 + } + if a.Name > b.Name { + return 1 + } + return 0 + }) } // Headings returns the headings for a table @@ -43,25 +60,20 @@ func (muw Wrapper) Headings() string { // RowContent returns the rows we need for displaying func (muw Wrapper) RowContent() []string { - rows := make([]string, 0, len(muw.mu.Results)) - - for i := range muw.mu.Results { - rows = append(rows, muw.content(muw.mu.Results[i], muw.mu.Totals)) - } - - return rows + n := len(muw.mu.Results) + return wrapper.RowsFromGetter(n, func(i int) string { + return muw.content(muw.mu.Results[i], muw.mu.Totals) + }) } // TotalRowContent returns all the totals func (muw Wrapper) TotalRowContent() string { - return muw.content(muw.mu.Totals, muw.mu.Totals) + return wrapper.TotalRowContent(muw.mu.Totals, muw.content) } // EmptyRowContent returns an empty string of data (for filling in) func (muw Wrapper) EmptyRowContent() string { - var empty memoryusage.Row - - return muw.content(empty, empty) + return wrapper.EmptyRowContent(muw.content) } // Description returns a description of the table @@ -92,7 +104,7 @@ func (muw Wrapper) LastCollectTime() time.Time { return muw.mu.LastCollected } -// WantRelativeStats indiates if we want relative statistics +// 
WantRelativeStats indicates if we want relative statistics func (muw Wrapper) WantRelativeStats() bool { return muw.mu.WantRelativeStats() } @@ -116,14 +128,3 @@ func (muw Wrapper) content(row, totals memoryusage.Row) string { utils.SignedFormatAmount(row.HighCountUsed), name) } - -type byBytes []memoryusage.Row - -func (t byBytes) Len() int { return len(t) } -func (t byBytes) Swap(i, j int) { t[i], t[j] = t[j], t[i] } -func (t byBytes) Less(i, j int) bool { - return (t[i].CurrentBytesUsed > t[j].CurrentBytesUsed) || - ((t[i].CurrentBytesUsed == t[j].CurrentBytesUsed) && - (t[i].Name < t[j].Name)) - -} diff --git a/wrapper/mutexlatency/wrapper.go b/wrapper/mutexlatency/wrapper.go index 802a715..47ebf04 100644 --- a/wrapper/mutexlatency/wrapper.go +++ b/wrapper/mutexlatency/wrapper.go @@ -1,15 +1,16 @@ -// Package mutexlatency holds the routines which manage the server mutexs +// Package mutexlatency holds the routines which manage the server mutexes package mutexlatency import ( "database/sql" "fmt" - "sort" + "slices" "time" "github.com/sjmudd/ps-top/config" "github.com/sjmudd/ps-top/model/mutexlatency" "github.com/sjmudd/ps-top/utils" + "github.com/sjmudd/ps-top/wrapper" ) // Wrapper wraps a MutexLatency struct @@ -32,30 +33,32 @@ func (mlw *Wrapper) ResetStatistics() { // Collect data from the db, then merge it in. 
func (mlw *Wrapper) Collect() { mlw.ml.Collect() - sort.Sort(byLatency(mlw.ml.Results)) + + // order data by SumTimerWait (descending), Name + slices.SortFunc(mlw.ml.Results, func(a, b mutexlatency.Row) int { + return utils.SumTimerWaitNameOrdering( + utils.NewSumTimerWaitName(a.Name, a.SumTimerWait), + utils.NewSumTimerWaitName(b.Name, b.SumTimerWait), + ) + }) } // RowContent returns the rows we need for displaying func (mlw Wrapper) RowContent() []string { - rows := make([]string, 0, len(mlw.ml.Results)) - - for i := range mlw.ml.Results { - rows = append(rows, mlw.content(mlw.ml.Results[i], mlw.ml.Totals)) - } - - return rows + n := len(mlw.ml.Results) + return wrapper.RowsFromGetter(n, func(i int) string { + return mlw.content(mlw.ml.Results[i], mlw.ml.Totals) + }) } // TotalRowContent returns all the totals func (mlw Wrapper) TotalRowContent() string { - return mlw.content(mlw.ml.Totals, mlw.ml.Totals) + return wrapper.TotalRowContent(mlw.ml.Totals, mlw.content) } // EmptyRowContent returns an empty string of data (for filling in) func (mlw Wrapper) EmptyRowContent() string { - var empty mutexlatency.Row - - return mlw.content(empty, empty) + return wrapper.EmptyRowContent(mlw.content) } // HaveRelativeStats is true for this object @@ -73,19 +76,15 @@ func (mlw Wrapper) LastCollectTime() time.Time { return mlw.ml.LastCollected } -// WantRelativeStats indiates if we want relative statistics +// WantRelativeStats indicates if we want relative statistics func (mlw Wrapper) WantRelativeStats() bool { return mlw.ml.WantRelativeStats() } // Description returns a description of the table func (mlw Wrapper) Description() string { - var count int - for row := range mlw.ml.Results { - if mlw.ml.Results[row].SumTimerWait > 0 { - count++ - } - } + n := len(mlw.ml.Results) + count := wrapper.CountIf(n, func(i int) bool { return mlw.ml.Results[i].SumTimerWait > 0 }) return fmt.Sprintf("Mutex Latency (events_waits_summary_global_by_event_name) %d rows", count) } @@ -107,13 
+106,3 @@ func (mlw Wrapper) content(row, totals mutexlatency.Row) string { utils.FormatPct(utils.Divide(row.SumTimerWait, totals.SumTimerWait)), name) } - -type byLatency mutexlatency.Rows - -func (rows byLatency) Len() int { return len(rows) } -func (rows byLatency) Swap(i, j int) { rows[i], rows[j] = rows[j], rows[i] } - -// sort by value (descending) but also by "name" (ascending) if the values are the same -func (rows byLatency) Less(i, j int) bool { - return rows[i].SumTimerWait > rows[j].SumTimerWait -} diff --git a/wrapper/stageslatency/wrapper.go b/wrapper/stageslatency/wrapper.go index 1cd6e6f..ea4b7e8 100644 --- a/wrapper/stageslatency/wrapper.go +++ b/wrapper/stageslatency/wrapper.go @@ -4,12 +4,13 @@ package stageslatency import ( "database/sql" "fmt" - "sort" + "slices" "time" "github.com/sjmudd/ps-top/config" "github.com/sjmudd/ps-top/model/stageslatency" "github.com/sjmudd/ps-top/utils" + "github.com/sjmudd/ps-top/wrapper" ) // Wrapper wraps a Stages struct @@ -32,7 +33,14 @@ func (slw *Wrapper) ResetStatistics() { // Collect data from the db, then merge it in. 
func (slw *Wrapper) Collect() { slw.sl.Collect() - sort.Sort(byLatency(slw.sl.Results)) + + // order by SumTimerWait (desc), Name + slices.SortFunc(slw.sl.Results, func(a, b stageslatency.Row) int { + return utils.SumTimerWaitNameOrdering( + utils.NewSumTimerWaitName(a.Name, a.SumTimerWait), + utils.NewSumTimerWaitName(b.Name, b.SumTimerWait), + ) + }) } // Headings returns the headings for a table @@ -43,36 +51,26 @@ func (slw Wrapper) Headings() string { // RowContent returns the rows we need for displaying func (slw Wrapper) RowContent() []string { - rows := make([]string, 0, len(slw.sl.Results)) - - for i := range slw.sl.Results { - rows = append(rows, slw.content(slw.sl.Results[i], slw.sl.Totals)) - } - - return rows + n := len(slw.sl.Results) + return wrapper.RowsFromGetter(n, func(i int) string { + return slw.content(slw.sl.Results[i], slw.sl.Totals) + }) } // TotalRowContent returns all the totals func (slw Wrapper) TotalRowContent() string { - return slw.content(slw.sl.Totals, slw.sl.Totals) + return wrapper.TotalRowContent(slw.sl.Totals, slw.content) } // EmptyRowContent returns an empty string of data (for filling in) func (slw Wrapper) EmptyRowContent() string { - var empty stageslatency.Row - - return slw.content(empty, empty) + return wrapper.EmptyRowContent(slw.content) } // Description describe the stages func (slw Wrapper) Description() string { - var count int - for row := range slw.sl.Results { - if slw.sl.Results[row].SumTimerWait > 0 { - count++ - } - } - + n := len(slw.sl.Results) + count := wrapper.CountIf(n, func(i int) bool { return slw.sl.Results[i].SumTimerWait > 0 }) return fmt.Sprintf("SQL Stage Latency (events_stages_summary_global_by_event_name) %d rows", count) } @@ -91,7 +89,7 @@ func (slw Wrapper) LastCollectTime() time.Time { return slw.sl.LastCollected } -// WantRelativeStats indiates if we want relative statistics +// WantRelativeStats indicates if we want relative statistics func (slw Wrapper) WantRelativeStats() bool { return
slw.sl.WantRelativeStats() } @@ -109,14 +107,3 @@ func (slw Wrapper) content(row, totals stageslatency.Row) string { utils.FormatAmount(row.CountStar), name) } - -type byLatency stageslatency.Rows - -func (rows byLatency) Len() int { return len(rows) } -func (rows byLatency) Swap(i, j int) { rows[i], rows[j] = rows[j], rows[i] } - -// sort by value (descending) but also by "name" (ascending) if the values are the same -func (rows byLatency) Less(i, j int) bool { - return (rows[i].SumTimerWait > rows[j].SumTimerWait) || - ((rows[i].SumTimerWait == rows[j].SumTimerWait) && (rows[i].Name < rows[j].Name)) -} diff --git a/wrapper/tableiolatency/wrapper.go b/wrapper/tableiolatency/wrapper.go index 033992a..ac3a2b2 100644 --- a/wrapper/tableiolatency/wrapper.go +++ b/wrapper/tableiolatency/wrapper.go @@ -4,12 +4,13 @@ package tableiolatency import ( "database/sql" "fmt" - "sort" + "slices" "time" "github.com/sjmudd/ps-top/config" "github.com/sjmudd/ps-top/model/tableio" "github.com/sjmudd/ps-top/utils" + "github.com/sjmudd/ps-top/wrapper" ) // Wrapper represents the contents of the data collected related to tableio statistics @@ -38,55 +39,38 @@ func (tiolw *Wrapper) ResetStatistics() { func (tiolw *Wrapper) Collect() { tiolw.tiol.Collect() - // sort the results by latency (might be needed in other places) - sort.Sort(byLatency(tiolw.tiol.Results)) + // order by latency (SumTimerWait) descending, Name + slices.SortFunc(tiolw.tiol.Results, func(a, b tableio.Row) int { + return utils.SumTimerWaitNameOrdering( + utils.NewSumTimerWaitName(a.Name, a.SumTimerWait), + utils.NewSumTimerWaitName(b.Name, b.SumTimerWait), + ) + }) } // Headings returns the latency headings as a string func (tiolw Wrapper) Headings() string { - return fmt.Sprintf("%10s %6s|%6s %6s %6s %6s|%s", - "Latency", - "%", - "Fetch", - "Insert", - "Update", - "Delete", - "Table Name") + return wrapper.MakeTableIOHeadings("Latency") } // RowContent returns the rows we need for displaying func (tiolw Wrapper) 
RowContent() []string { - rows := make([]string, 0, len(tiolw.tiol.Results)) - - for i := range tiolw.tiol.Results { - rows = append(rows, tiolw.content(tiolw.tiol.Results[i], tiolw.tiol.Totals)) - } - - return rows + return wrapper.TableIORowContent(tiolw.tiol.Results, tiolw.tiol.Totals, tiolw.content) } // TotalRowContent returns all the totals func (tiolw Wrapper) TotalRowContent() string { - return tiolw.content(tiolw.tiol.Totals, tiolw.tiol.Totals) + return wrapper.TableIOTotalRowContent(tiolw.tiol.Totals, tiolw.content) } // EmptyRowContent returns an empty string of data (for filling in) func (tiolw Wrapper) EmptyRowContent() string { - var empty tableio.Row - - return tiolw.content(empty, empty) + return wrapper.TableIOEmptyRowContent(tiolw.content) } // Description returns a description of the table func (tiolw Wrapper) Description() string { - var count int - for row := range tiolw.tiol.Results { - if tiolw.tiol.Results[row].HasData() { - count++ - } - } - - return fmt.Sprintf("Table Latency (table_io_waits_summary_by_table) %d rows", count) + return wrapper.TableIODescription("Latency", tiolw.tiol.Results, func(r tableio.Row) bool { return r.HasData() }) } // HaveRelativeStats is true for this object @@ -109,7 +93,7 @@ func (tiolw Wrapper) WantRelativeStats() bool { return tiolw.tiol.WantRelativeStats() } -// latencyRowContents reutrns the printable result +// latencyRowContents returns the printable result func (tiolw Wrapper) content(row, totals tableio.Row) string { // assume the data is empty so hide it. 
name := row.Name @@ -126,17 +110,3 @@ func (tiolw Wrapper) content(row, totals tableio.Row) string { utils.FormatPct(utils.Divide(row.SumTimerDelete, row.SumTimerWait)), name) } - -// for sorting -type byLatency tableio.Rows - -// sort the tableio.Rows by latency -func (rows byLatency) Len() int { return len(rows) } -func (rows byLatency) Swap(i, j int) { rows[i], rows[j] = rows[j], rows[i] } - -// sort by value (descending) but also by "name" (ascending) if the values are the same -func (rows byLatency) Less(i, j int) bool { - return (rows[i].SumTimerWait > rows[j].SumTimerWait) || - ((rows[i].SumTimerWait == rows[j].SumTimerWait) && - (rows[i].Name < rows[j].Name)) -} diff --git a/wrapper/tableioops/wrapper.go b/wrapper/tableioops/wrapper.go index 43b71b3..c78ac26 100644 --- a/wrapper/tableioops/wrapper.go +++ b/wrapper/tableioops/wrapper.go @@ -2,24 +2,25 @@ package tableioops import ( - "fmt" - "sort" + "slices" "time" "github.com/sjmudd/ps-top/model/tableio" - "github.com/sjmudd/ps-top/utils" + "github.com/sjmudd/ps-top/wrapper" "github.com/sjmudd/ps-top/wrapper/tableiolatency" ) // Wrapper represents a wrapper around tableiolatency type Wrapper struct { - tiol *tableio.TableIo + tiol *tableio.TableIo + latency *tableiolatency.Wrapper } // NewTableIoOps creates a wrapper around TableIo, sharing the same connection with the tableiolatency wrapper func NewTableIoOps(latency *tableiolatency.Wrapper) *Wrapper { return &Wrapper{ - tiol: latency.Tiol(), + tiol: latency.Tiol(), + latency: latency, } } @@ -32,55 +33,47 @@ func (tiolw *Wrapper) ResetStatistics() { func (tiolw *Wrapper) Collect() { tiolw.tiol.Collect() - // sort the results by ops - sort.Sort(byOperations(tiolw.tiol.Results)) + // sort the results by ops == CountStar (descending), Name + slices.SortFunc(tiolw.tiol.Results, func(a, b tableio.Row) int { + if a.CountStar > b.CountStar { + return -1 + } + if a.CountStar < b.CountStar { + return 1 + } + if a.Name < b.Name { + return -1 + } + if a.Name > 
b.Name { + return 1 + } + return 0 + }) } // Headings returns the headings by operations as a string func (tiolw Wrapper) Headings() string { - return fmt.Sprintf("%10s %6s|%6s %6s %6s %6s|%s", - "Ops", - "%", - "Fetch", - "Insert", - "Update", - "Delete", - "Table Name") + return wrapper.MakeTableIOHeadings("Ops") } // RowContent returns the rows we need for displaying func (tiolw Wrapper) RowContent() []string { - rows := make([]string, 0, len(tiolw.tiol.Results)) - - for i := range tiolw.tiol.Results { - rows = append(rows, tiolw.content(tiolw.tiol.Results[i], tiolw.tiol.Totals)) - } - - return rows + return tiolw.latency.RowContent() } // TotalRowContent returns all the totals func (tiolw Wrapper) TotalRowContent() string { - return tiolw.content(tiolw.tiol.Totals, tiolw.tiol.Totals) + return tiolw.latency.TotalRowContent() } // EmptyRowContent returns an empty string of data (for filling in) func (tiolw Wrapper) EmptyRowContent() string { - var empty tableio.Row - - return tiolw.content(empty, empty) + return tiolw.latency.EmptyRowContent() } // Description returns a description of the table func (tiolw Wrapper) Description() string { - var count int - for row := range tiolw.tiol.Results { - if tiolw.tiol.Results[row].HasData() { - count++ - } - } - - return fmt.Sprintf("Table Ops (table_io_waits_summary_by_table) %d rows", count) + return tiolw.latency.Description() } // HaveRelativeStats is true for this object @@ -102,32 +95,3 @@ func (tiolw Wrapper) LastCollectTime() time.Time { func (tiolw Wrapper) WantRelativeStats() bool { return tiolw.tiol.WantRelativeStats() } - -// generate a printable result for ops -func (tiolw Wrapper) content(row, totals tableio.Row) string { - // assume the data is empty so hide it. 
- name := row.Name - if row.CountStar == 0 && name != "Totals" { - name = "" - } - - return fmt.Sprintf("%10s %6s|%6s %6s %6s %6s|%s", - utils.FormatAmount(row.CountStar), - utils.FormatPct(utils.Divide(row.CountStar, totals.CountStar)), - utils.FormatPct(utils.Divide(row.CountFetch, row.CountStar)), - utils.FormatPct(utils.Divide(row.CountInsert, row.CountStar)), - utils.FormatPct(utils.Divide(row.CountUpdate, row.CountStar)), - utils.FormatPct(utils.Divide(row.CountDelete, row.CountStar)), - name) -} - -// byOperations is used for sorting by the number of operations -type byOperations tableio.Rows - -func (rows byOperations) Len() int { return len(rows) } -func (rows byOperations) Swap(i, j int) { rows[i], rows[j] = rows[j], rows[i] } -func (rows byOperations) Less(i, j int) bool { - return (rows[i].CountStar > rows[j].CountStar) || - ((rows[i].SumTimerWait == rows[j].SumTimerWait) && - (rows[i].Name < rows[j].Name)) -} diff --git a/wrapper/tablelocklatency/wrapper.go b/wrapper/tablelocklatency/wrapper.go index c4908c1..11f6eec 100644 --- a/wrapper/tablelocklatency/wrapper.go +++ b/wrapper/tablelocklatency/wrapper.go @@ -4,12 +4,13 @@ package tablelocklatency import ( "database/sql" "fmt" - "sort" + "slices" "time" "github.com/sjmudd/ps-top/config" "github.com/sjmudd/ps-top/model/tablelocks" "github.com/sjmudd/ps-top/utils" + "github.com/sjmudd/ps-top/wrapper" ) // Wrapper wraps a TableLockLatency struct @@ -32,7 +33,14 @@ func (tlw *Wrapper) ResetStatistics() { // Collect data from the db, then merge it in. 
func (tlw *Wrapper) Collect() { tlw.tl.Collect() - sort.Sort(byLatency(tlw.tl.Results)) + + // order data by SumTimerWait (descending), Name + slices.SortFunc(tlw.tl.Results, func(a, b tablelocks.Row) int { + return utils.SumTimerWaitNameOrdering( + utils.NewSumTimerWaitName(a.Name, a.SumTimerWait), + utils.NewSumTimerWaitName(b.Name, b.SumTimerWait), + ) + }) } // Headings returns the headings for a table @@ -47,25 +55,20 @@ func (tlw Wrapper) Headings() string { // RowContent returns the rows we need for displaying func (tlw Wrapper) RowContent() []string { - rows := make([]string, 0, len(tlw.tl.Results)) - - for i := range tlw.tl.Results { - rows = append(rows, tlw.content(tlw.tl.Results[i], tlw.tl.Totals)) - } - - return rows + n := len(tlw.tl.Results) + return wrapper.RowsFromGetter(n, func(i int) string { + return tlw.content(tlw.tl.Results[i], tlw.tl.Totals) + }) } // TotalRowContent returns all the totals func (tlw Wrapper) TotalRowContent() string { - return tlw.content(tlw.tl.Totals, tlw.tl.Totals) + return wrapper.TotalRowContent(tlw.tl.Totals, tlw.content) } // EmptyRowContent returns an empty string of data (for filling in) func (tlw Wrapper) EmptyRowContent() string { - var empty tablelocks.Row - - return tlw.content(empty, empty) + return wrapper.EmptyRowContent(tlw.content) } // Description returns a description of the table @@ -90,7 +93,7 @@ func (tlw Wrapper) LastCollectTime() time.Time { return tlw.tl.LastCollected } -// WantRelativeStats indiates if we want relative statistics +// WantRelativeStats indicates if we want relative statistics func (tlw Wrapper) WantRelativeStats() bool { return tlw.tl.WantRelativeStats() } @@ -102,35 +105,38 @@ func (tlw Wrapper) content(row, totals tablelocks.Row) string { if row.SumTimerWait == 0 && name != "Totals" { name = "" } + timeStr, pctStr := wrapper.TimePct(row.SumTimerWait, totals.SumTimerWait) + pct := wrapper.PctStrings(row.SumTimerWait, + row.SumTimerRead, + row.SumTimerWrite, + 
row.SumTimerReadWithSharedLocks, + row.SumTimerReadHighPriority, + row.SumTimerReadNoInsert, + row.SumTimerReadNormal, + row.SumTimerReadExternal, + row.SumTimerWriteAllowWrite, + row.SumTimerWriteConcurrentInsert, + row.SumTimerWriteLowPriority, + row.SumTimerWriteNormal, + row.SumTimerWriteExternal) return fmt.Sprintf("%10s %6s|%6s %6s|%6s %6s %6s %6s %6s|%6s %6s %6s %6s %6s|%s", - utils.FormatTime(row.SumTimerWait), - utils.FormatPct(utils.Divide(row.SumTimerWait, totals.SumTimerWait)), - - utils.FormatPct(utils.Divide(row.SumTimerRead, row.SumTimerWait)), - utils.FormatPct(utils.Divide(row.SumTimerWrite, row.SumTimerWait)), - - utils.FormatPct(utils.Divide(row.SumTimerReadWithSharedLocks, row.SumTimerWait)), - utils.FormatPct(utils.Divide(row.SumTimerReadHighPriority, row.SumTimerWait)), - utils.FormatPct(utils.Divide(row.SumTimerReadNoInsert, row.SumTimerWait)), - utils.FormatPct(utils.Divide(row.SumTimerReadNormal, row.SumTimerWait)), - utils.FormatPct(utils.Divide(row.SumTimerReadExternal, row.SumTimerWait)), - - utils.FormatPct(utils.Divide(row.SumTimerWriteAllowWrite, row.SumTimerWait)), - utils.FormatPct(utils.Divide(row.SumTimerWriteConcurrentInsert, row.SumTimerWait)), - utils.FormatPct(utils.Divide(row.SumTimerWriteLowPriority, row.SumTimerWait)), - utils.FormatPct(utils.Divide(row.SumTimerWriteNormal, row.SumTimerWait)), - utils.FormatPct(utils.Divide(row.SumTimerWriteExternal, row.SumTimerWait)), + timeStr, + pctStr, + + pct[0], + pct[1], + + pct[2], + pct[3], + pct[4], + pct[5], + pct[6], + + pct[7], + pct[8], + pct[9], + pct[10], + pct[11], name) } - -type byLatency tablelocks.Rows - -func (t byLatency) Len() int { return len(t) } -func (t byLatency) Swap(i, j int) { t[i], t[j] = t[j], t[i] } -func (t byLatency) Less(i, j int) bool { - return (t[i].SumTimerWait > t[j].SumTimerWait) || - ((t[i].SumTimerWait == t[j].SumTimerWait) && - (t[i].Name < t[j].Name)) - -} diff --git a/wrapper/userlatency/wrapper.go b/wrapper/userlatency/wrapper.go index 
693f799..43ed22a 100644 --- a/wrapper/userlatency/wrapper.go +++ b/wrapper/userlatency/wrapper.go @@ -4,12 +4,13 @@ package userlatency import ( "database/sql" "fmt" - "sort" + "slices" "time" "github.com/sjmudd/ps-top/config" "github.com/sjmudd/ps-top/model/userlatency" "github.com/sjmudd/ps-top/utils" + "github.com/sjmudd/ps-top/wrapper" ) // Wrapper wraps a UserLatency struct @@ -32,30 +33,47 @@ func (ulw *Wrapper) ResetStatistics() { // Collect data from the db, then sort the results. func (ulw *Wrapper) Collect() { ulw.ul.Collect() - sort.Sort(byTotalTime(ulw.ul.Results)) + + // order by TotalTime (descending), Connections (descending), Name + slices.SortFunc(ulw.ul.Results, func(a, b userlatency.Row) int { + if a.TotalTime() > b.TotalTime() { + return -1 + } + if a.TotalTime() < b.TotalTime() { + return 1 + } + if a.Connections > b.Connections { + return -1 + } + if a.Connections < b.Connections { + return 1 + } + if a.Username < b.Username { + return -1 + } + if a.Username > b.Username { + return 1 + } + return 0 + }) } // RowContent returns the rows we need for displaying func (ulw Wrapper) RowContent() []string { - rows := make([]string, 0, len(ulw.ul.Results)) - - for i := range ulw.ul.Results { - rows = append(rows, ulw.content(ulw.ul.Results[i], ulw.ul.Totals)) - } - - return rows + n := len(ulw.ul.Results) + return wrapper.RowsFromGetter(n, func(i int) string { + return ulw.content(ulw.ul.Results[i], ulw.ul.Totals) + }) } // TotalRowContent returns all the totals func (ulw Wrapper) TotalRowContent() string { - return ulw.content(ulw.ul.Totals, ulw.ul.Totals) + return wrapper.TotalRowContent(ulw.ul.Totals, ulw.content) } // EmptyRowContent returns an empty string of data (for filling in) func (ulw Wrapper) EmptyRowContent() string { - var empty userlatency.Row - - return ulw.content(empty, empty) + return wrapper.EmptyRowContent(ulw.content) } // HaveRelativeStats is true for this object @@ -73,7 +91,7 @@ func (ulw Wrapper) LastCollectTime() time.Time { 
return ulw.ul.LastCollected } -// WantRelativeStats indiates if we want relative statistics +// WantRelativeStats indicates if we want relative statistics func (ulw Wrapper) WantRelativeStats() bool { return ulw.ul.WantRelativeStats() } @@ -102,29 +120,18 @@ func (ulw Wrapper) content(row, totals userlatency.Row) string { utils.FormatPct(utils.Divide(row.Runtime, totals.Runtime)), formatSeconds(row.Sleeptime), utils.FormatPct(utils.Divide(row.Sleeptime, totals.Sleeptime)), - utils.FormatCounter(int(row.Connections), 4), - utils.FormatCounter(int(row.Active), 4), - utils.FormatCounter(int(row.Hosts), 5), - utils.FormatCounter(int(row.Dbs), 3), - utils.FormatCounter(int(row.Selects), 3), - utils.FormatCounter(int(row.Inserts), 3), - utils.FormatCounter(int(row.Updates), 3), - utils.FormatCounter(int(row.Deletes), 3), - utils.FormatCounter(int(row.Other), 3), + utils.FormatCounterU(row.Connections, 4), + utils.FormatCounterU(row.Active, 4), + utils.FormatCounterU(row.Hosts, 5), + utils.FormatCounterU(row.Dbs, 3), + utils.FormatCounterU(row.Selects, 3), + utils.FormatCounterU(row.Inserts, 3), + utils.FormatCounterU(row.Updates, 3), + utils.FormatCounterU(row.Deletes, 3), + utils.FormatCounterU(row.Other, 3), row.Username) } -// byTotalTime is for sorting rows by Runtime -type byTotalTime []userlatency.Row - -func (t byTotalTime) Len() int { return len(t) } -func (t byTotalTime) Swap(i, j int) { t[i], t[j] = t[j], t[i] } -func (t byTotalTime) Less(i, j int) bool { - return (t[i].TotalTime() > t[j].TotalTime()) || - ((t[i].TotalTime() == t[j].TotalTime()) && (t[i].Connections > t[j].Connections)) || - ((t[i].TotalTime() == t[j].TotalTime()) && (t[i].Connections == t[j].Connections) && (t[i].Username < t[j].Username)) -} - // formatSeconds formats the given seconds into xxh xxm xxs or xxd xxh xxm // for periods longer than 24h. If seconds is 0 return an empty string. // Leading 0 values are omitted.