diff --git a/.github/workflows/find-data-by-id-tests.yml b/.github/workflows/find-data-by-id-tests.yml index a8840c442..55b51a289 100644 --- a/.github/workflows/find-data-by-id-tests.yml +++ b/.github/workflows/find-data-by-id-tests.yml @@ -83,7 +83,7 @@ jobs: # Extract variables from the api_test_counts.txt file while IFS= read -r line; do echo "::set-output name=${line%=*}::${line#*=}" - done < find_data_by_id_test_counts_${{ matrix.environment }}.txt + done < find_data_by_id_tests_counts_${{ matrix.environment }}.txt - name: Archive test results id: artifact-upload-step diff --git a/.gitignore b/.gitignore index d3b95614a..3ab517b78 100644 --- a/.gitignore +++ b/.gitignore @@ -245,12 +245,19 @@ Features/DataDictionary/testCases/report.html Features/DataDictionary/testCases/slack_charts Features/DataDictionary/test_cases/report.html + #Ignoring compliled files of Powerbi_integration_exports Features/Powerbi_integration_exports/settings.cfg Features/Powerbi_integration_exports/report.html Features/Powerbi_integration_exports/test_cases/report.html Features/Powerbi_integration_exports/testCases/slack_charts +#Ignoring compiled files of Find Data by ID +Features/FindDataById/testCases/test_cases/report.html +Features/FindDataById/testCases/testCases/slack_charts +Features/FindDataById/test_cases/report.html +Features/FindDataById/settings.cfg + #Ignoring compliled files of QA_Requests QA_Requests/BHAStressTest/settings.cfg QA_Requests/BHAStressTest/user_inputs/*.csv diff --git a/Features/FindDataById/README.md b/Features/FindDataById/README.md new file mode 100644 index 000000000..72855f8a3 --- /dev/null +++ b/Features/FindDataById/README.md @@ -0,0 +1,57 @@ +## CommCare Find Data by ID Test Script + +These tests ensure that the [Find Data by ID](https://dimagi.atlassian.net/wiki/spaces/commcarepublic/pages/2143955380/Find+Data+by+ID) features work as expected and that there are no regressions. +The automated tests comprise [these Find Data by ID test cases](https://docs.google.com/spreadsheets/d/1w25sl855-Tc-MBlQWKR8XZRmgE2TCtW8VH4M24RBvKw/edit?gid=594000179#gid=594000179). +## Executing Scripts + +### On Local Machine + +#### Setting up test environment + +```sh + +# create and activate a virtualenv using your preferred method. Example: +python -m venv venv +source venv/bin/activate + + +# install requirements +pip install -r requires.txt + +``` + +[More on setting up virtual environments](https://confluence.dimagi.com/display/GTD/QA+and+Python+Virtual+Environments) + + +#### Running Tests + + + - Copy `settings-sample.cfg` to `settings.cfg` and populate `settings.cfg` for +the environment you want to test. +- Run tests using a pytest command like: + +```sh + +# To execute all the test cases +pytest -v --rootdir=Features/FindDataById/testCases + +``` +- You can also pass the following arguments: + - `-n 3 --dist=loadfile` - This will run the tests in parallel in 3 instances. The number of instances is configurable. + - `--reruns 1` - This will re-run the tests once in case of failures. The number of reruns is configurable too. + +### Trigger Manually on GitHub Actions + +Clone this repository. + +To manually trigger the script, + - Go to [FindById action](https://github.com/dimagi/dimagi-qa/actions/find-data-by-id-tests.yml) + - Run workflow + - Use workflow from `master` + - Run! + +If you are part of the QA team, you'll receive an email with the result of the run after it's complete.
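If you prefer the command line, the same workflow can also be triggered with the GitHub CLI - a minimal sketch, assuming `gh` is installed and authenticated for this repository:

```sh
# hypothetical equivalent of the manual "Run workflow" steps above
gh workflow run find-data-by-id-tests.yml --ref master
```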
+ +Besides, you should be able to find the zipped results in the **Artifacts** section of the corresponding run (after it's complete). diff --git a/Features/FindDataById/__init__.py b/Features/FindDataById/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/Features/FindDataById/requires.txt b/Features/FindDataById/requires.txt new file mode 100644 index 000000000..dd326737a --- /dev/null +++ b/Features/FindDataById/requires.txt @@ -0,0 +1,22 @@ +## Stores information about all the libraries, modules, and packages that are used in this project. + + +flake8>=3.8.4 +pandas>=1.2.2 +pytest +py>=1.10.0 +pytest-html>=3.1.1 +pytest-json-report +selenium == 4.11.0 +openpyxl +matplotlib >= 3.3.4 +pytest-rerunfailures +pytest-xdist +pytest-xdist[psutil] +pytest-order +requests +imap-tools +beautifulsoup4 +html5lib +pytest-metadata +pyotp >=2.6.0 diff --git a/Features/FindDataById/settings-sample.cfg b/Features/FindDataById/settings-sample.cfg new file mode 100644 index 000000000..b6bb402bb --- /dev/null +++ b/Features/FindDataById/settings-sample.cfg @@ -0,0 +1,12 @@ +[default] +# This is the environment URL of CommCare +url = https://www.commcarehq.org/ +# Login username of the webuser +login_username = +# Login password of the webuser +login_password = +# This is a preconfigured authentication key used for 2FA tests on staging - if 2FA is enabled on staging. +staging_auth_key = +# This is a preconfigured authentication key used for 2FA tests on prod +prod_auth_key = + diff --git a/Features/FindDataById/testCases/__init__.py b/Features/FindDataById/testCases/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/Features/FindDataById/testCases/conftest.py b/Features/FindDataById/testCases/conftest.py new file mode 100644 index 000000000..78972b652 --- /dev/null +++ b/Features/FindDataById/testCases/conftest.py @@ -0,0 +1,297 @@ +import os + +from configparser import ConfigParser +from pathlib import Path +from common_utilities.fixtures import * + +"""This file provides fixture functions for driver initialization""" + +global driver + + +@pytest.fixture(scope="session") +def environment_settings_lookup(): + """Load settings from os.environ + + Names of environment variables: + DIMAGIQA_URL + DIMAGIQA_LOGIN_USERNAME + DIMAGIQA_LOGIN_PASSWORD + DIMAGIQA_MAIL_USERNAME + DIMAGIQA_MAIL_PASSWORD + DIMAGIQA_STAGING_AUTH_KEY + DIMAGIQA_PROD_AUTH_KEY + + See https://docs.github.com/en/actions/reference/encrypted-secrets + for instructions on how to set them.
+ """ + settings = {} + for name in ["url", "login_username", "login_password", "mail_username", + "mail_password", "staging_auth_key", "prod_auth_key"]: + + var = f"DIMAGIQA_{name.upper()}" + if var in os.environ: + settings[name] = os.environ[var] + if "url" not in settings: + env = os.environ.get("DIMAGIQA_ENV") or "staging" + subdomain = "www" if env == "production" else env + # updates the url with the project domain while testing in CI + project = "a/qa-automation-prod" if env == "production" else "a/qa-automation" + settings["url"] = f"https://{subdomain}.commcarehq.org/{project}" + return settings + + +@pytest.fixture(scope="session", autouse=True) +def settings(environment_settings_lookup): + if os.environ.get("CI") == "true": + settings = environment_settings_lookup + settings["CI"] = "true" + if any(x not in settings for x in ["url", "login_username", "login_password", + "mail_username", "mail_password", "staging_auth_key", + "prod_auth_key"]): + lines = environment_settings_lookup.__doc__.splitlines() + vars_ = "\n ".join(line.strip() for line in lines if "DIMAGIQA_" in line) + raise RuntimeError( + f"Environment variables not set:\n {vars_}\n\n" + "See https://docs.github.com/en/actions/reference/encrypted-secrets " + "for instructions on how to set them." + ) + return settings + path = Path(__file__).parent.parent / "settings.cfg" + if not path.exists(): + raise RuntimeError( + f"Not found: {path}\n\n" + "Copy settings-sample.cfg to settings.cfg and populate " + "it with values for the environment you want to test." + ) + settings = ConfigParser() + settings.read(path) + # updates the url with the project domain while testing in local + if settings["default"]["url"] == "https://www.commcarehq.org/": + settings["default"]["url"] = f"{settings['default']['url']}a/qa-automation-prod" + else: + settings["default"]["url"] = f"{settings['default']['url']}a/qa-automation" + return settings["default"] + +def pytest_terminal_summary(terminalreporter, exitstatus, config): + # Collect test counts + passed = terminalreporter.stats.get('passed', []) + failed = terminalreporter.stats.get('failed', []) + error = terminalreporter.stats.get('error', []) + skipped = terminalreporter.stats.get('skipped', []) + xfail = terminalreporter.stats.get('xfail', []) + + env = os.environ.get("DIMAGIQA_ENV", "default_env") + + # Define the filename based on the environment + filename = f'find_data_by_id_tests_counts_{env}.txt' + + # Write the counts to a file + with open(filename, 'w') as f: + f.write(f'PASSED={len(passed)}\n') + f.write(f'FAILED={len(failed)}\n') + f.write(f'ERROR={len(error)}\n') + f.write(f'SKIPPED={len(skipped)}\n') + f.write(f'XFAIL={len(xfail)}\n') + +# conftest.py +import pytest +import matplotlib.pyplot as plt +import base64 +from io import BytesIO + +_test_stats = {} + +def pytest_sessionfinish(session, exitstatus): + """Collect stats at the end of the test session.""" + tr = session.config.pluginmanager.get_plugin("terminalreporter") + global _test_stats + _test_stats = { + "passed": len(tr.stats.get("passed", [])), + "failed": len(tr.stats.get("failed", [])), + "skipped": len(tr.stats.get("skipped", [])), + "error": len(tr.stats.get("error", [])), + "xfail": len(tr.stats.get("xfail", [])), + "reruns": len(tr.stats.get("rerun", [])), + } + save_summary_charts(_test_stats) + +import base64 + +def save_summary_charts(stats): + from pathlib import Path + out_dir = Path("slack_charts") + out_dir.mkdir(exist_ok=True) + + passed = stats.get("passed", 0) + failed = stats.get("failed", 
0) + skipped = stats.get("skipped", 0) + reruns = stats.get("reruns", 0) + + # --- Pie chart with legend --- + pie_labels = ["Passed", "Failed", "Skipped"] + pie_sizes = [passed, failed, skipped] + pie_colors = ["#66bb6a", "#ef5350", "#fad000"] + + fig, ax = plt.subplots() + wedges, texts = ax.pie( + pie_sizes, + labels=None, + colors=pie_colors, + startangle=90, + wedgeprops=dict(width=0.4) + ) + ax.axis("equal") + ax.set_title("Test Summary") + + # Add legend with counts + ax.legend( + [f"Passed: {passed}", f"Failed: {failed}", f"Skipped: {skipped}"], + loc="lower center", + ncol=3, + bbox_to_anchor=(0.5, -0.15) + ) + + fig.savefig(out_dir / "summary_pie.png", bbox_inches="tight") + plt.close(fig) + + # --- Bar chart with labels + legend --- + bar_path = None + if failed > 0 or reruns > 0: # ✅ only generate if needed + fig, ax = plt.subplots() + bars = ax.bar( + ["Failed", "Reruns"], + [failed, reruns], + color=["#ef5350", "#ffa726"] + ) + ax.set_ylabel("Number of Tests") + ax.set_title("Failures and Reruns") + + # Add counts above bars + for bar in bars: + height = bar.get_height() + ax.text( + bar.get_x() + bar.get_width() / 2, + height + 0.05, + str(int(height)), + ha="center", + va="bottom", + fontsize=10, + fontweight="bold" + ) + + # Legend with counts + ax.legend( + [f"Failed: {failed}", f"Reruns: {reruns}"], + loc="lower center", + ncol=2, + bbox_to_anchor=(0.5, -0.15) + ) + + bar_path = out_dir / "summary_bar.png" + fig.savefig(bar_path, bbox_inches="tight") + plt.close(fig) + + # --- Combine --- + combine_charts( + pie_path=out_dir / "summary_pie.png", + bar_path=bar_path, + combined_path=out_dir / "summary_combined.png" + ) + + +import matplotlib.pyplot as plt +from PIL import Image + +def combine_charts(pie_path="slack_charts/summary_pie.png", + bar_path=None, + combined_path="slack_charts/summary_combined.png"): + """Combine pie and bar charts side by side if bar exists, else only pie.""" + from PIL import Image + + pie = Image.open(pie_path) + + if bar_path and Path(bar_path).exists(): + bar = Image.open(bar_path) + bar = bar.resize((bar.width * pie.height // bar.height, pie.height)) + combined = Image.new("RGB", (pie.width + bar.width, pie.height), (255, 255, 255)) + combined.paste(pie, (0, 0)) + combined.paste(bar, (pie.width, 0)) + else: + # Only pie chart + combined = pie.copy() + + combined.save(combined_path) + print(f"✅ Combined chart saved to {combined_path}") + + + +def _matplotlib_img(fig) -> str: + """Convert a matplotlib figure to base64 string.""" + buf = BytesIO() + plt.tight_layout() + fig.savefig(buf, format="png") + plt.close(fig) + buf.seek(0) + return base64.b64encode(buf.read()).decode("utf-8") + +def pytest_html_results_summary(prefix, summary, postfix, session): + """Inject donut pie + bar chart with reruns support (parallel-safe).""" + tr = session.config.pluginmanager.get_plugin("terminalreporter") + stats = tr.stats if tr and hasattr(tr, "stats") else {} + + passed = len(stats.get("passed", [])) + failed = len(stats.get("failed", [])) + skipped = len(stats.get("skipped", [])) + + # Reruns are recorded separately by pytest-rerunfailures + reruns = len(stats.get("rerun", [])) + + # --- Donut Pie Chart (Passed, Failed, Skipped) --- + pie_labels = ["Passed", "Failed", "Skipped"] + pie_sizes = [passed, failed, skipped] + pie_colors = ["#66bb6a", "#ef5350", "#fad000"] + + fig, ax = plt.subplots() + wedges, texts = ax.pie( + pie_sizes, + labels=None, + colors=pie_colors, + startangle=90, + wedgeprops=dict(width=0.4) + ) + ax.axis("equal") + + # Legend 
below the donut + plt.legend( + wedges, + [f"{l}: {v}" for l, v in zip(pie_labels, pie_sizes)], + title="Results", + loc="upper center", + bbox_to_anchor=(0.5, -0.08), + ncol=len(pie_labels) + ) + pie_img = _matplotlib_img(fig) + + # --- Bar Chart (Failures + Reruns) --- + bar_img = None + if failed > 0 or reruns > 0: + fig, ax = plt.subplots() + bars = ax.bar(["Failed", "Reruns"], [failed, reruns], color=["#ef5350", "#ff9933"]) + ax.set_title("Failures and Reruns") + ax.set_ylabel("Number of Tests") + plt.legend( + bars, + [f"Failed: {failed}", f"Reruns: {reruns}"], + loc="upper center", + bbox_to_anchor=(0.5, -0.12), + ncol=2 + ) + bar_img = _matplotlib_img(fig) + + # --- Embed in HTML report --- + html = "<div>" + html += f"<div><h3>Test Summary</h3><img src='data:image/png;base64,{pie_img}'/></div>" + if bar_img: + html += f"<div><h3>Failures and Reruns</h3><img src='data:image/png;base64,{bar_img}'/></div>" + html += "</div>
" + + summary.append(html) diff --git a/Features/FindDataById/testCases/test_01_find_data_by_id_tests.py b/Features/FindDataById/testCases/test_01_find_data_by_id_tests.py new file mode 100644 index 000000000..4eddbefb5 --- /dev/null +++ b/Features/FindDataById/testCases/test_01_find_data_by_id_tests.py @@ -0,0 +1,76 @@ +import pytest + +from HQSmokeTests.testPages.home.home_page import HomePage +from Features.FindDataById.testPages.data.find_data_page import FindDataPage +from HQSmokeTests.testPages.webapps.web_apps_page import WebAppsPage +from HQSmokeTests.testPages.reports.report_page import ReportPage +from Features.FindDataById.userInputs.user_inputs import UserData + +""""Contains test cases related to the Data module""" + +values = dict() + + +def test_case_01_verify_page_ui(driver,settings): + home = HomePage(driver, settings) + page = FindDataPage(driver) + home.data_menu() + page.find_data_by_id_page_ui() + +def test_case_02_verify_invalid_ids(driver,settings): + home = HomePage(driver, settings) + page = FindDataPage(driver) + home.data_menu() + page.find_data_by_id_page_ui() + page.search_invalid_ids("case") + page.search_invalid_ids("form") + +def test_case_03_validating_export_page(driver,settings): + home = HomePage(driver, settings) + page = FindDataPage(driver) + home.data_menu() + page.find_data_by_id_page_ui() + page.verify_data_exports_link("case") + page.find_data_by_id_page_ui() + page.verify_data_exports_link("form") + + +def test_case_04_finding_case_form_ids(driver,settings): + home = HomePage(driver, settings) + webapps = WebAppsPage(driver) + load = ReportPage(driver) + page = FindDataPage(driver) + driver.refresh() + home.web_apps_menu() + case_name = webapps.submit_case_form() + webapps.verify_apps_presence() + home.reports_menu() + case_id_value= load.verify_form_data_submit_history(case_name, settings['login_username'], "case", UserData.reassign_cases_app_data) + form_id_value = load.verify_form_data_submit_history(case_name, settings['login_username'], "form", UserData.reassign_cases_app_data ) + #user_id_value= load.verify_form_data_submit_history(case_name, settings['login_username'], "user", UserData.reassign_cases_app_data) + home.data_menu() + page.find_data_by_id_page_ui() + page.validate_web_user_location_group_data_pages("case","location",case_id_value) + page.find_data_by_id_page_ui() + page.validate_web_user_location_group_data_pages("case","location",form_id_value) + #page.find_data_by_id_page_ui() + #page.validate_web_user_location_group_data_pages("case","location",user_id_value) + + +def test_case_05_validating_correct_data_pages(driver,settings): + home = HomePage(driver, settings) + page = FindDataPage(driver) + home.data_menu() + page.find_data_by_id_page_ui() + page.validate_web_user_location_group_data_pages("case","location") + page.find_data_by_id_page_ui() + page.validate_web_user_location_group_data_pages("case","group") + page.find_data_by_id_page_ui() + page.validate_web_user_location_group_data_pages("form","location") + page.find_data_by_id_page_ui() + page.validate_web_user_location_group_data_pages("form","group") + #page.find_data_by_id_page_ui() + #page.validate_web_user_location_group_data_pages("case","web_user") + #page.find_data_by_id_page_ui() + #page.validate_web_user_location_group_data_pages("form", "web_user") + diff --git a/Features/FindDataById/testPages/__init__.py b/Features/FindDataById/testPages/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git 
a/Features/FindDataById/testPages/data/__init__.py b/Features/FindDataById/testPages/data/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/Features/FindDataById/testPages/data/find_data_page.py b/Features/FindDataById/testPages/data/find_data_page.py new file mode 100644 index 000000000..01c27bf36 --- /dev/null +++ b/Features/FindDataById/testPages/data/find_data_page.py @@ -0,0 +1,94 @@ + +import time +from selenium.webdriver.common.by import By +from Features.FindDataById.userInputs.user_inputs import UserData +from common_utilities.selenium.base_page import BasePage + + +"""Contains test page elements and functions related to the Find Data by ID module""" + +class FindDataPage(BasePage): + + def __init__(self, driver): + super().__init__(driver) + + # FindDataById + self.find_data_by_id = (By.LINK_TEXT, "Find Data by ID") + self.find_id = "//*[@placeholder='{} ID']" + self.find_button = "//fieldset[@id='find-{}']//button[@type='button'][normalize-space()='Find']" + self.error_message = "//div[contains(text(),'Could not find {}')]" + self.export_link = "//a[normalize-space()='{} data export']" + self.export_page = "//h1[normalize-space()='Export {} Data']" + self.case_id_found = (By.XPATH, "//a[normalize-space()='View']") + self.view = "//fieldset[@id='{}']//a[contains(text(),'View')]" + + # Find_Data_By_Id_Form_Level + self.case_change = (By.XPATH, "//a[normalize-space()='Case Changes']") + self.form_metadata = (By.XPATH, "//a[normalize-space()='Form Metadata']") + self.form_properties = (By.XPATH, "//a[normalize-space()='Form Properties']") + self.properties = "//a[normalize-space()='{}']" + self.id_values = "//dt[@title='{}']//following-sibling::dd[1]" + self.view_button = (By.XPATH, "//a[normalize-space()='View']") + + + def find_data_by_id_page_ui(self): + self.wait_to_click(self.find_data_by_id) + assert self.is_present_and_displayed((By.XPATH, self.find_id.format('Case'))), "Find Case field is not displayed" + assert self.is_present_and_displayed((By.XPATH, self.find_id.format('Form Submission'))), "Find Form Submission field is not displayed" + + def search_invalid_ids(self, submission_type): + if submission_type == 'case': + self.is_present_and_displayed((By.XPATH, self.find_id.format('Case'))) + self.wait_to_clear_and_send_keys((By.XPATH, self.find_id.format('Case')), UserData.invalid_id) + self.js_click((By.XPATH, self.find_button.format('case'))) + assert self.is_present_and_displayed((By.XPATH, self.error_message.format('case'))), "'Could not find case' error message is not displayed" + elif submission_type == 'form': + self.is_present_and_displayed((By.XPATH, self.find_id.format('Form Submission'))) + self.wait_to_clear_and_send_keys((By.XPATH, self.find_id.format('Form Submission')), UserData.invalid_id) + self.js_click((By.XPATH, self.find_button.format('form'))) + assert self.is_present_and_displayed((By.XPATH, self.error_message.format('form submission'))), "'Could not find form submission' error message is not displayed" + + def verify_data_exports_link(self, value): + if value == 'case': + self.js_click((By.XPATH, self.export_link.format(value))) + assert self.is_present_and_displayed((By.XPATH, self.export_page.format(str(value).capitalize()))), "Case export page did not open" + elif value == 'form': + self.js_click((By.XPATH, self.export_link.format(value))) + assert self.is_present_and_displayed((By.XPATH, self.export_page.format(str(value).capitalize()))), "Form export page did not open" + + + def validate_web_user_location_group_data_pages(self, value_type, id_type, case_data=None): + id_map = { + "web_user": 0, + "location": 1,
"group": 2 + } + if id_type not in id_map: + raise ValueError(f"Invalid id_type '{id_type}'. Must be one of {list(id_map.keys())}.") + value = "Case" if value_type == "case" else "Form Submission" + url = self.get_current_url() + env = "staging" if "staging" in url else "prod" + user_id = str(UserData.user_details[env][id_map[id_type]]) if case_data is None else case_data + self.click((By.XPATH, self.find_id.format(value))) + self.send_keys((By.XPATH, self.find_id.format(value)), user_id) + self.js_click((By.XPATH, self.find_button.format(value_type))) + text = self.get_attribute(self.view_button, "href") + print(text) + assert user_id in text, f"{user_id} not found in URL: {text}" + print(f"[PASS] {id_type} ID '{user_id}' is present in {text}") + self.js_click(self.view_button) + time.sleep(5) + self.switch_to_next_tab() + time.sleep(5) + current_url = self.get_current_url() + assert user_id in current_url, f"{user_id} not found in new tab URL: {current_url}" + print(f"[PASS] {id_type} ID '{user_id}' is present in new tab URL {current_url}") + self.driver.close() + time.sleep(2) + self.switch_back_to_prev_tab() + + diff --git a/Features/FindDataById/userInputs/__init__.py b/Features/FindDataById/userInputs/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/Features/FindDataById/userInputs/user_inputs.py b/Features/FindDataById/userInputs/user_inputs.py new file mode 100644 index 000000000..af807569f --- /dev/null +++ b/Features/FindDataById/userInputs/user_inputs.py @@ -0,0 +1,26 @@ +""""Contains test data that are used as user inputs across various areasn in CCHQ""" +import os +import random +import string + +from common_utilities.generate_random_string import fetch_random_string +from common_utilities.path_settings import PathSettings + + +class UserData: + + + """User Test Data""" + location_name = 'value="Test Location [DO NOT DELETE!!!]"' + invalid_id = 'barff57e-c455-4887-bca6-c43b29e82dde' + # [userid ,location_id, group_id] + user_details = {"prod": ['06023ed31ef7c7af67ce1526f47021c6', 'deee331b540d4211be7f95bb66e79578', + '8e4ee9b45a2a097ef41b43e1f47b2f01'], + "staging": ['7ed7f6e897b973005cb9d142c12ecdfd', '72f473f6cb324667967f8113ed6b6d79', + '6a28737916f8bd3098ae71b23060cc54'] + } + reassign_cases_app_data = { + "app_name": "Reassign Cases", + "case_list_name": "Case List", + "form_name": "Registration Form" + } \ No newline at end of file diff --git a/HQSmokeTests/testPages/reports/report_page.py b/HQSmokeTests/testPages/reports/report_page.py index fb7113fbe..db83092ed 100644 --- a/HQSmokeTests/testPages/reports/report_page.py +++ b/HQSmokeTests/testPages/reports/report_page.py @@ -266,6 +266,11 @@ def __init__(self, driver): self.result_table = (By.XPATH, "(//div[@id='report-content']//table//tbody//td[1])[1]") self.users_list_item = "//ul[@role='listbox']/li[contains(.,'{}')]" + # Find_Data_By_Id + self.properties = "//a[normalize-space()='{}']" + self.form_properties = "//a[normalize-space()='Form Properties']" + self.id_values = "//dt[@title='{}']//following-sibling::dd[1]" + def check_if_report_loaded(self): try: self.wait_to_click(self.apply_id) @@ -536,7 +541,7 @@ def verify_table_not_empty(self, locator): print("No rows are present in the web table") return False - def verify_form_data_submit_history(self, case_name, username): + def verify_form_data_submit_history(self, case_name, username,type_value=None,app_config=None): print("Sleeping for sometime for the case to get registered.") time.sleep(90) self.wait_to_click(self.submit_history_rep) 
@@ -546,9 +551,9 @@ def verify_form_data_submit_history(self, case_name, username): self.wait_to_click(self.users_box) self.send_keys(self.search_user, username) self.wait_to_click((By.XPATH, self.app_user_select.format(username))) - self.select_by_text(self.application_select, UserData.reassign_cases_application) - self.select_by_text(self.module_select, UserData.case_list_name) - self.select_by_text(self.form_select, UserData.form_name) + self.select_by_text(self.application_select, app_config['app_name']) + self.select_by_text(self.module_select, app_config['case_list_name']) + self.select_by_text(self.form_select, app_config['form_name']) date_range = self.get_todays_date_range() self.clear(self.date_input) self.send_keys(self.date_input, date_range + Keys.TAB) @@ -561,11 +566,24 @@ def verify_form_data_submit_history(self, case_name, username): print("View Form Link: ", form_link) # self.switch_to_new_tab() self.driver.get(form_link) + if type_value == 'case': + self.wait_to_click((By.XPATH, self.properties.format('Case Changes'))) + value_id = self.get_text((By.XPATH, self.id_values.format('@case_id'))) + new_value = str(value_id).strip() + return new_value + elif type_value == 'user': + user_id = self.get_text((By.XPATH, self.id_values.format('@user_id'))) + user_id_value = str(user_id).strip() + return user_id_value + elif type_value == 'form': + self.wait_to_click((By.XPATH, self.properties.format('Form Metadata'))) + form_id = self.get_text((By.XPATH, self.id_values.format('instanceID'))) + form_id_value = str(form_id).strip() + return form_id_value time.sleep(3) + self.wait_to_click((By.XPATH, self.properties.format('Form Properties'))) self.page_source_contains(case_name) assert True, "Case name is present in Submit history" - # self.driver.close() - # self.switch_back_to_prev_tab() self.driver.back() def verify_form_data_case_list(self, case_name, username): diff --git a/HQSmokeTests/testPages/users/roles_permissions_page.py b/HQSmokeTests/testPages/users/roles_permissions_page.py index 432fd06c1..d8f6158f3 100644 --- a/HQSmokeTests/testPages/users/roles_permissions_page.py +++ b/HQSmokeTests/testPages/users/roles_permissions_page.py @@ -51,6 +51,9 @@ def __init__(self, driver, settings): self.edit_data = (By.XPATH, "//div[@id='user-roles-table']/div[@class='panel-body']/div[@class='modal fade in']/div[@class='modal-dialog']/form/div[@class='modal-content']/div[@class='modal-body']/div[@class='form form-horizontal']/fieldset/div[3]/div[@class='form-group'][7]/div[@class='col-sm-2 controls'][1]/div[@class='form-check']/label") self.view_data_dictionary = (By.XPATH, "//input[@id='view-data-dict-checkbox']") self.edit_data_dictionary = (By.XPATH, "//input[@id='edit-data-dict-checkbox']") + self.edit_data = (By.XPATH, "//input[@id='edit-data-checkbox']") + self.view_data_dictionary = (By.XPATH, "//input[@id='view-data-dict-checkbox']") + self.edit_data_dictionary = (By.XPATH, "//input[@id='edit-data-dict-checkbox']") self.web_user_permission = "//th[./span[.='{}']]//following-sibling::td/div[contains(@data-bind,'edit_web_users')]/i[contains(@class,'check')]" self.mobile_worker_permission = "//th[./span[.='{}']]//following-sibling::td/div[contains(@data-bind,'edit_commcare_users')]/i[contains(@class,'check')]" @@ -67,7 +70,7 @@ def add_role(self): self.scroll_to_element(self.save_button) time.sleep(0.5) self.wait_to_click(self.save_button) - + assert self.is_present_and_displayed(self.role_created), "Role not added successfully!" 
def edit_role(self): @@ -77,9 +80,9 @@ def edit_role(self): self.scroll_to_element(self.save_button) time.sleep(0.5) self.wait_to_click(self.save_button) - + assert self.is_present_and_displayed(self.role_renamed), "Role not edited successfully!" - + def cleanup_role(self): self.wait_to_click(self.delete_role) @@ -97,7 +100,7 @@ def delete_test_roles(self): "(//th[.//span[contains(text(),'role_')]]//following-sibling::td//button[@class='btn btn-danger'])[" + str( i + 1) + "]").click() self.wait_to_click(self.confirm_role_delete) - + list_profile = self.driver.find_elements(By.XPATH, "//th[.//span[contains(text(),'role_')]]") else: print("There are no test roles") @@ -118,7 +121,7 @@ def delete_test_roles(self): "(//th[.//span[contains(text(),'role_')]]//following-sibling::td//button[@class='btn btn-danger'])[" + str( i + 1) + "]").click() self.wait_to_click(self.confirm_role_delete) - + list_profile = self.driver.find_elements(By.XPATH, "//th[.//span[contains(text(),'role_')]]") else: print("There are no test roles") @@ -127,7 +130,9 @@ def delete_test_roles(self): def add_non_admin_role(self): self.wait_to_click(self.add_new_role) - self.wait_to_clear_and_send_keys(self.role_name, self.role_non_admin_created) + self.wait_for_element(self.role_name) + self.send_keys(self.role_name, self.role_non_admin_created) + self.wait_to_click(self.edit_mobile_worker_checkbox) self.scroll_to_element(self.access_all_reports_checkbox) is_checked = self.get_attribute(self.access_all_reports_checkbox, 'checked') @@ -150,7 +155,7 @@ def add_non_admin_role(self): self.scroll_to_element(self.save_button) time.sleep(0.5) self.wait_to_click(self.save_button) - + assert self.is_present_and_displayed(self.role_non_admin), "Role not added successfully!" return self.role_non_admin_created @@ -158,19 +163,84 @@ def add_non_admin_role_dd(self, value): self.wait_to_click(self.add_new_role) self.wait_to_clear_and_send_keys(self.role_name, self.role_non_admin_created) time.sleep(1) - self.js_click(self.edit_data,5) - self.js_click(self.view_data_dictionary) + self.click(self.edit_data,5) + self.click(self.view_data_dictionary) if value == 1: print("only view access selected") elif value ==2: time.sleep(5) - self.js_click(self.edit_data_dictionary) + self.click(self.edit_data_dictionary) else: - self.js_click(self.view_data_dictionary) - self.js_click(self.edit_data_dictionary) - self.js_click(self.edit_data_dictionary) + self.click(self.view_data_dictionary) + self.click(self.edit_data_dictionary) + self.click(self.edit_data_dictionary) + print("Role added successfully") + return self.role_non_admin_created + + + def add_shared_export_role(self, name, flag='NO'): + self.wait_to_click(self.add_new_role) + self.wait_to_clear_and_send_keys(self.role_name, name) + time.sleep(1) + self.click(self.edit_mobile_worker_checkbox) + time.sleep(0.5) + self.click(self.edit_web_user_checkbox) + time.sleep(0.5) + self.click(self.data_checkbox) + time.sleep(0.5) + self.scroll_to_element(self.manage_shared_exports) + if flag == 'YES': + time.sleep(2) + self.click(self.manage_shared_exports) + time.sleep(2) + time.sleep(0.5) + self.scroll_to_element(self.access_all_reports_checkbox) + time.sleep(1) + self.click(self.access_all_reports_checkbox) + time.sleep(0.5) self.scroll_to_element(self.save_button) time.sleep(0.5) - self.js_click(self.save_button) + self.click(self.save_button) time.sleep(2) + assert self.is_present_and_displayed((By.XPATH, self.role_no_shared_export.format(name))), "Role not added successfully!" 
+ assert self.is_present_and_displayed((By.XPATH, self.web_user_permission.format(name))), "Web User Permission not present" + assert self.is_present_and_displayed((By.XPATH, self.mobile_worker_permission.format(name))), "Mobile Worker Permission not present" + if flag == "NO": + assert not self.is_present_and_displayed((By.XPATH, self.managed_shared_export_permission.format(name)), 5), "Shared Export Permission is present" + else: + assert self.is_present_and_displayed((By.XPATH, self.managed_shared_export_permission.format(name))), "Shared Export Permission not present" return self.role_non_admin_created
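The new `add_shared_export_role` flow above toggles each permission with a click followed by a fixed `time.sleep`. A small page-object helper could keep those waits in one place; this is a minimal sketch, not part of the diff, assuming the `scroll_to_element` and `click` helpers used throughout this change are available on the page object (the helper name itself is hypothetical):

```python
import time


def toggle_permissions(page, locators, pause=0.5):
    """Scroll to and click each permission checkbox in order, pausing briefly between clicks.

    `page` is any page object exposing the scroll_to_element/click helpers used above;
    `locators` is an iterable of locator tuples such as page.edit_mobile_worker_checkbox.
    """
    for locator in locators:
        page.scroll_to_element(locator)
        page.click(locator)
        time.sleep(pause)


# Example (hypothetical) usage inside add_shared_export_role:
# toggle_permissions(self, [self.edit_mobile_worker_checkbox,
#                           self.edit_web_user_checkbox,
#                           self.data_checkbox])
```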