diff --git a/py-scripts/DeviceConfig.py b/py-scripts/DeviceConfig.py
index 5f0774f9d..184ddcb22 100755
--- a/py-scripts/DeviceConfig.py
+++ b/py-scripts/DeviceConfig.py
@@ -172,8 +172,7 @@ async def forget_all_networks(self, port_list=[]):
logger.info('Port list is empty')
return
- url = 'http://{}:{}/cli-json/clear_wifi_profiles'.format(self.lanforge_ip, self.port)
-
+ url = 'cli-json/clear_wifi_profiles'.format(self.lanforge_ip, self.port)
data_list = []
for port_data in port_list:
@@ -185,7 +184,6 @@ async def forget_all_networks(self, port_list=[]):
}
data_list.append(data)
logger.info(f"DATA LIST: {data_list}")
-
loop = asyncio.get_event_loop()
tasks = [loop.run_in_executor(None, self.json_post, url, data) for data in data_list]
await asyncio.gather(*tasks)
@@ -1844,4 +1842,4 @@ def get_device_data(port_key, resource_key, port_data, resource_data):
if args.csv_name == '':
obj.device_csv_file()
else:
- obj.device_csv_file(args.csv_name)
+ obj.device_csv_file(args.csv_name)
\ No newline at end of file
diff --git a/py-scripts/basebase.py b/py-scripts/basebase.py
new file mode 100644
index 000000000..0de3c865a
--- /dev/null
+++ b/py-scripts/basebase.py
@@ -0,0 +1,10043 @@
+import asyncio
+import importlib
+import datetime
+import time
+import requests
+# echo Performing POST cleanup of browser processes... & taskkill /F /IM chrome.exe /T >nul 2>&1 & taskkill /F /IM chromedriver.exe /T >nul 2>&1 & echo Browser processes terminated.
+# cmd /c "echo Performing POST cleanup of browser processes... && taskkill /F /IM chrome.exe /T >nul 2>&1 && taskkill /F /IM chromedriver.exe /T >nul 2>&1 && echo Browser processes terminated."
+import paramiko
+import threading
+import logging
+from lf_graph import lf_bar_graph_horizontal,lf_bar_graph,lf_line_graph
+import pandas as pd
+from lf_base_interop_profile import RealDevice
+from lf_ftp import FtpTest
+import lf_webpage as http_test
+import multiprocessing
+import lf_interop_qos as qos_test
+import lf_interop_ping as ping_test
+from lf_interop_throughput import Throughput
+from lf_interop_video_streaming import VideoStreamingTest
+# from lf_interop_real_browser_test import RealBrowserTest
+from test_l3 import L3VariableTime,change_port_to_ip,configure_reporting,query_real_clients,valid_endp_types
+from lf_kpi_csv import lf_kpi_csv
+import lf_cleanup
+import os
+import sys
+lf_kpi_csv = importlib.import_module("py-scripts.lf_kpi_csv")
+import argparse
+import json
+import traceback
+from types import SimpleNamespace
+import matplotlib
+import csv
+import matplotlib.pyplot as plt
+from pathlib import Path
+realm = importlib.import_module("py-json.realm")
+Realm = realm.Realm
+error_logs = ""
+# objj = "obj"
+test_results_df = pd.DataFrame(columns=['test_name', 'status'])
+matplotlib.use('Agg') # Before importing pyplot
+base_path = os.getcwd()
+print('base path',base_path)
+sys.path.insert(0, os.path.join(base_path, 'py-json')) # for interop_connectivity, LANforge
+sys.path.insert(0, os.path.join(base_path, 'py-json', 'LANforge')) # for LFUtils
+sys.path.insert(0, os.path.join(base_path, 'py-scripts')) # for lf_logger_config
+througput_test=importlib.import_module("py-scripts.lf_interop_throughput")
+video_streaming_test=importlib.import_module("py-scripts.lf_interop_video_streaming")
+web_browser_test=importlib.import_module("py-scripts.real_application_tests.real_browser.lf_interop_real_browser_test")
+zoom_test=importlib.import_module("py-scripts.real_application_tests.zoom_automation.lf_interop_zoom")
+yt_test=importlib.import_module("py-scripts.real_application_tests.youtube.lf_interop_youtube")
+lf_report_pdf = importlib.import_module("py-scripts.lf_report")
+lf_logger_config = importlib.import_module("py-scripts.lf_logger_config")
+logger = logging.getLogger(__name__)
+RealBrowserTest = getattr(web_browser_test, "RealBrowserTest")
+Youtube = getattr(yt_test, "Youtube")
+ZoomAutomation = getattr(zoom_test, "ZoomAutomation")
+DeviceConfig=importlib.import_module("py-scripts.DeviceConfig")
+# from py_scripts import lf_logger_config, interop_connectivity
+# Saved working directory and index state WIP on test_base_class: 3f3a21f5 minor change
+from lf_interop_ping import Ping
+# from LANforge.LFUtils import LFUtils
+import sys
+import os
+from multiprocessing import Manager
+manager = Manager()
+test_results_list = manager.list()
+# BASE PATH: /home/sidartha/project/lanforge-scripts
+# base_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
+
+# # Add py-json and LANforge to sys.path
+# sys.path.insert(0, os.path.join(base_path, 'py-json')) # for interop_connectivity
+# sys.path.insert(0, os.path.join(base_path, 'py-json', 'LANforge')) # for LFUtils
+# sys.path.insert(0, os.path.join(base_path, 'py-scripts')) # for lf_logger_config
+
+# import LFUtils
+# import lf_logger_config
+# import interop_connectivity
+if 'py-json' not in sys.path:
+ sys.path.append(os.path.join(os.path.abspath('..'), 'py-json'))
+
+
+if 'py-scripts' not in sys.path:
+ sys.path.append('/home/lanforge/lanforge-scripts/py-scripts')
+lf_report = importlib.import_module("py-scripts.lf_report")
+from station_profile import StationProfile
+import interop_connectivity
+from LANforge import LFUtils
+class Candela(Realm):
+ """
+ Candela Class file to invoke different scripts from py-scripts.
+ """
+
+    def __init__(self, ip='localhost', port=8080,order_priority="series",result_dir="",dowebgui=False,test_name=''):
+        """
+        Constructor to initialize the LANforge IP and port
+        Args:
+            ip (str, optional): LANforge IP. Defaults to 'localhost'.
+            port (int, optional): LANforge port. Defaults to 8080.
+            order_priority (str, optional): Execution-mode key ("series" or "parallel")
+                used to index the per-test object dicts. Defaults to "series".
+            result_dir (str, optional): Directory where webgui status CSVs are written. Defaults to "".
+            dowebgui (bool, optional): When True, status files for the web GUI are maintained. Defaults to False.
+            test_name (str, optional): Name used to locate the webgui running-instance JSON. Defaults to ''.
+        """
+        super().__init__(lfclient_host=ip,
+                         lfclient_port=port)
+        self.lanforge_ip = ip
+        self.port = port
+        # Base URL for the LANforge REST API used by api_get()/api_post().
+        self.api_url = 'http://{}:{}'.format(self.lanforge_ip, self.port)
+        self.cleanup = lf_cleanup.lf_clean(host=self.lanforge_ip, port=self.port, resource='all')
+        self.ftp_test = None
+        self.http_test = None
+        self.generic_endps_profile = self.new_generic_endp_profile()
+        # Per-run bookkeeping; populated later by the individual test runners.
+        self.iterations_before_test_stopped_by_user=None
+        self.incremental_capacity_list=None
+        self.all_dataframes=None
+        self.to_run_cxs_len=None
+        self.date=None
+        self.test_setup_info=None
+        self.individual_df=None
+        self.cx_order_list=None
+        self.dataset2=None
+        self.dataset = None
+        self.lis = None
+        self.bands = None
+        self.total_urls = None
+        self.uc_min_value = None
+        # NOTE(review): cx_order_list is assigned twice (also a few lines above) —
+        # harmless duplicate, but confirm which assignment is intended.
+        self.cx_order_list = None
+        self.gave_incremental=None
+        self.result_path = os.getcwd()
+        self.test_count_dict = {}
+        self.current_exec = "series"
+        self.order_priority = order_priority
+        self.dowebgui = dowebgui
+        self.result_dir = result_dir
+        self.test_name = test_name
+        self.overall_csv = []
+        self.overall_status = {}
+        self.obj_dict = {}
+        self.test_stopped = False
+        self.duration_dict = {}
+        # One {object-name: {"obj": ..., "data": ...}} mapping per test type,
+        # split by execution mode ("parallel"/"series").
+        self.http_obj_dict = {"parallel":{},"series":{}}
+        self.ftp_obj_dict = {"parallel":{},"series":{}}
+        self.thput_obj_dict = {"parallel":{},"series":{}}
+        self.qos_obj_dict = {"parallel":{},"series":{}}
+        self.ping_obj_dict = {"parallel":{},"series":{}}
+        self.mcast_obj_dict = {"parallel":{},"series":{}}
+        self.rb_obj_dict = {"parallel":{},"series":{}}
+        self.yt_obj_dict = {"parallel":{},"series":{}}
+        self.zoom_obj_dict = {"parallel":{},"series":{}}
+        self.vs_obj_dict = {"parallel":{},"series":{}}
+        # NOTE(review): this overwrites the plain-dict rb_obj_dict created just
+        # above with multiprocessing Manager dicts — presumably so the real
+        # browser test can share state across processes; confirm the earlier
+        # plain-dict assignment can be dropped.
+        self.rb_obj_dict = manager.dict({
+            "parallel": manager.dict(),
+            "series": manager.dict()
+        })
+        # self.rb_obj_dict = manager.dict({
+        #     "parallel": manager.dict(),
+        #     "series": manager.dict()
+        # })
+        # self.rb_pipe_dict = {"parallel":{},"series":{}}
+        # self.yt_obj_dict = manager.dict({"parallel": {}, "series": {}})
+        # self.zoom_obj_dict = manager.dict({"parallel": {}, "series": {}})
+        self.parallel_connect = {}
+        self.series_connect = {}
+        self.parallel_index = 0
+        self.series_index = 0
+
+ def api_get(self, endp: str):
+ """
+ Sends a GET request to fetch data
+
+ Args:
+ endp (str): API endpoint
+
+ Returns:
+ response: response code for the request
+ data: data returned in the response
+ """
+ if endp[0] != '/':
+ endp = '/' + endp
+ response = requests.get(url=self.api_url + endp)
+ data = response.json()
+ return response, data
+
+    def api_post(self, endp: str, payload: dict):
+        """
+        Sends POST request
+
+        Args:
+            endp (str): API endpoint
+            payload (dict): Endpoint data in JSON format
+
+        Returns:
+            response: response object for the request
+            False if endpoint is empty or None
+        """
+        if endp == '' or endp is None:
+            logger.info('Invalid endpoint specified.')
+            return False
+        # Normalise the endpoint so it always begins with a slash.
+        if endp[0] != '/':
+            endp = '/' + endp
+        response = requests.post(url=self.api_url + endp, json=payload)
+        return response
+    def webgui_stop_check(self,test_name):
+        """
+        Poll the webgui running-instance JSON and report whether the test may continue.
+
+        Reads <result_dir>/../../Running_instances/<ip>_<test_name>_running.json;
+        if its "status" is no longer "Running", marks the run as user-stopped.
+        Otherwise appends a "started" row for *test_name* to overall_status.csv.
+
+        Args:
+            test_name (str): Test whose status row should be recorded.
+
+        Returns:
+            bool: False when the user has stopped the run, True otherwise.
+        """
+        try:
+            print("ENTERED STOP CHECKKK")
+            with open(self.result_dir + "/../../Running_instances/{}_{}_running.json".format(self.lanforge_ip, self.test_name), 'r') as file:
+                data = json.load(file)
+                if data["status"] != "Running":
+                    logging.info('Test is stopped by the user')
+                    self.test_stopped = True
+            if not self.test_stopped:
+                print("ENTERED NOT STOPPED")
+                self.overall_status[test_name] = "started"
+                self.overall_status["time"] = datetime.datetime.now().strftime("%Y %d %H:%M:%S")
+                self.overall_status["current_mode"] = self.current_exec
+                self.overall_status["current_test_name"] = test_name
+                # Snapshot the mutable status dict before appending so later
+                # updates do not rewrite rows already collected.
+                self.overall_csv.append(self.overall_status.copy())
+                df1 = pd.DataFrame(self.overall_csv)
+                df1.to_csv('{}/overall_status.csv'.format(self.result_dir), index=False)
+        # Deliberate best-effort: webgui bookkeeping must never kill the test run.
+        except BaseException:
+            logger.info(f"Error while running for webui during {test_name} execution")
+        if self.test_stopped:
+            logger.info("test has been stopped by the user")
+            return False
+        return True
+
+ def webgui_test_done(self, test_name):
+ try:
+ self.overall_status[test_name] = "stopped"
+ self.overall_status["time"] = datetime.datetime.now().strftime("%Y %d %H:%M:%S")
+ self.overall_csv.append(self.overall_status.copy())
+ df1 = pd.DataFrame(self.overall_csv)
+ df1.to_csv('{}/overall_status.csv'.format(self.result_dir), index=False)
+ except Exception as e:
+ logger.info(e)
+
+    def port_clean_up(self,port_no):
+        """
+        Kill any processes listening on *port_no* on the LANforge system.
+
+        SSHes into the LANforge host, finds owning PIDs with `lsof -t -i:<port>`
+        and sends `kill -9` to each.
+
+        Args:
+            port_no (int): TCP port whose owning processes should be killed.
+        """
+        print('port cleanup......')
+        # Brief settle time so the test that owned the port can shut down first.
+        time.sleep(5)
+        hostname = self.lanforge_ip
+        # NOTE(review): hard-coded default LANforge credentials — confirm this
+        # is acceptable for the deployments this script targets.
+        username = "root"
+        password = "lanforge"
+        ports = []
+        ports.append(port_no)
+        # ssh = paramiko.SSHClient()
+        # ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+        # ssh.connect(hostname, username=username, password=password)
+
+        # for cmd in commands:
+        #     print(f"--- Running: {cmd} ---")
+        #     stdin, stdout, stderr = ssh.exec_command(cmd)
+        #     print("Output:\n", stdout.read().decode())
+        #     print("Errors:\n", stderr.read().decode())
+        # ssh.close()
+
+        ssh = paramiko.SSHClient()
+        ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+        ssh.connect(hostname, username=username, password=password)
+
+        # for port in ports:
+        #     print(f"\n--- Checking port {port} ---")
+
+        #     try:
+        #         check_cmd = f"lsof -i :{port}"
+        #         stdin, stdout, stderr = ssh.exec_command(check_cmd, timeout=10)  # ⬅ timeout added
+        #         output = stdout.read().decode().strip()
+        #         error = stderr.read().decode().strip()
+
+        #         if output:
+        #             print(f"Processes using port {port}:\n{output}")
+
+        #             # kill_cmd = f"fuser -kv {port}/tcp"
+        #             # kill_cmd = f"fuser -k {port}/tcp"
+        #             kill_cmd = f"fuser -k {port}/tcp || true"
+        #             stdin, stdout, stderr = ssh.exec_command(kill_cmd, timeout=10)
+        #             print("Kill Output:\n", stdout.read().decode())
+        #             print("Kill Errors:\n", stderr.read().decode())
+        #         else:
+        #             print(f"No process found on port {port}")
+
+        #     except Exception as e:
+        #         print(f"Error checking port {port}: {e}")
+
+        for port in ports:
+            print(f"\n--- Checking port {port} ---")
+
+            try:
+                # Get only the PIDs of processes using this port
+                check_cmd = f"lsof -t -i:{port}"
+                stdin, stdout, stderr = ssh.exec_command(check_cmd, timeout=10)
+                pids = stdout.read().decode().strip().splitlines()
+
+                if pids:
+                    print(f"Processes using port {port}: {', '.join(pids)}")
+
+                    # Kill each PID safely
+                    for pid in pids:
+                        kill_cmd = f"kill -9 {pid}"
+                        ssh.exec_command(kill_cmd, timeout=10)
+                        print(f"Killed PID {pid} on port {port}")
+                else:
+                    print(f"No process found on port {port}")
+
+            except Exception as e:
+                print(f"Error checking port {port}: {e}")
+
+        ssh.close()
+
+
+ def misc_clean_up(self,layer3=False,layer4=False,generic=False,port_5000=False,port_5002=False,port_5003=False):
+ """
+ Use for the cleanup of cross connections
+ arguments:
+ layer3: (Boolean : optional) Default : False To Delete all layer3 connections
+ layer4: (Boolean : optional) Default : False To Delete all layer4 connections
+ """
+ layer3 = False
+ layer4 = False
+ generic = False
+ if layer3:
+ self.cleanup.cxs_clean()
+ self.cleanup.layer3_endp_clean()
+ if layer4:
+ self.cleanup.layer4_endp_clean()
+ if generic:
+ resp = self.json_get('/generic?fields=name')
+ if 'endpoints' in resp:
+ for i in resp['endpoints']:
+ if list(i.values())[0]['name']:
+ self.generic_endps_profile.created_cx.append('CX_' + list(i.values())[0]['name'])
+ self.generic_endps_profile.created_endp.append(list(i.values())[0]['name'])
+ self.generic_endps_profile.cleanup()
+ # if port_5000 or port_5002 or port_5003:
+ # print('port cleanup......')
+ # time.sleep(5)
+ # hostname = self.lanforge_ip
+ # username = "root"
+ # password = "lanforge"
+ # ports = []
+ # if port_5003:
+ # ports.append(5003)
+ # if port_5000:
+ # ports.append(5000)
+ # if port_5002:
+ # ports.append(5002)
+ # # ssh = paramiko.SSHClient()
+ # # ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+ # # ssh.connect(hostname, username=username, password=password)
+
+ # # for cmd in commands:
+ # # print(f"--- Running: {cmd} ---")
+ # # stdin, stdout, stderr = ssh.exec_command(cmd)
+ # # print("Output:\n", stdout.read().decode())
+ # # print("Errors:\n", stderr.read().decode())
+ # # ssh.close()
+
+ # ssh = paramiko.SSHClient()
+ # ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+ # ssh.connect(hostname, username=username, password=password)
+
+ # # for port in ports:
+ # # print(f"\n--- Checking port {port} ---")
+
+ # # try:
+ # # check_cmd = f"lsof -i :{port}"
+ # # stdin, stdout, stderr = ssh.exec_command(check_cmd, timeout=10) # ⬅ timeout added
+ # # output = stdout.read().decode().strip()
+ # # error = stderr.read().decode().strip()
+
+ # # if output:
+ # # print(f"Processes using port {port}:\n{output}")
+
+ # # # kill_cmd = f"fuser -kv {port}/tcp"
+ # # # kill_cmd = f"fuser -k {port}/tcp"
+ # # kill_cmd = f"fuser -k {port}/tcp || true"
+ # # stdin, stdout, stderr = ssh.exec_command(kill_cmd, timeout=10)
+ # # print("Kill Output:\n", stdout.read().decode())
+ # # print("Kill Errors:\n", stderr.read().decode())
+ # # else:
+ # # print(f"No process found on port {port}")
+
+ # # except Exception as e:
+ # # print(f"Error checking port {port}: {e}")
+
+ # for port in ports:
+ # print(f"\n--- Checking port {port} ---")
+
+ # try:
+ # # Get only the PIDs of processes using this port
+ # check_cmd = f"lsof -t -i:{port}"
+ # stdin, stdout, stderr = ssh.exec_command(check_cmd, timeout=10)
+ # pids = stdout.read().decode().strip().splitlines()
+
+ # if pids:
+ # print(f"Processes using port {port}: {', '.join(pids)}")
+
+ # # Kill each PID safely
+ # for pid in pids:
+ # kill_cmd = f"kill -9 {pid}"
+ # ssh.exec_command(kill_cmd, timeout=10)
+ # print(f"Killed PID {pid} on port {port}")
+ # else:
+ # print(f"No process found on port {port}")
+
+ # except Exception as e:
+ # print(f"Error checking port {port}: {e}")
+
+ # ssh.close()
+
+    def get_device_info(self):
+        """
+        Fetches all the real devices clustered to the LANforge
+
+        Returns:
+            interop_tab_response: if invalid response code. Response code other than 200.
+            all_devices (dict): returns both the port data and resource mgr data with shelf.resource as the key
+        """
+        androids, linux, macbooks, windows, iOS = [], [], [], [], []
+        all_devices = {}
+
+        # querying resource manager tab for fetching laptops data
+        resource_manager_tab_response, resource_manager_data = self.api_get(
+            endp='/resource/all')
+        if resource_manager_tab_response.status_code != 200:
+            logger.info('Error fetching the data with the {}. Returned {}'.format(
+                '/resources/all', resource_manager_tab_response))
+            return resource_manager_tab_response
+        # The API uses 'resource' (single entry) or 'resources' (list); pick
+        # whichever key is present.
+        resources_list = [resource_manager_data['resource']
+                          if 'resource' in resource_manager_data else resource_manager_data['resources']][0]
+        for resource in resources_list:
+            # Each entry is a one-key dict: {'shelf.resource': {...data...}}.
+            resource_port, resource_data = list(resource.keys())[
+                0], list(resource.values())[0]
+            if resource_data['phantom']:
+                continue
+            # ct-kernel False -> not a LANforge system itself, i.e. a real
+            # client device; classify it by its reported hardware string.
+            if resource_data['ct-kernel'] is False:
+                # app-id '0' -> no Interop app installed, so treat as a laptop.
+                if resource_data['app-id'] == '0':
+                    if 'Win' in resource_data['hw version']:
+                        windows.append(resource_data)
+                    elif 'Apple' in resource_data['hw version']:
+                        macbooks.append(resource_data)
+                    elif 'Linux' in resource_data['hw version']:
+                        linux.append(resource_data)
+                else:
+                    if 'Apple' in resource_data['hw version']:
+                        iOS.append(resource_data)
+                    else:
+                        androids.append(resource_data)
+                all_devices[resource_port] = resource_data
+                shelf, resource = resource_port.split('.')
+                _, port_data = self.api_get(endp='/port/{}/{}'.format(shelf, resource))
+                # Single-interface responses use 'interface'; normalise to the
+                # list form under 'interfaces'.
+                if 'interface' in port_data.keys():
+                    port_data['interfaces'] = [port_data['interface']]
+                for port_id in port_data['interfaces']:
+                    port_id_values = list(port_id.values())[0]
+                    _, all_columns = self.api_get(endp=port_id_values['_links'])
+                    all_columns = all_columns['interface']
+                    # Merge in details only for the interface parented to
+                    # wiphy0 — presumably the Wi-Fi station port; confirm for
+                    # multi-radio systems.
+                    if all_columns['parent dev'] == 'wiphy0':
+                        all_devices[resource_port].update(all_columns)
+        return all_devices
+
+ def get_client_connection_details(self, device_list: list):
+ """
+ Method to return SSID, BSSID and Signal Strength details of the ports mentioned in the device list argument.
+
+ Args:
+ device_list (list): List of all the ports. E.g., ['1.10.wlan0', '1.11.wlan0']
+
+ Returns:
+ connection_details (dict): Dictionary containing port number as the key and SSID, BSSID, Signal as the values for each device in the device_list.
+ """
+ connection_details = {}
+ for device in device_list:
+ shelf, resource, port_name = device.split('.')
+ _, device_data = self.api_get('/port/{}/{}/{}?fields=phantom,down,ssid,ap,signal,mac'.format(shelf, resource, port_name))
+ device_data = device_data['interface']
+ if device_data['phantom'] or device_data['down']:
+ print('{} is in phantom state or down state, data may not be accurate.'.format(device))
+ connection_details[device] = device_data
+ return connection_details
+
+ def filter_iOS_devices(self, device_list):
+ modified_device_list = device_list
+ if type(device_list) is str:
+ modified_device_list = device_list.split(',')
+ filtered_list = []
+ for device in modified_device_list:
+ if device.count('.') == 1:
+ shelf, resource = device.split('.')
+ elif device.count('.') == 2:
+ shelf, resource, port = device.split('.')
+ elif device.count('.') == 0:
+ shelf, resource = 1, device
+ response_code, device_data = self.api_get('/resource/{}/{}'.format(shelf, resource))
+ if 'status' in device_data and device_data['status'] == 'NOT_FOUND':
+ print('Device {} is not found.'.format(device))
+ continue
+ device_data = device_data['resource']
+ # print(device_data)
+ if 'Apple' in device_data['hw version'] and (device_data['app-id'] != '') and (device_data['app-id'] != '0' or device_data['kernel'] == ''):
+ print('{} is an iOS device. Currently we do not support iOS devices.'.format(device))
+ else:
+ filtered_list.append(device)
+ if type(device_list) is str:
+ filtered_list = ','.join(filtered_list)
+ return filtered_list
+
+    def render_overall_report(self,test_name=""):
+        """
+        Seed the per-test run counter used by the overall report.
+
+        NOTE(review): appears to be an unfinished stub — only "http_test" is
+        handled and no report is actually rendered yet; confirm intent before
+        relying on this method.
+
+        Args:
+            test_name (str, optional): Name of the test whose counter to seed.
+        """
+        if test_name == "http_test":
+            if test_name not in self.test_count_dict:
+                # First occurrence of this test in the current run.
+                self.test_count_dict[test_name] = 1
+
+
+
+ def run_ping_test(
+ self,
+ target: str = '1.1.eth1',
+ ping_interval: str = '1',
+ ping_duration: float = 1,
+ ssid: str = None,
+ mgr_passwd: str = 'lanforge',
+ server_ip: str = None,
+ security: str = 'open',
+ passwd: str = '[BLANK]',
+ virtual: bool = False,
+ num_sta: int = 1,
+ radio: str = None,
+ real: bool = True,
+ use_default_config: bool = True,
+ debug: bool = False,
+ local_lf_report_dir: str = "",
+ log_level: str = None,
+ lf_logger_config_json: str = None,
+ help_summary: bool = False,
+ group_name: str = None,
+ profile_name: str = None,
+ file_name: str = None,
+ eap_method: str = 'DEFAULT',
+ eap_identity: str = '',
+ ieee8021x: bool = False,
+ ieee80211u: bool = False,
+ ieee80211w: int = 1,
+ enable_pkc: bool = False,
+ bss_transition: bool = False,
+ power_save: bool = False,
+ disable_ofdma: bool = False,
+ roam_ft_ds: bool = False,
+ key_management: str = 'DEFAULT',
+ pairwise: str = '[BLANK]',
+ private_key: str = '[BLANK]',
+ ca_cert: str = '[BLANK]',
+ client_cert: str = '[BLANK]',
+ pk_passwd: str = '[BLANK]',
+ pac_file: str = '[BLANK]',
+ expected_passfail_value: str = None,
+ device_csv_name: str = None,
+ wait_time: int = 60,
+ dev_list: str = None
+ ):
+ # set the logger level to debug
+ logger_config = lf_logger_config.lf_logger_config()
+
+ if log_level:
+ logger_config.set_level(level=log_level)
+
+ if lf_logger_config_json:
+ # logger_config.lf_logger_config_json = "lf_logger_config.json"
+ logger_config.lf_logger_config_json = lf_logger_config_json
+ logger_config.load_lf_logger_config()
+ # validate_args(args)
+
+ mgr_ip = self.lanforge_ip
+ mgr_password = mgr_passwd
+ mgr_port = self.port
+ server_ip = server_ip
+ ssid = ssid
+ security = security
+ password = passwd
+ num_sta = num_sta
+ radio = radio
+ target = target
+ interval = ping_interval
+ duration = ping_duration
+ configure = not use_default_config
+ debug = debug
+ group_name = group_name
+ file_name = file_name
+ profile_name = profile_name
+ eap_method = eap_method
+ eap_identity = eap_identity
+ ieee80211 = ieee8021x
+ ieee80211u = ieee80211u
+ ieee80211w = ieee80211w
+ enable_pkc = enable_pkc
+ bss_transition = bss_transition
+ power_save = power_save
+ disable_ofdma = disable_ofdma
+ roam_ft_ds = roam_ft_ds
+ key_management = key_management
+ pairwise = pairwise
+ private_key = private_key
+ ca_cert = ca_cert
+ client_cert = client_cert
+ pk_passwd = pk_passwd
+ pac_file = pac_file
+ if (debug):
+ print('''Specified configuration:
+ ip: {}
+ port: {}
+ ssid: {}
+ security: {}
+ password: {}
+ target: {}
+ Ping interval: {}
+ Packet Duration (in min): {}
+ virtual: {}
+ num of virtual stations: {}
+ radio: {}
+ real: {}
+ debug: {}
+ '''.format(mgr_ip, mgr_port, ssid, security, password, target, interval, duration, virtual, num_sta, radio, real, debug))
+
+ ce = self.current_exec #seires
+ if ce == "parallel":
+ obj_name = "ping_test"
+ else:
+ obj_no = 1
+ while f"ping_test_{obj_no}" in self.ping_obj_dict[ce]:
+ obj_no+=1
+ obj_name = f"ping_test_{obj_no}"
+ self.ping_obj_dict[ce][obj_name] = {"obj":None,"data":None}
+ # ping object creation
+ self.ping_obj_dict[ce][obj_name]["obj"] = Ping(host=mgr_ip, port=mgr_port, ssid=ssid, security=security, password=password, radio=radio,
+ lanforge_password=mgr_password, target=target, interval=interval, sta_list=[], virtual=virtual, real=real, duration=duration, debug=debug, csv_name=device_csv_name,
+ expected_passfail_val=expected_passfail_value, wait_time=wait_time, group_name=group_name)
+
+ # changing the target from port to IP
+ self.ping_obj_dict[ce][obj_name]["obj"].change_target_to_ip()
+
+ # creating virtual stations if --virtual flag is specified
+ if (virtual):
+
+ logging.info('Proceeding to create {} virtual stations on {}'.format(num_sta, radio))
+ station_list = LFUtils.portNameSeries(
+ prefix_='sta', start_id_=0, end_id_=num_sta - 1, padding_number_=100000, radio=radio)
+ self.ping_obj_dict[ce][obj_name]["obj"].sta_list = station_list
+ if (debug):
+ logging.info('Virtual Stations: {}'.format(station_list).replace(
+ '[', '').replace(']', '').replace('\'', ''))
+
+ # selecting real clients if --real flag is specified
+ if (real):
+ Devices = RealDevice(manager_ip=mgr_ip, selected_bands=[])
+ Devices.get_devices()
+ self.ping_obj_dict[ce][obj_name]["obj"].Devices = Devices
+ # self.ping_obj_dict[ce][obj_name]["obj"].select_real_devices(real_devices=Devices)
+ # If config is True, attempt to bring up all devices in the list and perform tests on those that become active
+ if (configure):
+ config_devices = {}
+ obj = DeviceConfig.DeviceConfig(lanforge_ip=mgr_ip, file_name=file_name, wait_time=wait_time)
+ # Case 1: Group name, file name, and profile name are provided
+ if group_name and file_name and profile_name:
+ selected_groups = group_name.split(',')
+ selected_profiles = profile_name.split(',')
+ for i in range(len(selected_groups)):
+ config_devices[selected_groups[i]] = selected_profiles[i]
+ obj.initiate_group()
+ group_device_map = obj.get_groups_devices(data=selected_groups, groupdevmap=True)
+ # Configure devices in the selected group with the selected profile
+ eid_list = asyncio.run(obj.connectivity(config=config_devices, upstream=server_ip))
+ Devices.get_devices()
+ self.ping_obj_dict[ce][obj_name]["obj"].select_real_devices(real_devices=Devices, device_list=eid_list)
+ # Case 2: Device list is empty but config flag is True — prompt the user to input device details for configuration
+ else:
+ all_devices = obj.get_all_devices()
+ device_list = []
+ config_dict = {
+ 'ssid': ssid,
+ 'passwd': password,
+ 'enc': security,
+ 'eap_method': eap_method,
+ 'eap_identity': eap_identity,
+ 'ieee80211': ieee80211,
+ 'ieee80211u': ieee80211u,
+ 'ieee80211w': ieee80211w,
+ 'enable_pkc': enable_pkc,
+ 'bss_transition': bss_transition,
+ 'power_save': power_save,
+ 'disable_ofdma': disable_ofdma,
+ 'roam_ft_ds': roam_ft_ds,
+ 'key_management': key_management,
+ 'pairwise': pairwise,
+ 'private_key': private_key,
+ 'ca_cert': ca_cert,
+ 'client_cert': client_cert,
+ 'pk_passwd': pk_passwd,
+ 'pac_file': pac_file,
+ 'server_ip': server_ip,
+ }
+ for device in all_devices:
+ if device["type"] == 'laptop':
+ device_list.append(device["shelf"] + '.' + device["resource"] + " " + device["hostname"])
+ else:
+ device_list.append(device["eid"] + " " + device["serial"])
+ logger.info(f"Available devices: {device_list}")
+ if dev_list is None:
+ dev_list = input("Enter the desired resources to run the test:")
+ dev_list = dev_list.split(',')
+ dev_list = asyncio.run(obj.connectivity(device_list=dev_list, wifi_config=config_dict))
+ Devices.get_devices()
+ self.ping_obj_dict[ce][obj_name]["obj"].select_real_devices(real_devices=Devices, device_list=dev_list)
+ # Case 3: Config is False, no device list is provided, and no group is selected
+ # Prompt the user to manually input devices for running the test
+ else:
+ device_list = self.ping_obj_dict[ce][obj_name]["obj"].Devices.get_devices()
+ logger.info(f"Available devices: {device_list}")
+ if dev_list is None:
+ dev_list = input("Enter the desired resources to run the test:")
+ dev_list = dev_list.split(',')
+ # dev_list = input("Enter the desired resources to run the test:").split(',')
+ self.ping_obj_dict[ce][obj_name]["obj"].select_real_devices(real_devices=Devices, device_list=dev_list)
+
+ # station precleanup
+ self.ping_obj_dict[ce][obj_name]["obj"].cleanup() #11 change
+
+ # building station if virtual
+ if (virtual):
+ self.ping_obj_dict[ce][obj_name]["obj"].buildstation()
+
+ # check if generic tab is enabled or not
+ if (not self.ping_obj_dict[ce][obj_name]["obj"].check_tab_exists()):
+ logging.error('Generic Tab is not available.\nAborting the test.')
+ return False
+
+ self.ping_obj_dict[ce][obj_name]["obj"].sta_list += self.ping_obj_dict[ce][obj_name]["obj"].real_sta_list
+
+ # creating generic endpoints
+ self.ping_obj_dict[ce][obj_name]["obj"].create_generic_endp()
+
+ logging.info(self.ping_obj_dict[ce][obj_name]["obj"].generic_endps_profile.created_cx)
+
+ # run the test for the given duration
+ logging.info('Running the ping test for {} minutes'.format(duration))
+
+ # start generate endpoint
+ self.ping_obj_dict[ce][obj_name]["obj"].start_generic()
+ # time_counter = 0
+ ports_data_dict = self.ping_obj_dict[ce][obj_name]["obj"].json_get('/ports/all/')['interfaces']
+ ports_data = {}
+ for ports in ports_data_dict:
+ port, port_data = list(ports.keys())[0], list(ports.values())[0]
+ ports_data[port] = port_data
+ ping_duration = duration
+ if self.dowebgui:
+ try:
+ with open(self.result_dir + "/../../Running_instances/{}_{}_running.json".format(self.lanforge_ip, self.test_name), 'r') as file:
+ data = json.load(file)
+ if data["status"] != "Running":
+ logging.info('Test is stopped by the user')
+ self.test_stopped = True
+ if not self.test_stopped:
+ self.overall_status['ping'] = "started"
+ self.overall_status["time"] = datetime.datetime.now().strftime("%Y %d %H:%M:%S")
+ self.overall_status["current_mode"] = self.current_exec
+ self.overall_status["current_test_name"] = "ping"
+ self.overall_csv.append(self.overall_status.copy())
+ df1 = pd.DataFrame(self.overall_csv)
+ df1.to_csv('{}/overall_status.csv'.format(self.result_dir), index=False)
+ except BaseException:
+ logger.info("Error while running for webui during ping execution")
+ if self.test_stopped:
+ logger.info("test has been stopped by the user")
+ return False
+ start_time = datetime.datetime.now()
+ end_time = start_time + datetime.timedelta(seconds=ping_duration * 60)
+ temp_json = []
+ while (datetime.datetime.now() < end_time):
+ temp_json = []
+ temp_checked_sta = []
+ temp_result_data = self.ping_obj_dict[ce][obj_name]["obj"].get_results()
+ if isinstance(temp_result_data, dict):
+ for station in self.ping_obj_dict[ce][obj_name]["obj"].real_sta_list:
+ current_device_data = ports_data[station]
+ if (station in temp_result_data['name']):
+ temp_json.append({
+ 'device': station,
+ 'sent': temp_result_data['tx pkts'],
+ 'recv': temp_result_data['rx pkts'],
+ 'dropped': temp_result_data['dropped'],
+ 'status': "Running",
+ 'start_time': start_time.strftime("%d/%m %I:%M:%S %p"),
+ 'end_time': end_time.strftime("%d/%m %I:%M:%S %p"),
+ "remaining_time": ""
+ })
+ else:
+ for station in self.ping_obj_dict[ce][obj_name]["obj"].real_sta_list:
+ current_device_data = ports_data[station]
+ for ping_device in temp_result_data:
+ ping_endp, ping_data = list(ping_device.keys())[0], list(ping_device.values())[0]
+ if station.split('-')[-1] in ping_endp and station not in temp_checked_sta:
+ temp_checked_sta.append(station)
+ temp_json.append({
+ 'device': station,
+ 'sent': ping_data['tx pkts'],
+ 'recv': ping_data['rx pkts'],
+ 'dropped': ping_data['dropped'],
+ 'status': "Running",
+ 'start_time': start_time.strftime("%d/%m %I:%M:%S %p"),
+ 'end_time': end_time.strftime("%d/%m %I:%M:%S %p"),
+ "remaining_time": ""
+ })
+ df1 = pd.DataFrame(temp_json)
+ df1.to_csv('{}/ping_datavalues.csv'.format(self.result_dir), index=False)
+ # try:
+ # with open(self.result_dir + "/../../Running_instances/{}_{}_running.json".format(self.host, self.test_name), 'r') as file:
+ # data = json.load(file)
+ # if data["status"] != "Running":
+ # logging.info('Test is stopped by the user')
+ # break
+ # except BaseException:
+ # logging.info("execption while reading running json in ping")
+ time.sleep(3)
+ else:
+ time.sleep(ping_duration * 60)
+
+ logging.info('Stopping the test')
+ self.ping_obj_dict[ce][obj_name]["obj"].stop_generic()
+
+ result_data = self.ping_obj_dict[ce][obj_name]["obj"].get_results()
+ # logging.info(result_data)
+ logging.info(self.ping_obj_dict[ce][obj_name]["obj"].result_json)
+ if (virtual):
+ ports_data_dict = self.ping_obj_dict[ce][obj_name]["obj"].json_get('/ports/all/')['interfaces']
+ ports_data = {}
+ for ports in ports_data_dict:
+ port, port_data = list(ports.keys())[0], list(ports.values())[0]
+ ports_data[port] = port_data
+ if (isinstance(result_data, dict)):
+ for station in self.ping_obj_dict[ce][obj_name]["obj"].sta_list:
+ if (station not in self.ping_obj_dict[ce][obj_name]["obj"].real_sta_list):
+ current_device_data = ports_data[station]
+ if (station.split('.')[2] in result_data['name']):
+ try:
+ self.ping_obj_dict[ce][obj_name]["obj"].result_json[station] = {
+ 'command': result_data['command'],
+ 'sent': result_data['tx pkts'],
+ 'recv': result_data['rx pkts'],
+ 'dropped': result_data['dropped'],
+ 'min_rtt': [result_data['last results'].split('\n')[-2].split()[-1].split('/')[0] if len(result_data['last results']) != 0 and 'min/avg/max' in result_data['last results'].split('\n')[-2] else '0'][0], # noqa E501
+ 'avg_rtt': [result_data['last results'].split('\n')[-2].split()[-1].split('/')[1] if len(result_data['last results']) != 0 and 'min/avg/max' in result_data['last results'].split('\n')[-2] else '0'][0], # noqa E501
+ 'max_rtt': [result_data['last results'].split('\n')[-2].split()[-1].split('/')[2] if len(result_data['last results']) != 0 and 'min/avg/max' in result_data['last results'].split('\n')[-2] else '0'][0], # noqa E501
+ 'mac': current_device_data['mac'],
+ 'channel': current_device_data['channel'],
+ 'ssid': current_device_data['ssid'],
+ 'mode': current_device_data['mode'],
+ 'name': station,
+ 'os': 'Virtual',
+ 'remarks': [],
+ 'last_result': [result_data['last results'].split('\n')[-2] if len(result_data['last results']) != 0 else ""][0]
+ }
+ self.ping_obj_dict[ce][obj_name]["obj"].result_json[station]['remarks'] = self.ping_obj_dict[ce][obj_name]["obj"].generate_remarks(self.ping_obj_dict[ce][obj_name]["obj"].result_json[station])
+ except BaseException:
+ logging.error('Failed parsing the result for the station {}'.format(station))
+
+ else:
+ for station in self.ping_obj_dict[ce][obj_name]["obj"].sta_list:
+ if (station not in self.ping_obj_dict[ce][obj_name]["obj"].real_sta_list):
+ current_device_data = ports_data[station]
+ for ping_device in result_data:
+ ping_endp, ping_data = list(ping_device.keys())[
+ 0], list(ping_device.values())[0]
+ if (station.split('.')[2] in ping_endp):
+ try:
+ self.ping_obj_dict[ce][obj_name]["obj"].result_json[station] = {
+ 'command': ping_data['command'],
+ 'sent': ping_data['tx pkts'],
+ 'recv': ping_data['rx pkts'],
+ 'dropped': ping_data['dropped'],
+ 'min_rtt': [ping_data['last results'].split('\n')[-2].split()[-1].split('/')[0] if len(ping_data['last results']) != 0 and 'min/avg/max' in ping_data['last results'].split('\n')[-2] else '0'][0], # noqa E501
+ 'avg_rtt': [ping_data['last results'].split('\n')[-2].split()[-1].split('/')[1] if len(ping_data['last results']) != 0 and 'min/avg/max' in ping_data['last results'].split('\n')[-2] else '0'][0], # noqa E501
+ 'max_rtt': [ping_data['last results'].split('\n')[-2].split()[-1].split('/')[2] if len(ping_data['last results']) != 0 and 'min/avg/max' in ping_data['last results'].split('\n')[-2] else '0'][0], # noqa E501
+ 'mac': current_device_data['mac'],
+ 'ssid': current_device_data['ssid'],
+ 'channel': current_device_data['channel'],
+ 'mode': current_device_data['mode'],
+ 'name': station,
+ 'os': 'Virtual',
+ 'remarks': [],
+ 'last_result': [ping_data['last results'].split('\n')[-2] if len(ping_data['last results']) != 0 else ""][0]
+ }
+ self.ping_obj_dict[ce][obj_name]["obj"].result_json[station]['remarks'] = self.ping_obj_dict[ce][obj_name]["obj"].generate_remarks(self.ping_obj_dict[ce][obj_name]["obj"].result_json[station])
+ except BaseException:
+ logging.error('Failed parsing the result for the station {}'.format(station))
+
+ if (real):
+ if (isinstance(result_data, dict)):
+ for station in self.ping_obj_dict[ce][obj_name]["obj"].real_sta_list:
+ current_device_data = Devices.devices_data[station]
+ # logging.info(current_device_data)
+ if (station in result_data['name']):
+ try:
+ # logging.info(result_data['last results'].split('\n'))
+ self.ping_obj_dict[ce][obj_name]["obj"].result_json[station] = {
+ 'command': result_data['command'],
+ 'sent': result_data['tx pkts'],
+ 'recv': result_data['rx pkts'],
+ 'dropped': result_data['dropped'],
+ 'min_rtt': [result_data['last results'].split('\n')[-2].split()[-1].split(':')[-1].split('/')[0] if len(result_data['last results']) != 0 and 'min/avg/max' in result_data['last results'].split('\n')[-2] else '0'][0], # noqa E501
+ 'avg_rtt': [result_data['last results'].split('\n')[-2].split()[-1].split(':')[-1].split('/')[1] if len(result_data['last results']) != 0 and 'min/avg/max' in result_data['last results'].split('\n')[-2] else '0'][0], # noqa E501
+ 'max_rtt': [result_data['last results'].split('\n')[-2].split()[-1].split(':')[-1].split('/')[2] if len(result_data['last results']) != 0 and 'min/avg/max' in result_data['last results'].split('\n')[-2] else '0'][0], # noqa E501
+ 'mac': current_device_data['mac'],
+ 'ssid': current_device_data['ssid'],
+ 'channel': current_device_data['channel'],
+ 'mode': current_device_data['mode'],
+ 'name': [current_device_data['user'] if current_device_data['user'] != '' else current_device_data['hostname']][0],
+ 'os': ['Windows' if 'Win' in current_device_data['hw version'] else 'Linux' if 'Linux' in current_device_data['hw version'] else 'Mac' if 'Apple' in current_device_data['hw version'] else 'Android'][0], # noqa E501
+ 'remarks': [],
+ 'last_result': [result_data['last results'].split('\n')[-2] if len(result_data['last results']) != 0 else ""][0]
+ }
+ self.ping_obj_dict[ce][obj_name]["obj"].result_json[station]['remarks'] = self.ping_obj_dict[ce][obj_name]["obj"].generate_remarks(self.ping_obj_dict[ce][obj_name]["obj"].result_json[station])
+ except BaseException:
+ logging.error('Failed parsing the result for the station {}'.format(station))
+ else:
+ for station in self.ping_obj_dict[ce][obj_name]["obj"].real_sta_list:
+ current_device_data = Devices.devices_data[station]
+ for ping_device in result_data:
+ ping_endp, ping_data = list(ping_device.keys())[
+ 0], list(ping_device.values())[0]
+ if (station in ping_endp):
+ try:
+ self.ping_obj_dict[ce][obj_name]["obj"].result_json[station] = {
+ 'command': ping_data['command'],
+ 'sent': ping_data['tx pkts'],
+ 'recv': ping_data['rx pkts'],
+ 'dropped': ping_data['dropped'],
+ 'min_rtt': [ping_data['last results'].split('\n')[-2].split()[-1].split(':')[-1].split('/')[0] if len(ping_data['last results']) != 0 and 'min/avg/max' in ping_data['last results'].split('\n')[-2] else '0'][0], # noqa E501
+ 'avg_rtt': [ping_data['last results'].split('\n')[-2].split()[-1].split(':')[-1].split('/')[1] if len(ping_data['last results']) != 0 and 'min/avg/max' in ping_data['last results'].split('\n')[-2] else '0'][0], # noqa E501
+ 'max_rtt': [ping_data['last results'].split('\n')[-2].split()[-1].split(':')[-1].split('/')[2] if len(ping_data['last results']) != 0 and 'min/avg/max' in ping_data['last results'].split('\n')[-2] else '0'][0], # noqa E501
+ 'mac': current_device_data['mac'],
+ 'ssid': current_device_data['ssid'],
+ 'channel': current_device_data['channel'],
+ 'mode': current_device_data['mode'],
+ 'name': [current_device_data['user'] if current_device_data['user'] != '' else current_device_data['hostname']][0],
+ 'os': ['Windows' if 'Win' in current_device_data['hw version'] else 'Linux' if 'Linux' in current_device_data['hw version'] else 'Mac' if 'Apple' in current_device_data['hw version'] else 'Android'][0], # noqa E501
+ 'remarks': [],
+ 'last_result': [ping_data['last results'].split('\n')[-2] if len(ping_data['last results']) != 0 else ""][0]
+ }
+ self.ping_obj_dict[ce][obj_name]["obj"].result_json[station]['remarks'] = self.ping_obj_dict[ce][obj_name]["obj"].generate_remarks(self.ping_obj_dict[ce][obj_name]["obj"].result_json[station])
+ except BaseException:
+ logging.error('Failed parsing the result for the station {}'.format(station))
+
+ logging.info(self.ping_obj_dict[ce][obj_name]["obj"].result_json)
+
+ # station post cleanup
+ self.ping_obj_dict[ce][obj_name]["obj"].cleanup() #12 change
+ if self.dowebgui:
+ temp_json = []
+ for station in self.ping_obj_dict[ce][obj_name]["obj"].result_json:
+ logging.debug('{} {}'.format(station, self.ping_obj_dict[ce][obj_name]["obj"].result_json[station]))
+ temp_json.append({'device': station,
+ 'sent': self.ping_obj_dict[ce][obj_name]["obj"].result_json[station]['sent'],
+ 'recv': self.ping_obj_dict[ce][obj_name]["obj"].result_json[station]['recv'],
+ 'dropped': self.ping_obj_dict[ce][obj_name]["obj"].result_json[station]['dropped'],
+ 'status': "Stopped",
+ 'start_time': start_time.strftime("%d/%m %I:%M:%S %p"),
+ 'end_time': end_time.strftime("%d/%m %I:%M:%S %p"),
+ "remaining_time": ""})
+ df1 = pd.DataFrame(temp_json)
+ df1.to_csv('{}/ping_datavalues.csv'.format(self.result_dir), index=False)
+ if local_lf_report_dir == "":
+ # Report generation when groups are specified but no custom report path is provided
+ if group_name:
+ self.ping_obj_dict[ce][obj_name]["obj"].generate_report(config_devices=config_devices, group_device_map=group_device_map)
+ # Report generation when no group is specified and no custom report path is provided
+ else:
+ self.ping_obj_dict[ce][obj_name]["obj"].generate_report()
+ else:
+ # Report generation when groups are specified and a custom report path is provided
+ if group_name:
+ self.ping_obj_dict[ce][obj_name]["obj"].generate_report(config_devices=config_devices, group_device_map=group_device_map, report_path=local_lf_report_dir)
+ # Report generation when no group is specified but a custom report path is provided
+ else:
+ self.ping_obj_dict[ce][obj_name]["obj"].generate_report(report_path=local_lf_report_dir)
+ params = {
+ "result_json": None,
+ "result_dir": "Ping_Test_Report",
+ "report_path": "",
+ "config_devices": "",
+ "group_device_map": {}
+ }
+
+ if local_lf_report_dir != "":
+ params["report_path"] = local_lf_report_dir
+
+ if group_name:
+ params["config_devices"] = config_devices
+ params["group_device_map"] = group_device_map
+ self.ping_obj_dict[ce][obj_name]["data"] = params.copy()
+ if self.dowebgui:
+ self.webgui_test_done("ping")
+ return True
+
+ def run_http_test(
+ self,
+ upstream_port='eth2',
+ num_stations=0,
+ twog_radio='wiphy3',
+ fiveg_radio='wiphy0',
+ sixg_radio='wiphy2',
+ twog_security=None,
+ twog_ssid=None,
+ twog_passwd=None,
+ fiveg_security=None,
+ fiveg_ssid=None,
+ fiveg_passwd=None,
+ sixg_security=None,
+ sixg_ssid=None,
+ sixg_passwd=None,
+ target_per_ten=100,
+ file_size='5MB',
+ bands=["5G", "2.4G", "6G"],
+ duration=None,
+ client_type="Real",
+ threshold_5g="60",
+ threshold_2g="90",
+ threshold_both="50",
+ ap_name="TestAP",
+ lf_username="lanforge",
+ lf_password="lanforge",
+ ssh_port=22,
+ test_rig="",
+ test_tag="",
+ dut_hw_version="",
+ dut_sw_version="",
+ dut_model_num="",
+ dut_serial_num="",
+ test_priority="",
+ test_id="lf_webpage",
+ csv_outfile="",
+ dowebgui=False,
+ result_dir='',
+ device_list=[],
+ test_name=None,
+ get_url_from_file=False,
+ file_path=None,
+ help_summary=False,
+ ssid=None,
+ passwd='',
+ security=None,
+ file_name=None,
+ group_name=None,
+ profile_name=None,
+ eap_method='DEFAULT',
+ eap_identity='',
+ ieee8021x=False,
+ ieee80211u=False,
+ ieee80211w=1,
+ enable_pkc=False,
+ bss_transition=False,
+ power_save=False,
+ disable_ofdma=False,
+ roam_ft_ds=False,
+ key_management='DEFAULT',
+ pairwise='NA',
+ private_key='NA',
+ ca_cert='NA',
+ client_cert='NA',
+ pk_passwd='NA',
+ pac_file='NA',
+ expected_passfail_value=None,
+ device_csv_name=None,
+ wait_time=60,
+ config=False,
+ get_live_view=False,
+ total_floors="0"
+ ):
+ if self.dowebgui:
+ if not self.webgui_stop_check("http"):
+ return False
+ bands.sort()
+
+ # Error checking to prevent case issues
+ for band in range(len(bands)):
+ bands[band] = bands[band].upper()
+ if bands[band] == "BOTH":
+ bands[band] = "Both"
+
+ # Error checking for non-existent bands
+ valid_bands = ['2.4G', '5G', '6G', 'Both']
+ for band in bands:
+ if band not in valid_bands:
+ raise ValueError("Invalid band '%s' used in bands argument!" % band)
+
+ # Check for Both being used independently
+ if len(bands) > 1 and "Both" in bands:
+ raise ValueError("'Both' test type must be used independently!")
+
+ # validate_args(args)
+ if duration.endswith('s') or duration.endswith('S'):
+ duration = int(duration[0:-1])
+ elif duration.endswith('m') or duration.endswith('M'):
+ duration = int(duration[0:-1]) * 60
+ elif duration.endswith('h') or duration.endswith('H'):
+ duration = int(duration[0:-1]) * 60 * 60
+ elif duration.endswith(''):
+ duration = int(duration)
+
+ list6G, list6G_bytes, list6G_speed, list6G_urltimes = [], [], [], []
+ list5G, list5G_bytes, list5G_speed, list5G_urltimes = [], [], [], []
+ list2G, list2G_bytes, list2G_speed, list2G_urltimes = [], [], [], []
+ Both, Both_bytes, Both_speed, Both_urltimes = [], [], [], []
+ listReal, listReal_bytes, listReal_speed, listReal_urltimes = [], [], [], [] # For real devices (not band specific)
+ dict_keys = []
+ dict_keys.extend(bands)
+ # print(dict_keys)
+ final_dict = dict.fromkeys(dict_keys)
+ # print(final_dict)
+ dict1_keys = ['dl_time', 'min', 'max', 'avg', 'bytes_rd', 'speed', 'url_times']
+ for i in final_dict:
+ final_dict[i] = dict.fromkeys(dict1_keys)
+ print(final_dict)
+ min6 = []
+ min5 = []
+ min2 = []
+ min_both = []
+ max6 = []
+ max5 = []
+ max2 = []
+ max_both = []
+ avg6 = []
+ avg2 = []
+ avg5 = []
+ avg_both = []
+ port_list, dev_list, macid_list = [], [], []
+ for band in bands:
+ # For real devices while ensuring no blocker for Virtual devices
+ if client_type == 'Real':
+ ssid = ssid
+ passwd = passwd
+ security = security
+ elif band == "2.4G":
+ security = [twog_security]
+ ssid = [twog_ssid]
+ passwd = [twog_passwd]
+ elif band == "5G":
+ security = [fiveg_security]
+ ssid = [fiveg_ssid]
+ passwd = [fiveg_passwd]
+ elif band == "6G":
+ security = [sixg_security]
+ ssid = [sixg_ssid]
+ passwd = [sixg_passwd]
+ elif band == "Both":
+ security = [twog_security, fiveg_security]
+ ssid = [twog_ssid, fiveg_ssid]
+ passwd = [twog_passwd, fiveg_passwd]
+ ce = self.current_exec #seires
+ if ce == "parallel":
+ obj_name = "http_test"
+ else:
+ obj_no = 1
+ while f"http_test_{obj_no}" in self.http_obj_dict[ce]:
+ obj_no+=1
+ obj_name = f"http_test_{obj_no}"
+ self.http_obj_dict[ce][obj_name] = {"obj":None,"data":None}
+
+ self.http_obj_dict[ce][obj_name]["obj"] = http_test.HttpDownload(lfclient_host=self.lanforge_ip, lfclient_port=self.port,
+ upstream=upstream_port, num_sta=num_stations,
+ security=security, ap_name=ap_name,
+ ssid=ssid, password=passwd,
+ target_per_ten=target_per_ten,
+ file_size=file_size, bands=band,
+ twog_radio=twog_radio,
+ fiveg_radio=fiveg_radio,
+ sixg_radio=sixg_radio,
+ client_type=client_type,
+ lf_username=lf_username, lf_password=lf_password,
+ result_dir=result_dir, # FOR WEBGUI
+ dowebgui=dowebgui, # FOR WEBGUI
+ device_list=device_list,
+ test_name=test_name, # FOR WEBGUI
+ get_url_from_file=get_url_from_file,
+ file_path=file_path,
+ file_name=file_name,
+ group_name=group_name,
+ profile_name=profile_name,
+ eap_method=eap_method,
+ eap_identity=eap_identity,
+ ieee80211=ieee8021x,
+ ieee80211u=ieee80211u,
+ ieee80211w=ieee80211w,
+ enable_pkc=enable_pkc,
+ bss_transition=bss_transition,
+ power_save=power_save,
+ disable_ofdma=disable_ofdma,
+ roam_ft_ds=roam_ft_ds,
+ key_management=key_management,
+ pairwise=pairwise,
+ private_key=private_key,
+ ca_cert=ca_cert,
+ client_cert=client_cert,
+ pk_passwd=pk_passwd,
+ pac_file=pac_file,
+ expected_passfail_value=expected_passfail_value,
+ device_csv_name=device_csv_name,
+ wait_time=wait_time,
+ config=config,
+ get_live_view= get_live_view,
+ total_floors = total_floors
+ )
+ if client_type == "Real":
+ if not isinstance(device_list, list):
+ self.http_obj_dict[ce][obj_name]["obj"].device_list = self.http_obj_dict[ce][obj_name]["obj"].filter_iOS_devices(device_list)
+ if len(self.http_obj_dict[ce][obj_name]["obj"].device_list) == 0:
+ logger.info("There are no devices available")
+ return False
+ port_list, dev_list, macid_list, configuration = self.http_obj_dict[ce][obj_name]["obj"].get_real_client_list()
+ if dowebgui and group_name:
+ if len(dev_list) == 0:
+ logger.info("No device is available to run the test")
+ obj = {
+ "status": "Stopped",
+ "configuration_status": "configured"
+ }
+ self.http_obj_dict[ce][obj_name]["obj"].updating_webui_runningjson(obj)
+ return
+ else:
+ obj = {
+ "configured_devices": dev_list,
+ "configuration_status": "configured"
+ }
+ self.http_obj_dict[ce][obj_name]["obj"].updating_webui_runningjson(obj)
+ num_stations = len(port_list)
+ if not get_url_from_file:
+ self.http_obj_dict[ce][obj_name]["obj"].file_create(ssh_port=ssh_port)
+ else:
+ if file_path is None:
+ print("WARNING: Please Specify the path of the file, if you select the --get_url_from_file")
+ return False
+ self.http_obj_dict[ce][obj_name]["obj"].set_values()
+ self.http_obj_dict[ce][obj_name]["obj"].precleanup()
+ self.http_obj_dict[ce][obj_name]["obj"].build()
+ if client_type == 'Real':
+ self.http_obj_dict[ce][obj_name]["obj"].monitor_cx()
+ logger.info(f'Test started on the devices : {self.http_obj_dict[ce][obj_name]["obj"].port_list}')
+ test_time = datetime.datetime.now()
+ # Solution For Leap Year conflict changed it to %Y
+ test_time = test_time.strftime("%Y %d %H:%M:%S")
+ print("Test started at ", test_time)
+ self.http_obj_dict[ce][obj_name]["obj"].start()
+ if dowebgui:
+ # FOR WEBGUI, -This fumction is called to fetch the runtime data from layer-4
+ self.http_obj_dict[ce][obj_name]["obj"].monitor_for_runtime_csv(duration)
+ elif client_type == 'Real':
+ # To fetch runtime csv during runtime
+ self.http_obj_dict[ce][obj_name]["obj"].monitor_for_runtime_csv(duration)
+ else:
+ time.sleep(duration)
+ self.http_obj_dict[ce][obj_name]["obj"].stop()
+ # taking self.http_obj_dict[ce][obj_name]["obj"].data, which got updated in the monitor_for_runtime_csv method
+ if client_type == 'Real':
+ uc_avg_val = self.http_obj_dict[ce][obj_name]["obj"].data['uc_avg']
+ url_times = self.http_obj_dict[ce][obj_name]["obj"].data['url_data']
+ rx_bytes_val = self.http_obj_dict[ce][obj_name]["obj"].data['bytes_rd']
+ print('rx_rate_Val',self.http_obj_dict[ce][obj_name]["obj"].data['rx rate (1m)'])
+ rx_rate_val = list(self.http_obj_dict[ce][obj_name]["obj"].data['rx rate (1m)'])
+ else:
+ uc_avg_val = self.http_obj_dict[ce][obj_name]["obj"].my_monitor('uc-avg')
+ url_times = self.http_obj_dict[ce][obj_name]["obj"].my_monitor('total-urls')
+ rx_bytes_val = self.http_obj_dict[ce][obj_name]["obj"].my_monitor('bytes-rd')
+ rx_rate_val = self.http_obj_dict[ce][obj_name]["obj"].my_monitor('rx rate')
+ if dowebgui:
+ self.http_obj_dict[ce][obj_name]["obj"].data_for_webui["url_data"] = url_times # storing the layer-4 url data at the end of test
+ if client_type == 'Real': # for real clients
+ listReal.extend(uc_avg_val)
+ listReal_bytes.extend(rx_bytes_val)
+ listReal_speed.extend(rx_rate_val)
+ listReal_urltimes.extend(url_times)
+ logger.info("%s %s %s", listReal, listReal_bytes, listReal_speed)
+ final_dict[band]['dl_time'] = listReal
+ min2.append(min(listReal))
+ final_dict[band]['min'] = min2
+ max2.append(max(listReal))
+ final_dict[band]['max'] = max2
+ avg2.append((sum(listReal) / num_stations))
+ final_dict[band]['avg'] = avg2
+ final_dict[band]['bytes_rd'] = listReal_bytes
+ final_dict[band]['speed'] = listReal_speed
+ final_dict[band]['url_times'] = listReal_urltimes
+ else:
+ if band == "5G":
+ list5G.extend(uc_avg_val)
+ list5G_bytes.extend(rx_bytes_val)
+ list5G_speed.extend(rx_rate_val)
+ list5G_urltimes.extend(url_times)
+ logger.info("%s %s %s %s", list5G, list5G_bytes, list5G_speed, list5G_urltimes)
+ final_dict['5G']['dl_time'] = list5G
+ min5.append(min(list5G))
+ final_dict['5G']['min'] = min5
+ max5.append(max(list5G))
+ final_dict['5G']['max'] = max5
+ avg5.append((sum(list5G) / num_stations))
+ final_dict['5G']['avg'] = avg5
+ final_dict['5G']['bytes_rd'] = list5G_bytes
+ final_dict['5G']['speed'] = list5G_speed
+ final_dict['5G']['url_times'] = list5G_urltimes
+ elif band == "6G":
+ list6G.extend(uc_avg_val)
+ list6G_bytes.extend(rx_bytes_val)
+ list6G_speed.extend(rx_rate_val)
+ list6G_urltimes.extend(url_times)
+ final_dict['6G']['dl_time'] = list6G
+ min6.append(min(list6G))
+ final_dict['6G']['min'] = min6
+ max6.append(max(list6G))
+ final_dict['6G']['max'] = max6
+ avg6.append((sum(list6G) / num_stations))
+ final_dict['6G']['avg'] = avg6
+ final_dict['6G']['bytes_rd'] = list6G_bytes
+ final_dict['6G']['speed'] = list6G_speed
+ final_dict['6G']['url_times'] = list6G_urltimes
+ elif band == "2.4G":
+ list2G.extend(uc_avg_val)
+ list2G_bytes.extend(rx_bytes_val)
+ list2G_speed.extend(rx_rate_val)
+ list2G_urltimes.extend(url_times)
+ logger.info("%s %s %s", list2G, list2G_bytes, list2G_speed)
+ final_dict['2.4G']['dl_time'] = list2G
+ min2.append(min(list2G))
+ final_dict['2.4G']['min'] = min2
+ max2.append(max(list2G))
+ final_dict['2.4G']['max'] = max2
+ avg2.append((sum(list2G) / num_stations))
+ final_dict['2.4G']['avg'] = avg2
+ final_dict['2.4G']['bytes_rd'] = list2G_bytes
+ final_dict['2.4G']['speed'] = list2G_speed
+ final_dict['2.4G']['url_times'] = list2G_urltimes
+ elif bands == "Both":
+ Both.extend(uc_avg_val)
+ Both_bytes.extend(rx_bytes_val)
+ Both_speed.extend(rx_rate_val)
+ Both_urltimes.extend(url_times)
+ final_dict['Both']['dl_time'] = Both
+ min_both.append(min(Both))
+ final_dict['Both']['min'] = min_both
+ max_both.append(max(Both))
+ final_dict['Both']['max'] = max_both
+ avg_both.append((sum(Both) / num_stations))
+ final_dict['Both']['avg'] = avg_both
+ final_dict['Both']['bytes_rd'] = Both_bytes
+ final_dict['Both']['speed'] = Both_speed
+ final_dict['Both']['url_times'] = Both_urltimes
+
+ result_data = final_dict
+ print("result", result_data)
+ print("Test Finished")
+ test_end = datetime.datetime.now()
+ test_end = test_end.strftime("%Y %d %H:%M:%S")
+ print("Test ended at ", test_end)
+ s1 = test_time
+ s2 = test_end # for example
+ FMT = '%Y %d %H:%M:%S'
+ test_duration = datetime.datetime.strptime(s2, FMT) - datetime.datetime.strptime(s1, FMT)
+
+ info_ssid = []
+ info_security = []
+ # For real clients
+ if client_type == 'Real':
+ info_ssid.append(ssid)
+ info_security.append(security)
+ else:
+ for band in bands:
+ if band == "2.4G":
+ info_ssid.append(twog_ssid)
+ info_security.append(twog_security)
+ elif band == "5G":
+ info_ssid.append(fiveg_ssid)
+ info_security.append(fiveg_security)
+ elif band == "6G":
+ info_ssid.append(sixg_ssid)
+ info_security.append(sixg_security)
+ elif band == "Both":
+ info_ssid.append(fiveg_ssid)
+ info_security.append(fiveg_security)
+ info_ssid.append(twog_ssid)
+ info_security.append(twog_security)
+
+ print("total test duration ", test_duration)
+ date = str(datetime.datetime.now()).split(",")[0].replace(" ", "-").split(".")[0]
+ duration = duration
+ if int(duration) < 60:
+ duration = str(duration) + "s"
+ elif int(duration == 60) or (int(duration) > 60 and int(duration) < 3600):
+ duration = str(duration / 60) + "m"
+ else:
+ if int(duration == 3600) or (int(duration) > 3600):
+ duration = str(duration / 3600) + "h"
+
+ android_devices, windows_devices, linux_devices, mac_devices = 0, 0, 0, 0
+ all_devices_names = []
+ device_type = []
+ total_devices = ""
+ for i in self.http_obj_dict[ce][obj_name]["obj"].devices_list:
+ split_device_name = i.split(" ")
+ if 'android' in split_device_name:
+ all_devices_names.append(split_device_name[2] + ("(Android)"))
+ device_type.append("Android")
+ android_devices += 1
+ elif 'Win' in split_device_name:
+ all_devices_names.append(split_device_name[2] + ("(Windows)"))
+ device_type.append("Windows")
+ windows_devices += 1
+ elif 'Lin' in split_device_name:
+ all_devices_names.append(split_device_name[2] + ("(Linux)"))
+ device_type.append("Linux")
+ linux_devices += 1
+ elif 'Mac' in split_device_name:
+ all_devices_names.append(split_device_name[2] + ("(Mac)"))
+ device_type.append("Mac")
+ mac_devices += 1
+
+ # Build total_devices string based on counts
+ if android_devices > 0:
+ total_devices += f" Android({android_devices})"
+ if windows_devices > 0:
+ total_devices += f" Windows({windows_devices})"
+ if linux_devices > 0:
+ total_devices += f" Linux({linux_devices})"
+ if mac_devices > 0:
+ total_devices += f" Mac({mac_devices})"
+ if client_type == "Real":
+ if group_name:
+ group_names = ', '.join(configuration.keys())
+ profile_names = ', '.join(configuration.values())
+ configmap = "Groups:" + group_names + " -> Profiles:" + profile_names
+ test_setup_info = {
+ "AP name": ap_name,
+ "Configuration": configmap,
+ "Configured Devices": ", ".join(all_devices_names),
+ "No of Devices": "Total" + f"({len(all_devices_names)})" + total_devices,
+ "Traffic Direction": "Download",
+ "Traffic Duration ": duration
+ }
+ else:
+ test_setup_info = {
+ "AP Name": ap_name,
+ "SSID": ssid,
+ "Device List": ", ".join(all_devices_names),
+ "Security": security,
+ "No of Devices": "Total" + f"({len(all_devices_names)})" + total_devices,
+ "Traffic Direction": "Download",
+ "Traffic Duration ": duration
+ }
+ else:
+ test_setup_info = {
+ "AP Name": ap_name,
+ "SSID": ssid,
+ "Security": security,
+ "No of Devices": num_stations,
+ "Traffic Direction": "Download",
+ "Traffic Duration ": duration
+ }
+ test_input_infor = {
+ "LANforge ip": self.lanforge_ip,
+ "Bands": bands,
+ "Upstream": upstream_port,
+ "Stations": num_stations,
+ "SSID": ','.join(filter(None, info_ssid)) if info_ssid else "",
+ "Security": ', '.join(filter(None, info_security)) if info_security else "",
+ "Duration": duration,
+ "Contact": "support@candelatech.com"
+ }
+ if not file_path:
+ test_setup_info["File size"] = file_size
+ test_setup_info["File location"] = "/usr/local/lanforge/nginx/html"
+ test_input_infor["File size"] = file_size
+ else:
+ test_setup_info["File location (URLs from the File)"] = file_path
+ if client_type == "Real":
+ test_setup_info["failed_cx's"] = self.http_obj_dict[ce][obj_name]["obj"].failed_cx if self.http_obj_dict[ce][obj_name]["obj"].failed_cx else "NONE"
+ # dataset = self.http_obj_dict[ce][obj_name]["obj"].download_time_in_sec(result_data=result_data)
+ rx_rate = []
+ for i in result_data:
+ dataset = result_data[i]['dl_time']
+ dataset2 = result_data[i]['url_times']
+ bytes_rd = result_data[i]['bytes_rd']
+ rx_rate = result_data[i]['speed']
+ dataset1 = [round(x / 1000000, 4) for x in bytes_rd]
+ rx_rate = [round(x / 1000000, 4) for x in rx_rate] # converting bps to mbps
+
+ lis = []
+ if band == "Both":
+ for i in range(1, num_stations * 2 + 1):
+ lis.append(i)
+ else:
+ for i in range(1, num_stations + 1):
+ lis.append(i)
+
+ if dowebgui:
+ self.http_obj_dict[ce][obj_name]["obj"].data_for_webui["status"] = ["STOPPED"] * len(self.http_obj_dict[ce][obj_name]["obj"].devices_list)
+ self.http_obj_dict[ce][obj_name]["obj"].data_for_webui['rx rate (1m)'] = self.http_obj_dict[ce][obj_name]["obj"].data['rx rate (1m)']
+ self.http_obj_dict[ce][obj_name]["obj"].data_for_webui['total_err'] = self.http_obj_dict[ce][obj_name]["obj"].data['total_err']
+ self.http_obj_dict[ce][obj_name]["obj"].data_for_webui["start_time"] = self.http_obj_dict[ce][obj_name]["obj"].data["start_time"]
+ self.http_obj_dict[ce][obj_name]["obj"].data_for_webui["end_time"] = self.http_obj_dict[ce][obj_name]["obj"].data["end_time"]
+ self.http_obj_dict[ce][obj_name]["obj"].data_for_webui["remaining_time"] = self.http_obj_dict[ce][obj_name]["obj"].data["remaining_time"]
+ df1 = pd.DataFrame(self.http_obj_dict[ce][obj_name]["obj"].data_for_webui)
+ df1.to_csv('{}/http_datavalues.csv'.format(self.http_obj_dict[ce][obj_name]["obj"].result_dir), index=False)
+
+ self.http_obj_dict[ce][obj_name]["obj"].generate_report(date, num_stations=num_stations,
+ duration=duration, test_setup_info=test_setup_info, dataset=dataset, lis=lis,
+ bands=bands, threshold_2g=threshold_2g, threshold_5g=threshold_5g,
+ threshold_both=threshold_both, dataset2=dataset2, dataset1=dataset1,
+ # summary_table_value=summary_table_value,
+ result_data=result_data, rx_rate=rx_rate,
+ test_rig=test_rig, test_tag=test_tag, dut_hw_version=dut_hw_version,
+ dut_sw_version=dut_sw_version, dut_model_num=dut_model_num,
+ dut_serial_num=dut_serial_num, test_id=test_id,
+ test_input_infor=test_input_infor, csv_outfile=csv_outfile,report_path=self.result_path if not self.dowebgui else self.result_dir)
+ params = {
+ "date": date,
+ "num_stations": num_stations,
+ "duration": duration,
+ "test_setup_info": test_setup_info,
+ "dataset": dataset,
+ "lis": lis,
+ "bands": bands,
+ "threshold_2g": threshold_2g,
+ "threshold_5g": threshold_5g,
+ "threshold_both": threshold_both,
+ "dataset2": dataset2,
+ "dataset1": dataset1,
+ # "summary_table_value": summary_table_value, # optional
+ "result_data": result_data,
+ "rx_rate": rx_rate,
+ "test_rig": test_rig,
+ "test_tag": test_tag,
+ "dut_hw_version": dut_hw_version,
+ "dut_sw_version": dut_sw_version,
+ "dut_model_num": dut_model_num,
+ "dut_serial_num": dut_serial_num,
+ "test_id": test_id,
+ "test_input_infor": test_input_infor,
+ "csv_outfile": csv_outfile,
+ "report_path": self.result_path
+ }
+ self.http_obj_dict[ce][obj_name]["data"] = params.copy()
+ if self.dowebgui:
+ self.webgui_test_done("http")
+
+ self.http_obj_dict[ce][obj_name]["obj"].postcleanup()
+ if dowebgui:
+ self.http_obj_dict[ce][obj_name]["obj"].copy_reports_to_home_dir()
+ return True
+
+
+ def run_ftp_test(
+ self,
+ mgr='localhost',
+ mgr_port=8080,
+ upstream_port='eth1',
+ ssid=None,
+ passwd=None,
+ security=None,
+ group_name=None,
+ profile_name=None,
+ file_name=None,
+ ap_name=None,
+ traffic_duration=None,
+ clients_type="Real",
+ dowebgui=False,
+ directions=["Download"],
+ file_sizes=["2MB", "500MB", "1000MB"],
+ local_lf_report_dir="",
+ ap_ip=None,
+ twog_radio='wiphy1',
+ fiveg_radio='wiphy0',
+ sixg_radio='wiphy2',
+ lf_username='lanforge',
+ lf_password='lanforge',
+ ssh_port=22,
+ bands=["5G", "2.4G", "6G", "Both"],
+ num_stations=0,
+ result_dir='',
+ device_list=[],
+ test_name=None,
+ expected_passfail_value=None,
+ device_csv_name=None,
+ wait_time=60,
+ config=False,
+ test_rig="",
+ test_tag="",
+ dut_hw_version="",
+ dut_sw_version="",
+ dut_model_num="",
+ dut_serial_num="",
+ test_priority="",
+ test_id="FTP Data",
+ csv_outfile="",
+ eap_method="DEFAULT",
+ eap_identity='',
+ ieee8021x=False,
+ ieee80211u=False,
+ ieee80211w=1,
+ enable_pkc=False,
+ bss_transition=False,
+ power_save=False,
+ disable_ofdma=False,
+ roam_ft_ds=False,
+ key_management="DEFAULT",
+ pairwise='NA',
+ private_key='NA',
+ ca_cert='NA',
+ client_cert='NA',
+ pk_passwd='NA',
+ pac_file='NA',
+ get_live_view=False,
+ total_floors="0",
+ lf_logger_config_json=None,
+ help_summary=False
+ ):
+ args = SimpleNamespace(**locals())
+ args.mgr = self.lanforge_ip
+ args.mgr_port = int(self.port)
+ return self.run_ftp_test1(args)
+
+    def run_ftp_test1(self,args):
+        """Execute one FTP throughput test run described by *args*.
+
+        For every (band, direction, file_size) combination in ``args`` this
+        method builds an :class:`FtpTest` object (registered in
+        ``self.ftp_obj_dict`` under the current execution mode), creates the
+        transfer file, optionally filters/configures real clients, runs the
+        traffic for ``args.traffic_duration`` seconds, and generates a
+        per-combination report whose parameters are also cached in the
+        registry's ``"data"`` slot.
+
+        Returns True on normal completion, False when aborted early (webgui
+        stop request, no usable devices, or a failed precondition).
+        NOTE(review): one webgui abort path below uses a bare ``return``
+        (None) rather than ``return False`` — confirm callers treat both
+        the same.
+        """
+        if self.dowebgui:
+            # Honour a stop request issued from the web UI before starting.
+            if not self.webgui_stop_check("ftp"):
+                return False
+        # 1st time stamp for test duration
+        time_stamp1 = datetime.datetime.now()
+        # use for creating ftp_test dictionary
+        # NOTE(review): "interation" is a typo for "iteration"; the counter is
+        # incremented per combination but never read afterwards.
+        interation_num = 0
+
+        # empty dictionary for whole test data
+        # NOTE(review): ftp_data is never populated in this method yet is
+        # passed to generate_report() — confirm the report tolerates {}.
+        ftp_data = {}
+
+        # validate_args(args)
+        # Normalise traffic_duration ("30s"/"2m"/"1h"/plain number) to seconds.
+        if args.traffic_duration.endswith('s') or args.traffic_duration.endswith('S'):
+            args.traffic_duration = int(args.traffic_duration[0:-1])
+        elif args.traffic_duration.endswith('m') or args.traffic_duration.endswith('M'):
+            args.traffic_duration = int(args.traffic_duration[0:-1]) * 60
+        elif args.traffic_duration.endswith('h') or args.traffic_duration.endswith('H'):
+            args.traffic_duration = int(args.traffic_duration[0:-1]) * 60 * 60
+        elif args.traffic_duration.endswith(''):
+            # NOTE(review): endswith('') is always True, so this branch is the
+            # unconditional fallback for unsuffixed values (e.g. "120").
+            args.traffic_duration = int(args.traffic_duration)
+        ce = self.current_exec  # series/parallel execution mode key
+        # Pick a unique registry key: a fixed name in parallel mode, a
+        # numbered ftp_test_<n> in series mode.
+        if ce == "parallel":
+            obj_name = "ftp_test"
+        else:
+            obj_no = 1
+            while f"ftp_test_{obj_no}" in self.ftp_obj_dict[ce]:
+                obj_no+=1
+            obj_name = f"ftp_test_{obj_no}"
+        self.ftp_obj_dict[ce][obj_name] = {"obj":None,"data":None}
+        # For all combinations ftp_data of directions, file size and client counts, run the test
+        for band in args.bands:
+            for direction in args.directions:
+                for file_size in args.file_sizes:
+                    # Start Test
+                    self.ftp_obj_dict[ce][obj_name]["obj"] = FtpTest(lfclient_host=args.mgr,
+                                                                     lfclient_port=args.mgr_port,
+                                                                     result_dir=args.result_dir,
+                                                                     upstream=args.upstream_port,
+                                                                     dut_ssid=args.ssid,
+                                                                     group_name=args.group_name,
+                                                                     profile_name=args.profile_name,
+                                                                     file_name=args.file_name,
+                                                                     dut_passwd=args.passwd,
+                                                                     dut_security=args.security,
+                                                                     num_sta=args.num_stations,
+                                                                     band=band,
+                                                                     ap_name=args.ap_name,
+                                                                     file_size=file_size,
+                                                                     direction=direction,
+                                                                     twog_radio=args.twog_radio,
+                                                                     fiveg_radio=args.fiveg_radio,
+                                                                     sixg_radio=args.sixg_radio,
+                                                                     lf_username=args.lf_username,
+                                                                     lf_password=args.lf_password,
+                                                                     # duration=pass_fail_duration(band, file_size),
+                                                                     traffic_duration=args.traffic_duration,
+                                                                     ssh_port=args.ssh_port,
+                                                                     clients_type=args.clients_type,
+                                                                     dowebgui=args.dowebgui,
+                                                                     device_list=args.device_list,
+                                                                     test_name=args.test_name,
+                                                                     eap_method=args.eap_method,
+                                                                     eap_identity=args.eap_identity,
+                                                                     ieee80211=args.ieee8021x,
+                                                                     ieee80211u=args.ieee80211u,
+                                                                     ieee80211w=args.ieee80211w,
+                                                                     enable_pkc=args.enable_pkc,
+                                                                     bss_transition=args.bss_transition,
+                                                                     power_save=args.power_save,
+                                                                     disable_ofdma=args.disable_ofdma,
+                                                                     roam_ft_ds=args.roam_ft_ds,
+                                                                     key_management=args.key_management,
+                                                                     pairwise=args.pairwise,
+                                                                     private_key=args.private_key,
+                                                                     ca_cert=args.ca_cert,
+                                                                     client_cert=args.client_cert,
+                                                                     pk_passwd=args.pk_passwd,
+                                                                     pac_file=args.pac_file,
+                                                                     expected_passfail_val=args.expected_passfail_value,
+                                                                     csv_name=args.device_csv_name,
+                                                                     wait_time=args.wait_time,
+                                                                     config=args.config,
+                                                                     get_live_view= args.get_live_view,
+                                                                     total_floors = args.total_floors
+                                                                     )
+
+                    interation_num = interation_num + 1
+                    # Create the file that will be transferred during the test.
+                    self.ftp_obj_dict[ce][obj_name]["obj"].file_create()
+                    if args.clients_type == "Real":
+                        if not isinstance(args.device_list, list):
+                            # iOS clients are filtered out — presumably unsupported
+                            # for FTP traffic here; TODO confirm.
+                            self.ftp_obj_dict[ce][obj_name]["obj"].device_list = self.ftp_obj_dict[ce][obj_name]["obj"].filter_iOS_devices(args.device_list)
+                            if len(self.ftp_obj_dict[ce][obj_name]["obj"].device_list) == 0:
+                                logger.info("There are no devices available")
+                                return False
+                        # NOTE(review): `configuration` is only bound on this Real
+                        # path; the group-name report below would raise NameError
+                        # for non-Real clients — confirm that combination is
+                        # impossible upstream.
+                        configured_device, configuration = self.ftp_obj_dict[ce][obj_name]["obj"].query_realclients()
+
+                        if args.dowebgui and args.group_name:
+                            # If no devices are configured,update the Web UI with "Stopped" status
+                            if len(configured_device) == 0:
+                                logger.warning("No device is available to run the test")
+                                obj1 = {
+                                    "status": "Stopped",
+                                    "configuration_status": "configured"
+                                }
+                                self.ftp_obj_dict[ce][obj_name]["obj"].updating_webui_runningjson(obj1)
+                                return
+                            # If devices are configured, update the Web UI with the list of configured devices
+                            else:
+                                obj1 = {
+                                    "configured_devices": configured_device,
+                                    "configuration_status": "configured"
+                                }
+                                self.ftp_obj_dict[ce][obj_name]["obj"].updating_webui_runningjson(obj1)
+                    self.ftp_obj_dict[ce][obj_name]["obj"].set_values()
+                    self.ftp_obj_dict[ce][obj_name]["obj"].precleanup()
+                    self.ftp_obj_dict[ce][obj_name]["obj"].build()
+                    if not self.ftp_obj_dict[ce][obj_name]["obj"].passes():
+                        logger.info(self.ftp_obj_dict[ce][obj_name]["obj"].get_fail_message())
+                        return False
+
+                    if self.ftp_obj_dict[ce][obj_name]["obj"].clients_type == 'Real':
+                        self.ftp_obj_dict[ce][obj_name]["obj"].monitor_cx()
+                        logger.info(f'Test started on the devices : {self.ftp_obj_dict[ce][obj_name]["obj"].input_devices_list}')
+                    # First time stamp
+                    time1 = datetime.datetime.now()
+                    logger.info("Traffic started running at %s", time1)
+                    self.ftp_obj_dict[ce][obj_name]["obj"].start(False, False)
+                    # to fetch runtime values during the execution and fill the csv.
+                    if args.dowebgui or args.clients_type == "Real":
+                        # Live monitoring path: fills runtime CSV while traffic runs.
+                        self.ftp_obj_dict[ce][obj_name]["obj"].monitor_for_runtime_csv()
+                        self.ftp_obj_dict[ce][obj_name]["obj"].my_monitor_for_real_devices()
+                    else:
+                        # Virtual-station path: just wait out the duration, then poll once.
+                        time.sleep(args.traffic_duration)
+                        self.ftp_obj_dict[ce][obj_name]["obj"].my_monitor()
+                    self.ftp_obj_dict[ce][obj_name]["obj"].stop()
+                    print("Traffic stopped running")
+
+                    self.ftp_obj_dict[ce][obj_name]["obj"].postcleanup()
+                    time2 = datetime.datetime.now()
+                    logger.info("Test ended at %s", time2)
+
+                    # 2nd time stamp for test duration
+                    time_stamp2 = datetime.datetime.now()
+
+                    # total time for test duration
+                    # test_duration = str(time_stamp2 - time_stamp1)[:-7]
+
+                    date = str(datetime.datetime.now()).split(",")[0].replace(" ", "-").split(".")[0]
+
+                    # print(ftp_data)
+
+                    input_setup_info = {
+                        "AP IP": args.ap_ip,
+                        "File Size": args.file_sizes,
+                        "Bands": args.bands,
+                        "Direction": args.directions,
+                        "Stations": args.num_stations,
+                        "Upstream": args.upstream_port,
+                        "SSID": args.ssid,
+                        "Security": args.security,
+                        "Contact": "support@candelatech.com"
+                    }
+                    if args.dowebgui:
+                        # Mark every URL row as stopped, then persist the runtime
+                        # data for the web UI to pick up.
+                        self.ftp_obj_dict[ce][obj_name]["obj"].data_for_webui["status"] = ["STOPPED"] * len(self.ftp_obj_dict[ce][obj_name]["obj"].url_data)
+
+                        df1 = pd.DataFrame(self.ftp_obj_dict[ce][obj_name]["obj"].data_for_webui)
+                        df1.to_csv('{}/ftp_datavalues.csv'.format(self.ftp_obj_dict[ce][obj_name]["obj"].result_dir), index=False)
+                    # copying to home directory i.e home/user_name
+                    # self.ftp_obj_dict[ce][obj_name]["obj"].copy_reports_to_home_dir()
+                    # Report generation when groups are specified
+                    if args.group_name:
+                        self.ftp_obj_dict[ce][obj_name]["obj"].generate_report(ftp_data, date, input_setup_info, test_rig=args.test_rig,
+                                                                               test_tag=args.test_tag, dut_hw_version=args.dut_hw_version,
+                                                                               dut_sw_version=args.dut_sw_version, dut_model_num=args.dut_model_num,
+                                                                               dut_serial_num=args.dut_serial_num, test_id=args.test_id,
+                                                                               bands=args.bands, csv_outfile=args.csv_outfile, local_lf_report_dir=args.local_lf_report_dir, config_devices=configuration,report_path=self.result_path if not self.dowebgui else self.result_dir)
+                    # Generating report without group-specific device configuration
+                    else:
+                        self.ftp_obj_dict[ce][obj_name]["obj"].generate_report(ftp_data, date, input_setup_info, test_rig=args.test_rig,
+                                                                               test_tag=args.test_tag, dut_hw_version=args.dut_hw_version,
+                                                                               dut_sw_version=args.dut_sw_version, dut_model_num=args.dut_model_num,
+                                                                               dut_serial_num=args.dut_serial_num, test_id=args.test_id,
+                                                                               bands=args.bands, csv_outfile=args.csv_outfile, local_lf_report_dir=args.local_lf_report_dir,report_path=self.result_path if not self.dowebgui else self.result_dir)
+
+                    # Cache the report parameters so a combined/overall report can
+                    # be regenerated later from the registry.
+                    params = {
+                        "ftp_data": ftp_data,
+                        "date": date,
+                        "input_setup_info": input_setup_info,
+                        "test_rig": args.test_rig,
+                        "test_tag": args.test_tag,
+                        "dut_hw_version": args.dut_hw_version,
+                        "dut_sw_version": args.dut_sw_version,
+                        "dut_model_num": args.dut_model_num,
+                        "dut_serial_num": args.dut_serial_num,
+                        "test_id": args.test_id,
+                        "bands": args.bands,
+                        "csv_outfile": args.csv_outfile,
+                        "local_lf_report_dir": args.local_lf_report_dir,
+                        "report_path": self.result_path
+                    }
+
+                    if args.group_name:
+                        params["config_devices"] = configuration
+                    self.ftp_obj_dict[ce][obj_name]["data"] = params.copy()
+
+                    # NOTE(review): the commented-out block below is a dormant
+                    # overall-report builder; some set_custom_html lines appear
+                    # truncated (HTML payload lost), leaving stray quote lines.
+                    # if args.group_name:
+                    #     config_devices = configuration
+                    # else:
+                    #     config_devices = ""
+
+                    # ftp_data = ftp_data
+                    # date = date
+                    # input_setup_info = input_setup_info
+                    # test_rig = args.test_rig
+                    # test_tag = args.test_tag
+                    # dut_hw_version = args.dut_hw_version
+                    # dut_sw_version = args.dut_sw_version
+                    # dut_model_num = args.dut_model_num
+                    # dut_serial_num = args.dut_serial_num
+                    # test_id = args.test_id
+                    # bands = args.bands
+                    # csv_outfile = args.csv_outfile
+                    # local_lf_report_dir = args.local_lf_report_dir
+                    # report_path = self.result_path
+
+                    # no_of_stations = ""
+                    # duration = ""
+                    # x_fig_size = 18
+                    # y_fig_size = len(obj.real_client_list1) * .5 + 4
+
+                    # if int(obj.traffic_duration) < 60:
+                    #     duration = str(obj.traffic_duration) + "s"
+                    # elif int(obj.traffic_duration == 60) or (int(obj.traffic_duration) > 60 and int(obj.traffic_duration) < 3600):
+                    #     duration = str(obj.traffic_duration / 60) + "m"
+                    # else:
+                    #     if int(obj.traffic_duration == 3600) or (int(obj.traffic_duration) > 3600):
+                    #         duration = str(obj.traffic_duration / 3600) + "h"
+
+                    # client_list = []
+                    # if obj.clients_type == "Real":
+                    #     client_list = obj.real_client_list1
+                    #     android_devices, windows_devices, linux_devices, mac_devices = 0, 0, 0, 0
+                    #     all_devices_names = []
+                    #     device_type = []
+                    #     total_devices = ""
+                    #     for i in obj.real_client_list:
+                    #         split_device_name = i.split(" ")
+                    #         if 'android' in split_device_name:
+                    #             all_devices_names.append(split_device_name[2] + ("(Android)"))
+                    #             device_type.append("Android")
+                    #             android_devices += 1
+                    #         elif 'Win' in split_device_name:
+                    #             all_devices_names.append(split_device_name[2] + ("(Windows)"))
+                    #             device_type.append("Windows")
+                    #             windows_devices += 1
+                    #         elif 'Lin' in split_device_name:
+                    #             all_devices_names.append(split_device_name[2] + ("(Linux)"))
+                    #             device_type.append("Linux")
+                    #             linux_devices += 1
+                    #         elif 'Mac' in split_device_name:
+                    #             all_devices_names.append(split_device_name[2] + ("(Mac)"))
+                    #             device_type.append("Mac")
+                    #             mac_devices += 1
+
+                    #     if android_devices > 0:
+                    #         total_devices += f" Android({android_devices})"
+                    #     if windows_devices > 0:
+                    #         total_devices += f" Windows({windows_devices})"
+                    #     if linux_devices > 0:
+                    #         total_devices += f" Linux({linux_devices})"
+                    #     if mac_devices > 0:
+                    #         total_devices += f" Mac({mac_devices})"
+                    # else:
+                    #     if obj.clients_type == "Virtual":
+                    #         client_list = obj.station_list
+                    # if 'ftp_test' not in self.test_count_dict:
+                    #     self.test_count_dict['ftp_test']=0
+                    # self.test_count_dict['ftp_test']+=1
+                    # self.overall_report.set_obj_html(_obj_title=f'FTP Test ', _obj="")
+                    # self.overall_report.build_objective()
+                    # self.overall_report.set_table_title("Test Setup Information")
+                    # self.overall_report.build_table_title()
+
+                    # if obj.clients_type == "Virtual":
+                    #     no_of_stations = str(len(obj.station_list))
+                    # else:
+                    #     no_of_stations = str(len(obj.input_devices_list))
+
+                    # if obj.clients_type == "Real":
+                    #     if config_devices == "":
+                    #         test_setup_info = {
+                    #             "AP Name": obj.ap_name,
+                    #             "SSID": obj.ssid,
+                    #             "Security": obj.security,
+                    #             "Device List": ", ".join(all_devices_names),
+                    #             "No of Devices": "Total" + f"({no_of_stations})" + total_devices,
+                    #             "Failed CXs": obj.failed_cx if obj.failed_cx else "NONE",
+                    #             "File size": obj.file_size,
+                    #             "File location": "/home/lanforge",
+                    #             "Traffic Direction": obj.direction,
+                    #             "Traffic Duration ": duration
+                    #         }
+                    #     else:
+                    #         group_names = ', '.join(config_devices.keys())
+                    #         profile_names = ', '.join(config_devices.values())
+                    #         configmap = "Groups:" + group_names + " -> Profiles:" + profile_names
+                    #         test_setup_info = {
+                    #             "AP Name": obj.ap_name,
+                    #             'Configuration': configmap,
+                    #             "No of Devices": "Total" + f"({no_of_stations})" + total_devices,
+                    #             "File size": obj.file_size,
+                    #             "File location": "/home/lanforge",
+                    #             "Traffic Direction": obj.direction,
+                    #             "Traffic Duration ": duration
+                    #         }
+                    # else:
+                    #     test_setup_info = {
+                    #         "AP Name": obj.ap_name,
+                    #         "SSID": obj.ssid,
+                    #         "Security": obj.security,
+                    #         "No of Devices": no_of_stations,
+                    #         "File size": obj.file_size,
+                    #         "File location": "/home/lanforge",
+                    #         "Traffic Direction": obj.direction,
+                    #         "Traffic Duration ": duration
+                    #     }
+
+                    # self.overall_report.test_setup_table(value="Test Setup Information", test_setup_data=test_setup_info)
+
+                    # self.overall_report.set_obj_html(
+                    #     _obj_title=f"No of times file {obj.direction}",
+                    #     _obj=f"The below graph represents number of times a file {obj.direction} for each client"
+                    #     f"(WiFi) traffic. X- axis shows “No of times file {obj.direction}” and Y-axis shows "
+                    #     f"Client names.")
+
+                    # self.overall_report.build_objective()
+                    # graph = lf_bar_graph_horizontal(_data_set=[obj.url_data], _xaxis_name=f"No of times file {obj.direction}",
+                    #                                 _yaxis_name="Client names",
+                    #                                 _yaxis_categories=[i for i in client_list],
+                    #                                 _yaxis_label=[i for i in client_list],
+                    #                                 _yaxis_step=1,
+                    #                                 _yticks_font=8,
+                    #                                 _yticks_rotation=None,
+                    #                                 _graph_title=f"No of times file {obj.direction} (Count)",
+                    #                                 _title_size=16,
+                    #                                 _figsize=(x_fig_size, y_fig_size),
+                    #                                 _legend_loc="best",
+                    #                                 _legend_box=(1.0, 1.0),
+                    #                                 _color_name=['orange'],
+                    #                                 _show_bar_value=True,
+                    #                                 _enable_csv=True,
+                    #                                 _graph_image_name="Total-url_ftp", _color_edge=['black'],
+                    #                                 _color=['orange'],
+                    #                                 _label=[obj.direction])
+                    # graph_png = graph.build_bar_graph_horizontal()
+                    # print("graph name {}".format(graph_png))
+                    # self.overall_report.set_graph_image(graph_png)
+                    # # need to move the graph image to the results
+                    # self.overall_report.move_graph_image()
+                    # self.overall_report.set_csv_filename(graph_png)
+                    # self.overall_report.move_csv_file()
+                    # self.overall_report.build_graph()
+                    # self.overall_report.set_obj_html(
+                    #     _obj_title=f"Average time taken to {obj.direction} file ",
+                    #     _obj=f"The below graph represents average time taken to {obj.direction} for each client "
+                    #     f"(WiFi) traffic. X- axis shows “Average time taken to {obj.direction} a file ” and Y-axis shows "
+                    #     f"Client names.")
+
+                    # self.overall_report.build_objective()
+                    # graph = lf_bar_graph_horizontal(_data_set=[obj.uc_avg], _xaxis_name=f"Average time taken to {obj.direction} file in ms",
+                    #                                 _yaxis_name="Client names",
+                    #                                 _yaxis_categories=[i for i in client_list],
+                    #                                 _yaxis_label=[i for i in client_list],
+                    #                                 _yaxis_step=1,
+                    #                                 _yticks_font=8,
+                    #                                 _yticks_rotation=None,
+                    #                                 _graph_title=f"Average time taken to {obj.direction} file",
+                    #                                 _title_size=16,
+                    #                                 _figsize=(x_fig_size, y_fig_size),
+                    #                                 _legend_loc="best",
+                    #                                 _legend_box=(1.0, 1.0),
+                    #                                 _color_name=['steelblue'],
+                    #                                 _show_bar_value=True,
+                    #                                 _enable_csv=True,
+                    #                                 _graph_image_name="ucg-avg_ftp", _color_edge=['black'],
+                    #                                 _color=['steelblue'],
+                    #                                 _label=[obj.direction])
+                    # graph_png = graph.build_bar_graph_horizontal()
+                    # print("graph name {}".format(graph_png))
+                    # self.overall_report.set_graph_image(graph_png)
+                    # self.overall_report.move_graph_image()
+                    # # need to move the graph image to the results
+                    # self.overall_report.set_csv_filename(graph_png)
+                    # self.overall_report.move_csv_file()
+                    # self.overall_report.build_graph()
+                    # if(obj.dowebgui and obj.get_live_view):
+                    #     for floor in range(0,int(obj.total_floors)):
+                    #         script_dir = os.path.dirname(os.path.abspath(__file__))
+                    #         throughput_image_path = os.path.join(script_dir, "heatmap_images", f"ftp_{obj.test_name}_{floor+1}.png")
+                    #         # rssi_image_path = os.path.join(script_dir, "heatmap_images", f"{self.test_name}_rssi_{floor+1}.png")
+                    #         timeout = 60  # seconds
+                    #         start_time = time.time()
+
+                    #         while not (os.path.exists(throughput_image_path)):
+                    #             if time.time() - start_time > timeout:
+                    #                 print("Timeout: Images not found within 60 seconds.")
+                    #                 break
+                    #             time.sleep(1)
+                    #         while not os.path.exists(throughput_image_path):
+                    #             if os.path.exists(throughput_image_path):
+                    #                 break
+                    #             # time.sleep(10)
+                    #         if os.path.exists(throughput_image_path):
+                    #             self.overall_report.set_custom_html('
')
+                    #             self.overall_report.build_custom()
+                    #             # self.overall_report.set_custom_html("Average Throughput Heatmap:
")
+                    #             # self.overall_report.build_custom()
+                    #             self.overall_report.set_custom_html(f'
')
+                    #             self.overall_report.build_custom()
+                    #             # os.remove(throughput_image_path)
+                    # self.overall_report.set_obj_html("File Download Time (sec)", "The below table will provide information of "
+                    #                                  "minimum, maximum and the average time taken by clients to download a file in seconds")
+                    # self.overall_report.build_objective()
+                    # dataframe2 = {
+                    #     "Minimum": [str(round(min(obj.uc_min) / 1000, 1))],
+                    #     "Maximum": [str(round(max(obj.uc_max) / 1000, 1))],
+                    #     "Average": [str(round((sum(obj.uc_avg) / len(client_list)) / 1000, 1))]
+                    # }
+                    # dataframe3 = pd.DataFrame(dataframe2)
+                    # self.overall_report.set_table_dataframe(dataframe3)
+                    # self.overall_report.build_table()
+                    # self.overall_report.set_table_title("Overall Results")
+                    # self.overall_report.build_table_title()
+                    # if obj.clients_type == 'Real':
+                    #     # Calculating the pass/fail criteria when either expected_passfail_val or csv_name is provided
+                    #     if obj.expected_passfail_val or obj.csv_name:
+                    #         obj.get_pass_fail_list(client_list)
+                    #     # When groups are provided a seperate table will be generated for each group using generate_dataframe
+                    #     if obj.group_name:
+                    #         for key, val in obj.group_device_map.items():
+                    #             if obj.expected_passfail_val or obj.csv_name:
+                    #                 dataframe = obj.generate_dataframe(val, client_list, obj.mac_id_list, obj.channel_list, obj.ssid_list, obj.mode_list,
+                    #                                                    obj.url_data, obj.test_input_list, obj.uc_avg, obj.bytes_rd, obj.rx_rate, obj.pass_fail_list)
+                    #             else:
+                    #                 dataframe = obj.generate_dataframe(val, client_list, obj.mac_id_list, obj.channel_list, obj.ssid_list,
+                    #                                                    obj.mode_list, obj.url_data, [], obj.uc_avg, obj.bytes_rd, obj.rx_rate, [])
+
+                    #             if dataframe:
+                    #                 self.overall_report.set_obj_html("", "Group: {}".format(key))
+                    #                 self.overall_report.build_objective()
+                    #                 dataframe1 = pd.DataFrame(dataframe)
+                    #                 self.overall_report.set_table_dataframe(dataframe1)
+                    #                 self.overall_report.build_table()
+                    #     else:
+                    #         dataframe = {
+                    #             " Clients": client_list,
+                    #             " MAC ": obj.mac_id_list,
+                    #             " Channel": obj.channel_list,
+                    #             " SSID ": obj.ssid_list,
+                    #             " Mode": obj.mode_list,
+                    #             " No of times File downloaded ": obj.url_data,
+                    #             " Time Taken to Download file (ms)": obj.uc_avg,
+                    #             " Bytes-rd (Mega Bytes)": obj.bytes_rd,
+                    #             " RX RATE (Mbps) ": obj.rx_rate,
+                    #             "Failed Urls": obj.total_err
+                    #         }
+                    #         if obj.expected_passfail_val or obj.csv_name:
+                    #             dataframe[" Expected output "] = obj.test_input_list
+                    #             dataframe[" Status "] = obj.pass_fail_list
+
+                    #         dataframe1 = pd.DataFrame(dataframe)
+                    #         self.overall_report.set_table_dataframe(dataframe1)
+                    #         self.overall_report.build_table()
+
+                    # else:
+                    #     dataframe = {
+                    #         " Clients": client_list,
+                    #         " MAC ": obj.mac_id_list,
+                    #         " Channel": obj.channel_list,
+                    #         " SSID ": obj.ssid_list,
+                    #         " Mode": obj.mode_list,
+                    #         " No of times File downloaded ": obj.url_data,
+                    #         " Time Taken to Download file (ms)": obj.uc_avg,
+                    #         " Bytes-rd (Mega Bytes)": obj.bytes_rd,
+                    #     }
+                    #     dataframe1 = pd.DataFrame(dataframe)
+                    #     self.overall_report.set_table_dataframe(dataframe1)
+                    #     self.overall_report.build_table()
+                    # # self.overall_report.build_footer()
+                    # # html_file = self.overall_report.write_html()
+                    # # logger.info("returned file {}".format(html_file))
+                    # # logger.info(html_file)
+                    # # self.overall_report.write_pdf()
+
+                    # if csv_outfile is not None:
+                    #     current_time = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())
+                    #     csv_outfile = "{}_{}-test_l4_ftp.csv".format(
+                    #         csv_outfile, current_time)
+                    #     csv_outfile = self.overall_report.file_add_path(csv_outfile)
+                    #     logger.info("csv output file : {}".format(csv_outfile))
+
+
+
+
+                    # if args.dowebgui:
+                    #     obj.copy_reports_to_home_dir()
+        if self.dowebgui:
+            # Signal the web UI that the FTP test has finished.
+            self.webgui_test_done("ftp")
+        return True
+
+
+    def run_qos_test(
+            self,
+            device_list=None,
+            test_name=None,
+            result_dir='',
+            upstream_port='eth1',
+            security="open",
+            ssid=None,
+            passwd='[BLANK]',
+            traffic_type=None,
+            upload=None,
+            download=None,
+            test_duration="2m",
+            ap_name="Test-AP",
+            tos=None,
+            dowebgui=False,
+            debug=False,
+            help_summary=False,
+            group_name=None,
+            profile_name=None,
+            file_name=None,
+            eap_method='DEFAULT',
+            eap_identity='',
+            ieee8021x=False,
+            ieee80211u=False,
+            ieee80211w=1,
+            enable_pkc=False,
+            bss_transition=False,
+            power_save=False,
+            disable_ofdma=False,
+            roam_ft_ds=False,
+            key_management='DEFAULT',
+            pairwise='NA',
+            private_key='NA',
+            ca_cert='NA',
+            client_cert='NA',
+            pk_passwd='NA',
+            pac_file='NA',
+            expected_passfail_value=None,
+            device_csv_name=None,
+            wait_time=60,
+            config=False,
+            get_live_view=False,
+            total_floors="0"
+    ):
+        """Run one QoS (ToS throughput) test via lf_interop_qos.ThroughputQOS.
+
+        Builds upload/download load pairs from the *upload*/*download*
+        arguments, runs one ThroughputQOS pass per load value (objects are
+        registered in ``self.qos_obj_dict`` under the current execution
+        mode), evaluates QoS per pass, generates the report, and caches the
+        report parameters in the registry's ``"data"`` slot.
+
+        Returns True on completion, False on a webgui stop request; raises
+        ValueError when no device is available in webgui mode.  NOTE(review):
+        the empty-configured-devices webgui path uses a bare ``return``
+        (None) — confirm callers treat it like False.
+        """
+        if self.dowebgui:
+            # Honour a stop request issued from the web UI before starting.
+            if not self.webgui_stop_check("qos"):
+                return False
+        test_results = {'test_results': []}
+        loads = {}
+        data = {}
+        # Build parallel upload/download load lists; a missing side is padded
+        # with zeros so both lists have matching indices.
+        if download and upload:
+            loads = {'upload': str(upload).split(","), 'download': str(download).split(",")}
+            loads_data = loads["download"]
+        elif download:
+            loads = {'upload': [], 'download': str(download).split(",")}
+            # NOTE(review): iterates len(download) — the raw argument — not
+            # len(loads['download']); if download is a comma-separated string
+            # this counts characters, not values.  TODO confirm intent.
+            for i in range(len(download)):
+                loads['upload'].append(0)
+            loads_data = loads["download"]
+        else:
+            if upload:
+                loads = {'upload': str(upload).split(","), 'download': []}
+                # NOTE(review): same len(upload) concern as the download branch.
+                for i in range(len(upload)):
+                    loads['download'].append(0)
+                loads_data = loads["upload"]
+        # Direction label used in CSV/report naming, derived from traffic_type
+        # (e.g. "lf_udp" -> "L3_UDP_DL").
+        if download and upload:
+            direction = 'L3_' + traffic_type.split('_')[1].upper() + '_BiDi'
+        elif upload:
+            direction = 'L3_' + traffic_type.split('_')[1].upper() + '_UL'
+        else:
+            direction = 'L3_' + traffic_type.split('_')[1].upper() + '_DL'
+
+        # validate_args(args)
+        # Normalise test_duration ("30s"/"2m"/"1h"/plain number) to seconds.
+        if test_duration.endswith('s') or test_duration.endswith('S'):
+            test_duration = int(test_duration[0:-1])
+        elif test_duration.endswith('m') or test_duration.endswith('M'):
+            test_duration = int(test_duration[0:-1]) * 60
+        elif test_duration.endswith('h') or test_duration.endswith('H'):
+            test_duration = int(test_duration[0:-1]) * 60 * 60
+        elif test_duration.endswith(''):
+            # NOTE(review): endswith('') is always True, so this is the
+            # unconditional fallback for unsuffixed values.
+            test_duration = int(test_duration)
+        ce = self.current_exec  # series/parallel execution mode key
+        # Pick a unique registry key: fixed in parallel mode, numbered in series.
+        if ce == "parallel":
+            obj_name = "qos_test"
+        else:
+            obj_no = 1
+            while f"qos_test_{obj_no}" in self.qos_obj_dict[ce]:
+                obj_no+=1
+            obj_name = f"qos_test_{obj_no}"
+        self.qos_obj_dict[ce][obj_name] = {"obj":None,"data":None}
+        # One ThroughputQOS pass per configured load value.
+        for index in range(len(loads_data)):
+            self.qos_obj_dict[ce][obj_name]["obj"] = qos_test.ThroughputQOS(host=self.lanforge_ip,
+                                                                            ip=self.lanforge_ip,
+                                                                            port=self.port,
+                                                                            number_template="0000",
+                                                                            ap_name=ap_name,
+                                                                            name_prefix="TOS-",
+                                                                            upstream=upstream_port,
+                                                                            ssid=ssid,
+                                                                            password=passwd,
+                                                                            security=security,
+                                                                            test_duration=test_duration,
+                                                                            use_ht160=False,
+                                                                            side_a_min_rate=int(loads['upload'][index]),
+                                                                            side_b_min_rate=int(loads['download'][index]),
+                                                                            traffic_type=traffic_type,
+                                                                            tos=tos,
+                                                                            csv_direction=direction,
+                                                                            dowebgui=dowebgui,
+                                                                            test_name=test_name,
+                                                                            result_dir=result_dir,
+                                                                            device_list=device_list,
+                                                                            _debug_on=debug,
+                                                                            group_name=group_name,
+                                                                            profile_name=profile_name,
+                                                                            file_name=file_name,
+                                                                            eap_method=eap_method,
+                                                                            eap_identity=eap_identity,
+                                                                            ieee80211=ieee8021x,
+                                                                            ieee80211u=ieee80211u,
+                                                                            ieee80211w=ieee80211w,
+                                                                            enable_pkc=enable_pkc,
+                                                                            bss_transition=bss_transition,
+                                                                            power_save=power_save,
+                                                                            disable_ofdma=disable_ofdma,
+                                                                            roam_ft_ds=roam_ft_ds,
+                                                                            key_management=key_management,
+                                                                            pairwise=pairwise,
+                                                                            private_key=private_key,
+                                                                            ca_cert=ca_cert,
+                                                                            client_cert=client_cert,
+                                                                            pk_passwd=pk_passwd,
+                                                                            pac_file=pac_file,
+                                                                            expected_passfail_val=expected_passfail_value,
+                                                                            csv_name=device_csv_name,
+                                                                            wait_time=wait_time,
+                                                                            config=config,
+                                                                            get_live_view=get_live_view,
+                                                                            total_floors=total_floors
+                                                                            )
+            self.qos_obj_dict[ce][obj_name]["obj"].os_type()
+            _, configured_device, _, configuration = self.qos_obj_dict[ce][obj_name]["obj"].phantom_check()
+            if dowebgui and group_name:
+                # If no devices are configured, update the Web UI with "Stopped" status.
+                if len(configured_device) == 0:
+                    logger.warning("No device is available to run the test")
+                    obj1 = {
+                        "status": "Stopped",
+                        "configuration_status": "configured"
+                    }
+                    self.qos_obj_dict[ce][obj_name]["obj"].updating_webui_runningjson(obj1)
+                    return
+                else:
+                    obj1 = {
+                        "configured_devices": configured_device,
+                        "configuration_status": "configured"
+                    }
+                    self.qos_obj_dict[ce][obj_name]["obj"].updating_webui_runningjson(obj1)
+            # checking if we have atleast one device available for running test
+            # NOTE(review): dowebgui is compared to the string "True" here —
+            # the ThroughputQOS attribute is presumably stored as a string.
+            if self.qos_obj_dict[ce][obj_name]["obj"].dowebgui == "True":
+                if self.qos_obj_dict[ce][obj_name]["obj"].device_found is False:
+                    logger.warning("No Device is available to run the test hence aborting the test")
+                    # Write a terminal zero-throughput row so the web UI shows
+                    # the test as stopped, then abort loudly.
+                    df1 = pd.DataFrame([{
+                        "BE_dl": 0,
+                        "BE_ul": 0,
+                        "BK_dl": 0,
+                        "BK_ul": 0,
+                        "VI_dl": 0,
+                        "VI_ul": 0,
+                        "VO_dl": 0,
+                        "VO_ul": 0,
+                        "timestamp": datetime.datetime.now().strftime('%H:%M:%S'),
+                        'status': 'Stopped'
+                    }]
+                    )
+                    df1.to_csv('{}/overall_throughput.csv'.format(self.qos_obj_dict[ce][obj_name]["obj"].result_dir), index=False)
+                    raise ValueError("Aborting the test....")
+            self.qos_obj_dict[ce][obj_name]["obj"].build()
+            self.qos_obj_dict[ce][obj_name]["obj"].monitor_cx()
+            self.qos_obj_dict[ce][obj_name]["obj"].start(False, False)
+            # Let traffic ramp up before sampling throughput.
+            time.sleep(10)
+            connections_download, connections_upload, drop_a_per, drop_b_per, connections_download_avg, connections_upload_avg, avg_drop_a, avg_drop_b = self.qos_obj_dict[ce][obj_name]["obj"].monitor()
+            logger.info("connections download {}".format(connections_download))
+            logger.info("connections upload {}".format(connections_upload))
+            self.qos_obj_dict[ce][obj_name]["obj"].stop()
+            time.sleep(5)
+            test_results['test_results'].append(self.qos_obj_dict[ce][obj_name]["obj"].evaluate_qos(connections_download, connections_upload, drop_a_per, drop_b_per))
+            data.update(test_results)
+        test_end_time = datetime.datetime.now().strftime("%Y %d %H:%M:%S")
+        print("Test ended at: ", test_end_time)
+
+        input_setup_info = {
+            "contact": "support@candelatech.com"
+        }
+        # NOTE(review): everything below uses the last loop iteration's object;
+        # an empty loads_data would leave the registry entry as None.
+        self.qos_obj_dict[ce][obj_name]["obj"].cleanup()
+
+        # Update webgui running json with latest entry and test status completed
+        if self.qos_obj_dict[ce][obj_name]["obj"].dowebgui == "True":
+            last_entry = self.qos_obj_dict[ce][obj_name]["obj"].overall[len(self.qos_obj_dict[ce][obj_name]["obj"].overall) - 1]
+            last_entry["status"] = "Stopped"
+            last_entry["timestamp"] = datetime.datetime.now().strftime("%d/%m %I:%M:%S %p")
+            last_entry["remaining_time"] = "0"
+            last_entry["end_time"] = last_entry["timestamp"]
+            self.qos_obj_dict[ce][obj_name]["obj"].df_for_webui.append(
+                last_entry
+            )
+            df1 = pd.DataFrame(self.qos_obj_dict[ce][obj_name]["obj"].df_for_webui)
+            df1.to_csv('{}/overall_throughput.csv'.format(result_dir, ), index=False)
+
+            # copying to home directory i.e home/user_name
+            self.qos_obj_dict[ce][obj_name]["obj"].copy_reports_to_home_dir()
+        if group_name:
+            self.qos_obj_dict[ce][obj_name]["obj"].generate_report(
+                data=data,
+                input_setup_info=input_setup_info,
+                report_path=self.qos_obj_dict[ce][obj_name]["obj"].result_dir if self.qos_obj_dict[ce][obj_name]["obj"].dowebgui else self.result_path,
+                connections_upload_avg=connections_upload_avg,
+                connections_download_avg=connections_download_avg,
+                avg_drop_a=avg_drop_a,
+                avg_drop_b=avg_drop_b, config_devices=configuration)
+        else:
+            self.qos_obj_dict[ce][obj_name]["obj"].generate_report(
+                data=data,
+                input_setup_info=input_setup_info,
+                report_path=self.qos_obj_dict[ce][obj_name]["obj"].result_dir if self.qos_obj_dict[ce][obj_name]["obj"].dowebgui else self.result_path,
+                connections_upload_avg=connections_upload_avg,
+                connections_download_avg=connections_download_avg,
+                avg_drop_a=avg_drop_a,
+                avg_drop_b=avg_drop_b)
+        # Cache the report parameters so a combined/overall report can be
+        # regenerated later from the registry.
+        params = {
+            "data": None,
+            "input_setup_info": None,
+            "connections_download_avg": None,
+            "connections_upload_avg": None,
+            "avg_drop_a": None,
+            "avg_drop_b": None,
+            "report_path": "",
+            "result_dir_name": "Qos_Test_report",
+            "selected_real_clients_names": None,
+            "config_devices": ""
+        }
+
+        params.update({
+            "data": data,
+            "input_setup_info": input_setup_info,
+            "report_path": (
+                self.qos_obj_dict[ce][obj_name]["obj"].result_dir
+                if self.qos_obj_dict[ce][obj_name]["obj"].dowebgui else self.result_path
+            ),
+            "connections_upload_avg": connections_upload_avg,
+            "connections_download_avg": connections_download_avg,
+            "avg_drop_a": avg_drop_a,
+            "avg_drop_b": avg_drop_b
+        })
+
+        if group_name:
+            params["config_devices"] = configuration
+        self.qos_obj_dict[ce][obj_name]["data"] = params.copy()
+        if self.dowebgui:
+            # Signal the web UI that the QoS test has finished.
+            self.webgui_test_done("qos")
+        return True
+
+ def run_vs_test(self,args):
+ if self.dowebgui:
+ if not self.webgui_stop_check("vs"):
+ return False
+ media_source_dict = {
+ 'dash': '1',
+ 'smooth_streaming': '2',
+ 'hls': '3',
+ 'progressive': '4',
+ 'rtsp': '5'
+ }
+ media_quality_dict = {
+ '4k': '0',
+ '8k': '1',
+ '1080p': '2',
+ '720p': '3',
+ '360p': '4'
+ }
+
+ if args.file_name:
+ args.file_name = args.file_name.removesuffix('.csv')
+
+ media_source, media_quality = args.media_source.capitalize(), args.media_quality
+ args.media_source = args.media_source.lower()
+ args.media_quality = args.media_quality.lower()
+
+ if any(char.isalpha() for char in args.media_source):
+ args.media_source = media_source_dict[args.media_source]
+
+ if any(char.isalpha() for char in args.media_quality):
+ args.media_quality = media_quality_dict[args.media_quality]
+
+ logger_config = lf_logger_config.lf_logger_config()
+
+ if args.log_level:
+ logger_config.set_level(level=args.log_level)
+
+ if args.lf_logger_config_json:
+ logger_config.lf_logger_config_json = args.lf_logger_config_json
+ logger_config.load_lf_logger_config()
+
+ logger = logging.getLogger(__name__)
+ ce = self.current_exec #seires
+ if ce == "parallel":
+ obj_name = "vs_test"
+ else:
+ obj_no = 1
+ while f"vs_test_{obj_no}" in self.vs_obj_dict[ce]:
+ obj_no+=1
+ obj_name = f"vs_test_{obj_no}"
+ self.vs_obj_dict[ce][obj_name] = {"obj":None,"data":None}
+ self.vs_obj_dict[ce][obj_name]["obj"] = VideoStreamingTest(host=args.host, ssid=args.ssid, passwd=args.passwd, encryp=args.encryp,
+ suporrted_release=["7.0", "10", "11", "12"], max_speed=args.max_speed,
+ url=args.url, urls_per_tenm=args.urls_per_tenm, duration=args.duration,
+ resource_ids=args.device_list, dowebgui=args.dowebgui, media_quality=args.media_quality, media_source=args.media_source,
+ result_dir=args.result_dir, test_name=args.test_name, incremental=args.incremental, postcleanup=args.postcleanup,
+ precleanup=args.precleanup,
+ pass_fail_val=args.expected_passfail_value,
+ csv_name=args.device_csv_name,
+ groups=args.group_name,
+ profiles=args.profile_name,
+ config=args.config,
+ file_name=args.file_name,
+ floors=args.floors,
+ get_live_view=args.get_live_view
+ )
+ args.upstream_port = self.vs_obj_dict[ce][obj_name]["obj"].change_port_to_ip(args.upstream_port)
+ self.vs_obj_dict[ce][obj_name]["obj"].validate_args()
+ config_obj = DeviceConfig.DeviceConfig(lanforge_ip=args.host, file_name=args.file_name)
+ # if not args.expected_passfail_value and args.device_csv_name is None:
+ # config_obj.device_csv_file(csv_name="device.csv")
+
+ resource_ids_sm = []
+ resource_set = set()
+ resource_list = []
+ resource_ids_generated = ""
+
+ if args.group_name and args.file_name and args.profile_name:
+ selected_groups = args.group_name.split(',')
+ selected_profiles = args.profile_name.split(',')
+ config_devices = {}
+ for i in range(len(selected_groups)):
+ config_devices[selected_groups[i]] = selected_profiles[i]
+ config_obj.initiate_group()
+ asyncio.run(config_obj.connectivity(config_devices, upstream=args.upstream_port))
+
+ adbresponse = config_obj.adb_obj.get_devices()
+ resource_manager = config_obj.laptop_obj.get_devices()
+ all_res = {}
+ df1 = config_obj.display_groups(config_obj.groups)
+ groups_list = df1.to_dict(orient='list')
+ group_devices = {}
+ for adb in adbresponse:
+ group_devices[adb['serial']] = adb['eid']
+ for res in resource_manager:
+ all_res[res['hostname']] = res['shelf'] + '.' + res['resource']
+ eid_list = []
+ for grp_name in groups_list.keys():
+ for g_name in selected_groups:
+ if grp_name == g_name:
+ for j in groups_list[grp_name]:
+ if j in group_devices.keys():
+ eid_list.append(group_devices[j])
+ elif j in all_res.keys():
+ eid_list.append(all_res[j])
+ args.device_list = ",".join(id for id in eid_list)
+ else:
+ # When group/profile are not provided
+ config_dict = {
+ 'ssid': args.ssid,
+ 'passwd': args.passwd,
+ 'enc': args.encryp,
+ 'eap_method': args.eap_method,
+ 'eap_identity': args.eap_identity,
+ 'ieee80211': args.ieee8021x,
+ 'ieee80211u': args.ieee80211u,
+ 'ieee80211w': args.ieee80211w,
+ 'enable_pkc': args.enable_pkc,
+ 'bss_transition': args.bss_transition,
+ 'power_save': args.power_save,
+ 'disable_ofdma': args.disable_ofdma,
+ 'roam_ft_ds': args.roam_ft_ds,
+ 'key_management': args.key_management,
+ 'pairwise': args.pairwise,
+ 'private_key': args.private_key,
+ 'ca_cert': args.ca_cert,
+ 'client_cert': args.client_cert,
+ 'pk_passwd': args.pk_passwd,
+ 'pac_file': args.pac_file,
+ 'server_ip': args.upstream_port
+ }
+ if args.device_list:
+ all_devices = config_obj.get_all_devices()
+ if args.group_name is None and args.file_name is None and args.profile_name is None:
+ dev_list = args.device_list.split(',')
+ if args.config:
+ asyncio.run(config_obj.connectivity(device_list=dev_list, wifi_config=config_dict))
+ else:
+ if args.config:
+ all_devices = config_obj.get_all_devices()
+ device_list = []
+ for device in all_devices:
+ if device["type"] != 'laptop':
+ device_list.append(device["shelf"] + '.' + device["resource"] + " " + device["serial"])
+ elif device["type"] == 'laptop':
+ device_list.append(device["shelf"] + '.' + device["resource"] + " " + device["hostname"])
+ print("Available devices:")
+ for device in device_list:
+ print(device)
+ args.device_list = input("Enter the desired resources to run the test:")
+ dev1_list = args.device_list.split(',')
+ asyncio.run(config_obj.connectivity(device_list=dev1_list, wifi_config=config_dict))
+ else:
+ self.vs_obj_dict[ce][obj_name]["obj"].android_devices = self.vs_obj_dict[ce][obj_name]["obj"].devices.get_devices(only_androids=True)
+ selected_devices, report_labels, selected_macs = self.vs_obj_dict[ce][obj_name]["obj"].devices.query_user()
+ if not selected_devices:
+ logging.info("devices donot exist..!!")
+ return
+
+ self.vs_obj_dict[ce][obj_name]["obj"].android_list = selected_devices
+ # Verify if all resource IDs are valid for Android devices
+ if self.vs_obj_dict[ce][obj_name]["obj"].android_list:
+ resource_ids = ",".join([item.split(".")[1] for item in self.vs_obj_dict[ce][obj_name]["obj"].android_list])
+
+ num_list = list(map(int, resource_ids.split(',')))
+
+ # Sort the list
+ num_list.sort()
+
+ # Join the sorted list back into a string
+ sorted_string = ','.join(map(str, num_list))
+
+ self.vs_obj_dict[ce][obj_name]["obj"].resource_ids = sorted_string
+ resource_ids1 = list(map(int, sorted_string.split(',')))
+ modified_list = list(map(lambda item: int(item.split('.')[1]), self.vs_obj_dict[ce][obj_name]["obj"].android_devices))
+ if not all(x in modified_list for x in resource_ids1):
+ logging.info("Verify Resource ids, as few are invalid...!!")
+ return False
+ resource_ids_sm = self.vs_obj_dict[ce][obj_name]["obj"].resource_ids
+ resource_list = resource_ids_sm.split(',')
+ resource_set = set(resource_list)
+ resource_list_sorted = sorted(resource_set)
+ resource_ids_generated = ','.join(resource_list_sorted)
+ available_resources = list(resource_set)
+
+ if args.dowebgui:
+ resource_ids_sm = args.device_list.split(',')
+ resource_set = set(resource_ids_sm)
+ resource_list = sorted(resource_set)
+ resource_ids_generated = ','.join(resource_list)
+ resource_list_sorted = resource_list
+ selected_devices, report_labels, selected_macs = self.vs_obj_dict[ce][obj_name]["obj"].devices.query_user(dowebgui=args.dowebgui, device_list=resource_ids_generated)
+ self.vs_obj_dict[ce][obj_name]["obj"].resource_ids = ",".join(id.split(".")[1] for id in args.device_list.split(","))
+ available_resources = [int(num) for num in self.vs_obj_dict[ce][obj_name]["obj"].resource_ids.split(',')]
+ else:
+ self.vs_obj_dict[ce][obj_name]["obj"].android_devices = self.vs_obj_dict[ce][obj_name]["obj"].devices.get_devices(only_androids=True)
+ if args.device_list:
+ device_list = args.device_list.split(',')
+ # Extract resource IDs (after the dot), remove duplicates, and sort them
+ resource_ids = sorted(set(int(item.split('.')[1]) for item in device_list if '.' in item))
+ resource_list_sorted = resource_ids
+ self.vs_obj_dict[ce][obj_name]["obj"].resource_ids = ','.join(map(str, resource_ids))
+ # Create a set of Android device IDs (e.g., "resource.123")
+ android_device_ids = set(self.vs_obj_dict[ce][obj_name]["obj"].android_devices)
+ android_device_short_ids = {device.split('.')[0] + '.' + device.split('.')[1] for device in android_device_ids}
+ self.vs_obj_dict[ce][obj_name]["obj"].android_list = [dev for dev in android_device_short_ids if dev in device_list]
+ # Log any devices in the list that are not available
+ for dev in device_list:
+ if dev not in android_device_short_ids:
+ logger.info(f"{dev} device is not available")
+ # Final list of available Android resource IDs
+ available_resources = sorted(set(int(dev.split('.')[1]) for dev in self.vs_obj_dict[ce][obj_name]["obj"].android_list))
+ logger.info(f"Available devices: {available_resources}")
+ if len(available_resources) != 0:
+ available_resources = self.vs_obj_dict[ce][obj_name]["obj"].filter_ios_devices(available_resources)
+ if len(available_resources) == 0:
+ logger.info("No devices which are selected are available in the lanforge")
+ return False
+ gave_incremental = False
+ if args.incremental and not args.webgui_incremental:
+ if self.vs_obj_dict[ce][obj_name]["obj"].resource_ids:
+ logging.info("The total available devices are {}".format(len(available_resources)))
+ self.vs_obj_dict[ce][obj_name]["obj"].incremental = input('Specify incremental values as 1,2,3 : ')
+ self.vs_obj_dict[ce][obj_name]["obj"].incremental = [int(x) for x in self.vs_obj_dict[ce][obj_name]["obj"].incremental.split(',')]
+ else:
+ logging.info("incremental Values are not needed as Android devices are not selected..")
+ elif not args.incremental:
+ gave_incremental = True
+ self.vs_obj_dict[ce][obj_name]["obj"].incremental = [len(available_resources)]
+
+ if args.webgui_incremental:
+ incremental = [int(x) for x in args.webgui_incremental.split(',')]
+ if (len(args.webgui_incremental) == 1 and incremental[0] != len(resource_list_sorted)) or (len(args.webgui_incremental) > 1):
+ self.vs_obj_dict[ce][obj_name]["obj"].incremental = incremental
+
+ if self.vs_obj_dict[ce][obj_name]["obj"].incremental and self.vs_obj_dict[ce][obj_name]["obj"].resource_ids:
+ if self.vs_obj_dict[ce][obj_name]["obj"].incremental[-1] > len(available_resources):
+ logging.info("Exiting the program as incremental values are greater than the resource ids provided")
+ return False
+ elif self.vs_obj_dict[ce][obj_name]["obj"].incremental[-1] < len(available_resources) and len(self.vs_obj_dict[ce][obj_name]["obj"].incremental) > 1:
+ logging.info("Exiting the program as the last incremental value must be equal to selected devices")
+ return False
+
+ # To create cx for selected devices
+ self.vs_obj_dict[ce][obj_name]["obj"].build()
+
+ # To set media source and media quality
+ time.sleep(10)
+
+ # self.vs_obj_dict[ce][obj_name]["obj"].run
+ test_time = datetime.datetime.now()
+ test_time = test_time.strftime("%b %d %H:%M:%S")
+
+ logging.info("Initiating Test...")
+
+ individual_dataframe_columns = []
+
+ keys = list(self.vs_obj_dict[ce][obj_name]["obj"].http_profile.created_cx.keys())
+
+ # Extend individual_dataframe_column with dynamically generated column names
+ for i in range(len(keys)):
+ individual_dataframe_columns.extend([
+ f'video_format_bitrate_{keys[i]}',
+ f'total_wait_time_{keys[i]}',
+ f'total_urls_{keys[i]}',
+ f'RSSI_{keys[i]}',
+ f'Link Speed_{keys[i]}',
+ f'Total Buffer_{keys[i]}',
+ f'Total Errors_{keys[i]}',
+ f'Min_Video_Rate_{keys[i]}',
+ f'Max_Video_Rate_{keys[i]}',
+ f'Avg_Video_Rate_{keys[i]}',
+ f'bytes_rd_{keys[i]}',
+ f'rx rate_{keys[i]} bps',
+ f'frame_rate_{keys[i]}',
+ f'Video Quality_{keys[i]}'
+ ])
+
+ individual_dataframe_columns.extend(['overall_video_format_bitrate', 'timestamp', 'iteration', 'start_time', 'end_time', 'remaining_Time', 'status'])
+ individual_df = pd.DataFrame(columns=individual_dataframe_columns)
+
+ cx_order_list = []
+ index = 0
+ file_path = ""
+
+ # Parsing test_duration
+ if args.duration.endswith('s') or args.duration.endswith('S'):
+ args.duration = round(int(args.duration[0:-1]) / 60, 2)
+
+ elif args.duration.endswith('m') or args.duration.endswith('M'):
+ args.duration = int(args.duration[0:-1])
+
+ elif args.duration.endswith('h') or args.duration.endswith('H'):
+ args.duration = int(args.duration[0:-1]) * 60
+
+ elif args.duration.endswith(''):
+ args.duration = int(args.duration)
+
+ incremental_capacity_list_values = self.vs_obj_dict[ce][obj_name]["obj"].get_incremental_capacity_list()
+ if incremental_capacity_list_values[-1] != len(available_resources):
+ logger.error("Incremental capacity doesnt match available devices")
+ if args.postcleanup:
+ self.vs_obj_dict[ce][obj_name]["obj"].postcleanup()
+ return False
+ # Process resource IDs and incremental values if specified
+ if self.vs_obj_dict[ce][obj_name]["obj"].resource_ids:
+ if self.vs_obj_dict[ce][obj_name]["obj"].incremental:
+ test_setup_info_incremental_values = ','.join([str(n) for n in incremental_capacity_list_values])
+ if len(self.vs_obj_dict[ce][obj_name]["obj"].incremental) == len(available_resources):
+ test_setup_info_total_duration = args.duration
+ elif len(self.vs_obj_dict[ce][obj_name]["obj"].incremental) == 1 and len(available_resources) > 1:
+ if self.vs_obj_dict[ce][obj_name]["obj"].incremental[0] == len(available_resources):
+ test_setup_info_total_duration = args.duration
+ else:
+ div = len(available_resources) // self.vs_obj_dict[ce][obj_name]["obj"].incremental[0]
+ mod = len(available_resources) % self.vs_obj_dict[ce][obj_name]["obj"].incremental[0]
+ if mod == 0:
+ test_setup_info_total_duration = args.duration * (div)
+ else:
+ test_setup_info_total_duration = args.duration * (div + 1)
+ else:
+ test_setup_info_total_duration = args.duration * len(incremental_capacity_list_values)
+ else:
+ test_setup_info_total_duration = args.duration
+
+ if args.webgui_incremental:
+ test_setup_info_incremental_values = ','.join([str(n) for n in incremental_capacity_list_values])
+ elif gave_incremental:
+ test_setup_info_incremental_values = "No Incremental Value provided"
+ self.vs_obj_dict[ce][obj_name]["obj"].total_duration = test_setup_info_total_duration
+
+ actual_start_time = datetime.datetime.now()
+
+ iterations_before_test_stopped_by_user = []
+
+ # Calculate and manage cx_order_list ( list of cross connections to run ) based on incremental values
+ if self.vs_obj_dict[ce][obj_name]["obj"].resource_ids:
+ # Check if incremental is specified
+ if self.vs_obj_dict[ce][obj_name]["obj"].incremental:
+
+ # Case 1: Incremental list has only one value and it equals the length of keys
+ if len(self.vs_obj_dict[ce][obj_name]["obj"].incremental) == 1 and self.vs_obj_dict[ce][obj_name]["obj"].incremental[0] == len(keys):
+ cx_order_list.append(keys[index:])
+
+ # Case 2: Incremental list has only one value but length of keys is greater than 1
+ elif len(self.vs_obj_dict[ce][obj_name]["obj"].incremental) == 1 and len(keys) > 1:
+ incremental_value = self.vs_obj_dict[ce][obj_name]["obj"].incremental[0]
+ max_index = len(keys)
+ index = 0
+
+ while index < max_index:
+ next_index = min(index + incremental_value, max_index)
+ cx_order_list.append(keys[index:next_index])
+ index = next_index
+
+ # Case 3: Incremental list has multiple values and length of keys is greater than 1
+ elif len(self.vs_obj_dict[ce][obj_name]["obj"].incremental) != 1 and len(keys) > 1:
+
+ index = 0
+ for num in self.vs_obj_dict[ce][obj_name]["obj"].incremental:
+
+ cx_order_list.append(keys[index: num])
+ index = num
+
+ if index < len(keys):
+ cx_order_list.append(keys[index:])
+
+ # Iterate over cx_order_list to start tests incrementally
+ for i in range(len(cx_order_list)):
+ if i == 0:
+ self.vs_obj_dict[ce][obj_name]["obj"].data["start_time_webGUI"] = [datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')]
+ end_time_webGUI = (datetime.datetime.now() + datetime.timedelta(minutes=self.vs_obj_dict[ce][obj_name]["obj"].total_duration)).strftime('%Y-%m-%d %H:%M:%S')
+ self.vs_obj_dict[ce][obj_name]["obj"].data['end_time_webGUI'] = [end_time_webGUI]
+
+ # time.sleep(10)
+
+ # Start specific devices based on incremental capacity
+ self.vs_obj_dict[ce][obj_name]["obj"].start_specific(cx_order_list[i])
+ if cx_order_list[i]:
+ logging.info("Test started on Devices with resource Ids : {selected}".format(selected=cx_order_list[i]))
+ else:
+ logging.info("Test started on Devices with resource Ids : {selected}".format(selected=cx_order_list[i]))
+ file_path = "video_streaming_realtime_data.csv"
+ if end_time_webGUI < datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'):
+ self.vs_obj_dict[ce][obj_name]["obj"].data['remaining_time_webGUI'] = ['0:00']
+ else:
+ date_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
+ self.vs_obj_dict[ce][obj_name]["obj"].data['remaining_time_webGUI'] = [datetime.datetime.strptime(end_time_webGUI, "%Y-%m-%d %H:%M:%S") - datetime.datetime.strptime(date_time, "%Y-%m-%d %H:%M:%S")]
+
+ if args.dowebgui:
+ file_path = os.path.join(self.vs_obj_dict[ce][obj_name]["obj"].result_dir, "../../Running_instances/{}_{}_running.json".format(self.vs_obj_dict[ce][obj_name]["obj"].host, self.vs_obj_dict[ce][obj_name]["obj"].test_name))
+ if os.path.exists(file_path):
+ with open(file_path, 'r') as file:
+ data = json.load(file)
+ if data["status"] != "Running":
+ break
+ test_stopped_by_user = self.vs_obj_dict[ce][obj_name]["obj"].monitor_for_runtime_csv(args.duration, file_path, individual_df, i, actual_start_time, resource_list_sorted, cx_order_list[i])
+ else:
+ test_stopped_by_user = self.vs_obj_dict[ce][obj_name]["obj"].monitor_for_runtime_csv(args.duration, file_path, individual_df, i, actual_start_time, resource_list_sorted, cx_order_list[i])
+ if not test_stopped_by_user:
+ # Append current iteration index to iterations_before_test_stopped_by_user
+ iterations_before_test_stopped_by_user.append(i)
+ else:
+ # Append current iteration index to iterations_before_test_stopped_by_user
+ iterations_before_test_stopped_by_user.append(i)
+ break
+ self.vs_obj_dict[ce][obj_name]["obj"].stop()
+
+ if self.vs_obj_dict[ce][obj_name]["obj"].resource_ids:
+
+ date = str(datetime.datetime.now()).split(",")[0].replace(" ", "-").split(".")[0]
+ username = []
+
+ try:
+ eid_data = self.vs_obj_dict[ce][obj_name]["obj"].json_get("ports?fields=alias,mac,mode,Parent Dev,rx-rate,tx-rate,ssid,signal")
+ except KeyError:
+ logger.error("Error: 'interfaces' key not found in port data")
+ return False
+
+ resource_ids = list(map(int, self.vs_obj_dict[ce][obj_name]["obj"].resource_ids.split(',')))
+ for alias in eid_data["interfaces"]:
+ for i in alias:
+ if int(i.split(".")[1]) > 1 and alias[i]["alias"] == 'wlan0':
+ resource_hw_data = self.vs_obj_dict[ce][obj_name]["obj"].json_get("/resource/" + i.split(".")[0] + "/" + i.split(".")[1])
+ hw_version = resource_hw_data['resource']['hw version']
+ if not hw_version.startswith(('Win', 'Linux', 'Apple')) and int(resource_hw_data['resource']['eid'].split('.')[1]) in resource_ids:
+ username.append(resource_hw_data['resource']['user'])
+
+ device_list_str = ','.join([f"{name} ( Android )" for name in username])
+
+ test_setup_info = {
+ "Testname": args.test_name,
+ "Device List": device_list_str,
+ "No of Devices": "Total" + "( " + str(len(keys)) + " ): Android(" + str(len(keys)) + ")",
+ "Incremental Values": "",
+ "URL": args.url,
+ "Media Source": media_source.upper(),
+ "Media Quality": media_quality
+ }
+ test_setup_info['Incremental Values'] = test_setup_info_incremental_values
+ test_setup_info['Total Duration (min)'] = str(test_setup_info_total_duration)
+
+ logging.info("Test Completed")
+
+ # prev_inc_value = 0
+ if self.vs_obj_dict[ce][obj_name]["obj"].resource_ids and self.vs_obj_dict[ce][obj_name]["obj"].incremental:
+ self.vs_obj_dict[ce][obj_name]["obj"].generate_report(date, list(set(iterations_before_test_stopped_by_user)), test_setup_info=test_setup_info, realtime_dataset=individual_df, cx_order_list=cx_order_list,report_path=self.result_path if not self.dowebgui else self.result_dir)
+ elif self.vs_obj_dict[ce][obj_name]["obj"].resource_ids:
+ self.vs_obj_dict[ce][obj_name]["obj"].generate_report(date, list(set(iterations_before_test_stopped_by_user)), test_setup_info=test_setup_info, realtime_dataset=individual_df,report_path=self.result_path if not self.dowebgui else self.result_dir)
+
+ params = {
+ "date": None,
+ "iterations_before_test_stopped_by_user": None,
+ "test_setup_info": None,
+ "realtime_dataset": None,
+ "report_path": "",
+ "cx_order_list": []
+ }
+ if self.vs_obj_dict[ce][obj_name]["obj"].resource_ids and self.vs_obj_dict[ce][obj_name]["obj"].incremental:
+ params.update({
+ "date": date,
+ "iterations_before_test_stopped_by_user": list(set(iterations_before_test_stopped_by_user)),
+ "test_setup_info": test_setup_info,
+ "realtime_dataset": individual_df,
+ "report_path": self.result_path,
+ "cx_order_list": cx_order_list
+ })
+ elif self.vs_obj_dict[ce][obj_name]["obj"].resource_ids:
+ params.update({
+ "date": date,
+ "iterations_before_test_stopped_by_user": list(set(iterations_before_test_stopped_by_user)),
+ "test_setup_info": test_setup_info,
+ "realtime_dataset": individual_df,
+ "report_path": self.result_path
+ })
+ self.vs_obj_dict[ce][obj_name]["data"] = params.copy()
+ # Perform post-cleanup operations
+ if args.postcleanup:
+ self.vs_obj_dict[ce][obj_name]["obj"].postcleanup()
+
+ if args.dowebgui:
+ self.vs_obj_dict[ce][obj_name]["obj"].copy_reports_to_home_dir()
+ self.webgui_test_done("vs")
+ return True
+
+
    def run_vs_test1(
            self,
            ssid=None,
            passwd="something",
            encryp="psk",
            url="www.google.com",
            max_speed=0,
            urls_per_tenm=100,
            duration=None,
            test_name="video_streaming_test",
            dowebgui=False,
            result_dir='',
            lf_logger_config_json=None,
            log_level=None,
            debug=False,
            media_source='1',
            media_quality='0',
            device_list=None,
            webgui_incremental=None,
            incremental=False,
            no_laptops=True,
            postcleanup=False,
            precleanup=False,
            help_summary=False,
            group_name=None,
            profile_name=None,
            file_name=None,
            eap_method='DEFAULT',
            eap_identity='DEFAULT',
            ieee8021x=False,
            ieee80211u=False,
            ieee80211w=1,
            enable_pkc=False,
            bss_transition=False,
            power_save=False,
            disable_ofdma=False,
            roam_ft_ds=False,
            key_management='DEFAULT',
            pairwise='NA',
            private_key='NA',
            ca_cert='NA',
            client_cert='NA',
            pk_passwd='NA',
            pac_file='NA',
            upstream_port='NA',
            expected_passfail_value=None,
            csv_name=None,
            wait_time=60,
            config=False,
            device_csv_name=None,
            get_live_view=False,
            floors=0
    ):
        """Keyword-argument wrapper around run_vs_test().

        Packs every keyword argument into an argparse-style namespace
        (mirroring the CLI options of the standalone video-streaming
        test script) and delegates to run_vs_test().

        Returns whatever run_vs_test() returns.
        """
        # Snapshot ALL parameters into an argparse-like namespace.  This
        # must remain the FIRST statement of the function: locals() at this
        # point contains exactly the parameters (including `self`, which
        # therefore also appears as args.self).  Introducing any local
        # variable before this line would leak it into `args`.
        args = SimpleNamespace(**locals())
        # run_vs_test() reads the LANforge manager address from args.host.
        args.host = self.lanforge_ip
        return self.run_vs_test(args)
+
+ def run_throughput_test(
+ self,
+ device_list=[],
+ upstream_port='eth1',
+ ssid=None,
+ passwd='[BLANK]',
+ traffic_type=None,
+ upload='2560',
+ download='2560',
+ test_duration='',
+ report_timer='1s',
+ ap_name='Test-AP',
+ dowebgui=False,
+ tos='Best_Efforts',
+ packet_size='-1',
+ incremental_capacity=[],
+ load_type='wc_per_client_load',
+ do_interopability=False,
+ postcleanup=False,
+ precleanup=False,
+ incremental=False,
+ security='open',
+ test_name=None,
+ result_dir='',
+ get_live_view=False,
+ total_floors='0',
+ expected_passfail_value=None,
+ device_csv_name=None,
+ eap_method='DEFAULT',
+ eap_identity='',
+ ieee8021x=False,
+ ieee80211u=False,
+ ieee80211w=1,
+ enable_pkc=False,
+ bss_transition=False,
+ power_save=False,
+ disable_ofdma=False,
+ roam_ft_ds=False,
+ key_management='DEFAULT',
+ pairwise='NA',
+ private_key='NA',
+ ca_cert='NA',
+ client_cert='NA',
+ pk_passwd='NA',
+ pac_file='NA',
+ file_name=None,
+ group_name=None,
+ profile_name=None,
+ wait_time=60,
+ config=False,
+ default_config=True,
+ thpt_mbps=False,
+ help_summary=False
+ ):
+
+ if dowebgui:
+ if (upload == '0'):
+ upload = '2560'
+ if (download == '0'):
+ download = '2560'
+ if self.dowebgui:
+ if not self.webgui_stop_check("thput"):
+ return False
+
+ logger_config = lf_logger_config.lf_logger_config()
+
+ if(thpt_mbps):
+ if download != '2560' and download != '0' and upload != '0' and upload != '2560':
+ download = str(int(download) * 1000000)
+ upload = str(int(upload) * 1000000)
+ elif upload != '2560' and upload != '0':
+ upload = str(int(upload) * 1000000)
+ else:
+ download = str(int(download) * 1000000)
+ loads = {}
+ iterations_before_test_stopped_by_user = []
+ gave_incremental = False
+ # Case based on download and upload arguments are provided
+ if download and upload:
+ loads = {'upload': str(upload).split(","), 'download': str(download).split(",")}
+ loads_data = loads["download"]
+ elif download:
+ loads = {'upload': [], 'download': str(download).split(",")}
+ for i in range(len(download)):
+ loads['upload'].append(2560)
+ loads_data = loads["download"]
+ else:
+ if upload:
+ loads = {'upload': str(upload).split(","), 'download': []}
+ for i in range(len(upload)):
+ loads['download'].append(2560)
+ loads_data = loads["upload"]
+
+ if download != '2560' and download != '0' and upload != '0' and upload != '2560':
+ csv_direction = 'L3_' + traffic_type.split('_')[1].upper() + '_BiDi'
+ elif upload != '2560' and upload != '0':
+ csv_direction = 'L3_' + traffic_type.split('_')[1].upper() + '_UL'
+ else:
+ csv_direction = 'L3_' + traffic_type.split('_')[1].upper() + '_DL'
+
+ # validate_args(args)
+ if incremental_capacity == 'no_increment' and dowebgui:
+ incremental_capacity = str(len(device_list.split(",")))
+ gave_incremental = True
+
+ if do_interopability:
+ incremental_capacity = "1"
+
+ # Parsing test_duration
+ if test_duration.endswith('s') or test_duration.endswith('S'):
+ test_duration = int(test_duration[0:-1])
+
+ elif test_duration.endswith('m') or test_duration.endswith('M'):
+ test_duration = int(test_duration[0:-1]) * 60
+
+ elif test_duration.endswith('h') or test_duration.endswith('H'):
+ test_duration = int(test_duration[0:-1]) * 60 * 60
+
+ elif test_duration.endswith(''):
+ test_duration = int(test_duration)
+
+ # Parsing report_timer
+ if report_timer.endswith('s') or report_timer.endswith('S'):
+ report_timer = int(report_timer[0:-1])
+
+ elif report_timer.endswith('m') or report_timer.endswith('M'):
+ report_timer = int(report_timer[0:-1]) * 60
+
+ elif report_timer.endswith('h') or report_timer.endswith('H'):
+ report_timer = int(report_timer[0:-1]) * 60 * 60
+
+ elif test_duration.endswith(''):
+ report_timer = int(report_timer)
+ if (int(packet_size) < 16 or int(packet_size) > 65507) and int(packet_size) != -1:
+ logger.error("Packet size should be greater than 16 bytes and less than 65507 bytes incorrect")
+ return
+ ce = self.current_exec #seires
+ if ce == "parallel":
+ obj_name = "thput_test"
+ else:
+ obj_no = 1
+ while f"thput_test_{obj_no}" in self.thput_obj_dict[ce]:
+ obj_no+=1
+ obj_name = f"thput_test_{obj_no}"
+ self.thput_obj_dict[ce][obj_name] = {"obj":None,"data":None}
+ for index in range(len(loads_data)):
+ self.thput_obj_dict[ce][obj_name]["obj"] = Throughput(host=self.lanforge_ip,
+ ip=self.lanforge_ip,
+ port=self.port,
+ number_template="0000",
+ ap_name=ap_name,
+ name_prefix="TOS-",
+ upstream=upstream_port,
+ ssid=ssid,
+ password=passwd,
+ security=security,
+ test_duration=test_duration,
+ use_ht160=False,
+ side_a_min_rate=int(loads['upload'][index]),
+ side_b_min_rate=int(loads['download'][index]),
+ side_a_min_pdu=int(packet_size),
+ side_b_min_pdu=int(packet_size),
+ traffic_type=traffic_type,
+ tos=tos,
+ dowebgui=dowebgui,
+ test_name=test_name,
+ result_dir=result_dir,
+ device_list=device_list,
+ incremental_capacity=incremental_capacity,
+ report_timer=report_timer,
+ load_type=load_type,
+ do_interopability=do_interopability,
+ incremental=incremental,
+ precleanup=precleanup,
+ get_live_view= get_live_view,
+ total_floors = total_floors,
+ csv_direction=csv_direction,
+ expected_passfail_value=expected_passfail_value,
+ device_csv_name=device_csv_name,
+ file_name=file_name,
+ group_name=group_name,
+ profile_name=profile_name,
+ eap_method=eap_method,
+ eap_identity=eap_identity,
+ ieee80211=ieee8021x,
+ ieee80211u=ieee80211u,
+ ieee80211w=ieee80211w,
+ enable_pkc=enable_pkc,
+ bss_transition=bss_transition,
+ power_save=power_save,
+ disable_ofdma=disable_ofdma,
+ roam_ft_ds=roam_ft_ds,
+ key_management=key_management,
+ pairwise=pairwise,
+ private_key=private_key,
+ ca_cert=ca_cert,
+ client_cert=client_cert,
+ pk_passwd=pk_passwd,
+ pac_file=pac_file,
+ wait_time=wait_time,
+ config=config,
+ default_config = default_config
+ )
+
+ if gave_incremental:
+ self.thput_obj_dict[ce][obj_name]["obj"].gave_incremental = True
+ self.thput_obj_dict[ce][obj_name]["obj"].os_type()
+
+ check_condition, clients_to_run = self.thput_obj_dict[ce][obj_name]["obj"].phantom_check()
+
+ if check_condition == False:
+ return
+
+ check_increment_condition = self.thput_obj_dict[ce][obj_name]["obj"].check_incremental_list()
+
+ if check_increment_condition == False:
+ logger.error("Incremental values given for selected devices are incorrect")
+ return
+
+ elif (len(incremental_capacity) > 0 and check_increment_condition == False):
+ logger.error("Incremental values given for selected devices are incorrect")
+ return
+
+ created_cxs = self.thput_obj_dict[ce][obj_name]["obj"].build()
+ time.sleep(10)
+ created_cxs = list(created_cxs.keys())
+ individual_dataframe_column = []
+
+ to_run_cxs, to_run_cxs_len, created_cx_lists_keys, incremental_capacity_list = self.thput_obj_dict[ce][obj_name]["obj"].get_incremental_capacity_list()
+
+ for i in range(len(clients_to_run)):
+
+ # Extend individual_dataframe_column with dynamically generated column names
+ individual_dataframe_column.extend([f'Download{clients_to_run[i]}', f'Upload{clients_to_run[i]}', f'Rx % Drop {clients_to_run[i]}',
+ f'Tx % Drop{clients_to_run[i]}', f'Average RTT {clients_to_run[i]} ', f'RSSI {clients_to_run[i]} ', f'Tx-Rate {clients_to_run[i]} ', f'Rx-Rate {clients_to_run[i]} '])
+
+ individual_dataframe_column.extend(['Overall Download', 'Overall Upload', 'Overall Rx % Drop ', 'Overall Tx % Drop', 'Iteration',
+ 'TIMESTAMP', 'Start_time', 'End_time', 'Remaining_Time', 'Incremental_list', 'status'])
+ individual_df = pd.DataFrame(columns=individual_dataframe_column)
+
+ overall_start_time = datetime.datetime.now()
+ overall_end_time = overall_start_time + datetime.timedelta(seconds=int(test_duration) * len(incremental_capacity_list))
+
+ for i in range(len(to_run_cxs)):
+ is_device_configured = True
+ if do_interopability:
+ # To get resource of device under test in interopability
+ device_to_run_resource = self.thput_obj_dict[ce][obj_name]["obj"].extract_digits_until_alpha(to_run_cxs[i][0])
+
+ # Check the load type specified by the user
+ if load_type == "wc_intended_load":
+ # Perform intended load for the current iteration
+ self.thput_obj_dict[ce][obj_name]["obj"].perform_intended_load(i, incremental_capacity_list)
+ if i != 0:
+
+ # Stop throughput testing if not the first iteration
+ self.thput_obj_dict[ce][obj_name]["obj"].stop()
+
+ # Start specific connections for the current iteration
+ self.thput_obj_dict[ce][obj_name]["obj"].start_specific(created_cx_lists_keys[:incremental_capacity_list[i]])
+ else:
+ if (do_interopability and i != 0):
+ self.thput_obj_dict[ce][obj_name]["obj"].stop_specific(to_run_cxs[i - 1])
+ time.sleep(5)
+ if not default_config:
+ if (do_interopability and i == 0):
+ self.thput_obj_dict[ce][obj_name]["obj"].disconnect_all_devices()
+ if do_interopability and "iOS" not in to_run_cxs[i][0]:
+ logger.info("Configuring device of resource{}".format(to_run_cxs[i][0]))
+ is_device_configured = self.thput_obj_dict[ce][obj_name]["obj"].configure_specific([device_to_run_resource])
+ if is_device_configured:
+ self.thput_obj_dict[ce][obj_name]["obj"].start_specific(to_run_cxs[i])
+
+ # Determine device names based on the current iteration
+ device_names = created_cx_lists_keys[:to_run_cxs_len[i][-1]]
+
+ # Monitor throughput and capture all dataframes and test stop status
+ all_dataframes, test_stopped_by_user = self.thput_obj_dict[ce][obj_name]["obj"].monitor(i, individual_df, device_names, incremental_capacity_list, overall_start_time, overall_end_time, is_device_configured)
+ if do_interopability and "iOS" not in to_run_cxs[i][0] and not default_config:
+ # logger.info("Disconnecting device of resource{}".format(to_run_cxs[i][0]))
+ self.thput_obj_dict[ce][obj_name]["obj"].disconnect_all_devices([device_to_run_resource])
+ # Check if the test was stopped by the user
+ if test_stopped_by_user == False:
+
+ # Append current iteration index to iterations_before_test_stopped_by_user
+ iterations_before_test_stopped_by_user.append(i)
+ else:
+
+ # Append current iteration index to iterations_before_test_stopped_by_user
+ iterations_before_test_stopped_by_user.append(i)
+ break
+
+ # logger.info("connections download {}".format(connections_download))
+ # logger.info("connections upload {}".format(connections_upload))
+ self.thput_obj_dict[ce][obj_name]["obj"].stop()
+ if postcleanup:
+ self.thput_obj_dict[ce][obj_name]["obj"].cleanup()
+ self.thput_obj_dict[ce][obj_name]["obj"].generate_report(list(set(iterations_before_test_stopped_by_user)), incremental_capacity_list, data=all_dataframes, data1=to_run_cxs_len, report_path=self.result_path if not self.thput_obj_dict[ce][obj_name]["obj"].dowebgui else self.thput_obj_dict[ce][obj_name]["obj"].result_dir)
+ if self.thput_obj_dict[ce][obj_name]["obj"].dowebgui:
+ # copying to home directory i.e home/user_name
+ self.thput_obj_dict[ce][obj_name]["obj"].copy_reports_to_home_dir()
+ params = {
+ "iterations_before_test_stopped_by_user": list(set(iterations_before_test_stopped_by_user)),
+ "incremental_capacity_list": incremental_capacity_list,
+ "data": all_dataframes,
+ "data1": to_run_cxs_len,
+ "report_path": self.result_path if not self.thput_obj_dict[ce][obj_name]["obj"].dowebgui else self.thput_obj_dict[ce][obj_name]["obj"].result_dir
+ }
+ self.thput_obj_dict[ce][obj_name]["data"] = params.copy()
+ if self.dowebgui:
+ self.webgui_test_done("thput")
+ return True
+
+ def run_mc_test(self,args):
+ endp_types = "lf_udp"
+
+ help_summary = '''\
+ The Layer 3 Traffic Generation Test is designed to test the performance of the
+ Access Point by running layer 3 TCP and/or UDP Traffic. Layer-3 Cross-Connects represent a stream
+ of data flowing through the system under test. A Cross-Connect (CX) is composed of two Endpoints,
+ each of which is associated with a particular Port (physical or virtual interface).
+
+ The test will create stations, create CX traffic between upstream port and stations, run traffic
+ and generate a report.
+ '''
+ # args = parse_args()
+ if self.dowebgui:
+ if not self.webgui_stop_check("mc"):
+ return False
+ test_name = ""
+ ip = ""
+ # print('newww',args.local_lf_report_dir)
+ # exit(0)
+ if args.dowebgui:
+ logger.info("In webGUI execution")
+ if args.dowebgui:
+ test_name = args.test_name
+ ip = args.lfmgr
+ logger.info(" dowebgui %s %s %s", args.dowebgui, test_name, ip)
+
+ # initialize pass / fail
+ test_passed = False
+
+ # Configure logging
+ logger_config = lf_logger_config.lf_logger_config()
+
+ # set the logger level to debug
+ if args.log_level:
+ logger_config.set_level(level=args.log_level)
+
+ # lf_logger_config_json will take presidence to changing debug levels
+ if args.lf_logger_config_json:
+ # logger_config.lf_logger_config_json = "lf_logger_config.json"
+ logger_config.lf_logger_config_json = args.lf_logger_config_json
+ logger_config.load_lf_logger_config()
+
+ # validate_args(args)
+ endp_input_list = []
+ graph_input_list = []
+ if args.real:
+ endp_input_list, graph_input_list, config_devices, group_device_map = query_real_clients(args)
+ # Validate existing station list configuration if specified before starting test
+ if not args.use_existing_station_list and args.existing_station_list:
+ logger.error("Existing stations specified, but argument \'--use_existing_station_list\' not specified")
+ return False
+ elif args.use_existing_station_list and not args.existing_station_list:
+ logger.error(
+ "Argument \'--use_existing_station_list\' specified, but no existing stations provided. See \'--existing_station_list\'")
+ return False
+
+ # Gather data for test reporting and KPI generation
+ logger.info("Read in command line paramaters")
+ interopt_mode = args.interopt_mode
+
+ if args.endp_type:
+ endp_types = args.endp_type
+
+ if args.radio:
+ radios = args.radio
+ else:
+ radios = None
+
+ MAX_NUMBER_OF_STATIONS = 1000
+
+ # Lists to help with station creation
+ radio_name_list = []
+ number_of_stations_per_radio_list = []
+ ssid_list = []
+ ssid_password_list = []
+ ssid_security_list = []
+ station_lists = []
+ existing_station_lists = []
+
+ # wifi settings configuration
+ wifi_mode_list = []
+ wifi_enable_flags_list = []
+
+ # optional radio configuration
+ reset_port_enable_list = []
+ reset_port_time_min_list = []
+ reset_port_time_max_list = []
+
+ # wifi extra configuration
+ key_mgmt_list = []
+ pairwise_list = []
+ group_list = []
+ psk_list = []
+ wep_key_list = []
+ ca_cert_list = []
+ eap_list = []
+ identity_list = []
+ anonymous_identity_list = []
+ phase1_list = []
+ phase2_list = []
+ passwd_list = []
+ pin_list = []
+ pac_file_list = []
+ private_key_list = []
+ pk_password_list = []
+ hessid_list = []
+ realm_list = []
+ client_cert_list = []
+ imsi_list = []
+ milenage_list = []
+ domain_list = []
+ roaming_consortium_list = []
+ venue_group_list = []
+ network_type_list = []
+ ipaddr_type_avail_list = []
+ network_auth_type_list = []
+ anqp_3gpp_cell_net_list = []
+ ieee80211w_list = []
+
+ logger.debug("Parse radio arguments used for station configuration")
+ if radios is not None:
+ logger.info("radios {}".format(radios))
+ for radio_ in radios:
+ radio_keys = ['radio', 'stations', 'ssid', 'ssid_pw', 'security']
+ logger.info("radio_dict before format {}".format(radio_))
+ radio_info_dict = dict(
+ map(
+ lambda x: x.split('=='),
+ str(radio_).replace(
+ '"',
+ '').replace(
+ '[',
+ '').replace(
+ ']',
+ '').replace(
+ "'",
+ "").replace(
+ ",",
+ " ").split()))
+
+ logger.debug("radio_dict {}".format(radio_info_dict))
+
+ for key in radio_keys:
+ if key not in radio_info_dict:
+ logger.critical(
+ "missing config, for the {}, all of the following need to be present {} ".format(
+ key, radio_keys))
+ return False
+
+ radio_name_list.append(radio_info_dict['radio'])
+ number_of_stations_per_radio_list.append(
+ radio_info_dict['stations'])
+ ssid_list.append(radio_info_dict['ssid'])
+ ssid_password_list.append(radio_info_dict['ssid_pw'])
+ ssid_security_list.append(radio_info_dict['security'])
+
+ # check for set_wifi_extra
+ # check for wifi_settings
+ wifi_extra_keys = ['wifi_extra']
+ wifi_extra_found = False
+ for wifi_extra_key in wifi_extra_keys:
+ if wifi_extra_key in radio_info_dict:
+ logger.info("wifi_extra_keys found")
+ wifi_extra_found = True
+ break
+
+ if wifi_extra_found:
+ logger.debug("wifi_extra: {extra}".format(
+ extra=radio_info_dict['wifi_extra']))
+
+ wifi_extra_dict = dict(
+ map(
+ lambda x: x.split('&&'),
+ str(radio_info_dict['wifi_extra']).replace(
+ '"',
+ '').replace(
+ '[',
+ '').replace(
+ ']',
+ '').replace(
+ "'",
+ "").replace(
+ ",",
+ " ").replace(
+ "!!",
+ " "
+ )
+ .split()))
+
+ logger.info("wifi_extra_dict: {wifi_extra}".format(
+ wifi_extra=wifi_extra_dict))
+
+ if 'key_mgmt' in wifi_extra_dict:
+ key_mgmt_list.append(wifi_extra_dict['key_mgmt'])
+ else:
+ key_mgmt_list.append('[BLANK]')
+
+ if 'pairwise' in wifi_extra_dict:
+ pairwise_list.append(wifi_extra_dict['pairwise'])
+ else:
+ pairwise_list.append('[BLANK]')
+
+ if 'group' in wifi_extra_dict:
+ group_list.append(wifi_extra_dict['group'])
+ else:
+ group_list.append('[BLANK]')
+
+ if 'psk' in wifi_extra_dict:
+ psk_list.append(wifi_extra_dict['psk'])
+ else:
+ psk_list.append('[BLANK]')
+
+ if 'wep_key' in wifi_extra_dict:
+ wep_key_list.append(wifi_extra_dict['wep_key'])
+ else:
+ wep_key_list.append('[BLANK]')
+
+ if 'ca_cert' in wifi_extra_dict:
+ ca_cert_list.append(wifi_extra_dict['ca_cert'])
+ else:
+ ca_cert_list.append('[BLANK]')
+
+ if 'eap' in wifi_extra_dict:
+ eap_list.append(wifi_extra_dict['eap'])
+ else:
+ eap_list.append('[BLANK]')
+
+ if 'identity' in wifi_extra_dict:
+ identity_list.append(wifi_extra_dict['identity'])
+ else:
+ identity_list.append('[BLANK]')
+
+ if 'anonymous' in wifi_extra_dict:
+ anonymous_identity_list.append(
+ wifi_extra_dict['anonymous'])
+ else:
+ anonymous_identity_list.append('[BLANK]')
+
+ if 'phase1' in wifi_extra_dict:
+ phase1_list.append(wifi_extra_dict['phase1'])
+ else:
+ phase1_list.append('[BLANK]')
+
+ if 'phase2' in wifi_extra_dict:
+ phase2_list.append(wifi_extra_dict['phase2'])
+ else:
+ phase2_list.append('[BLANK]')
+
+ if 'passwd' in wifi_extra_dict:
+ passwd_list.append(wifi_extra_dict['passwd'])
+ else:
+ passwd_list.append('[BLANK]')
+
+ if 'pin' in wifi_extra_dict:
+ pin_list.append(wifi_extra_dict['pin'])
+ else:
+ pin_list.append('[BLANK]')
+
+ if 'pac_file' in wifi_extra_dict:
+ pac_file_list.append(wifi_extra_dict['pac_file'])
+ else:
+ pac_file_list.append('[BLANK]')
+
+ if 'private_key' in wifi_extra_dict:
+ private_key_list.append(wifi_extra_dict['private_key'])
+ else:
+ private_key_list.append('[BLANK]')
+
+ if 'pk_password' in wifi_extra_dict:
+ pk_password_list.append(wifi_extra_dict['pk_password'])
+ else:
+ pk_password_list.append('[BLANK]')
+
+ if 'hessid' in wifi_extra_dict:
+ hessid_list.append(wifi_extra_dict['hessid'])
+ else:
+ hessid_list.append("00:00:00:00:00:00")
+
+ if 'realm' in wifi_extra_dict:
+ realm_list.append(wifi_extra_dict['realm'])
+ else:
+ realm_list.append('[BLANK]')
+
+ if 'client_cert' in wifi_extra_dict:
+ client_cert_list.append(wifi_extra_dict['client_cert'])
+ else:
+ client_cert_list.append('[BLANK]')
+
+ if 'imsi' in wifi_extra_dict:
+ imsi_list.append(wifi_extra_dict['imsi'])
+ else:
+ imsi_list.append('[BLANK]')
+
+ if 'milenage' in wifi_extra_dict:
+ milenage_list.append(wifi_extra_dict['milenage'])
+ else:
+ milenage_list.append('[BLANK]')
+
+ if 'domain' in wifi_extra_dict:
+ domain_list.append(wifi_extra_dict['domain'])
+ else:
+ domain_list.append('[BLANK]')
+
+ if 'roaming_consortium' in wifi_extra_dict:
+ roaming_consortium_list.append(
+ wifi_extra_dict['roaming_consortium'])
+ else:
+ roaming_consortium_list.append('[BLANK]')
+
+ if 'venue_group' in wifi_extra_dict:
+ venue_group_list.append(wifi_extra_dict['venue_group'])
+ else:
+ venue_group_list.append('[BLANK]')
+
+ if 'network_type' in wifi_extra_dict:
+ network_type_list.append(wifi_extra_dict['network_type'])
+ else:
+ network_type_list.append('[BLANK]')
+
+ if 'ipaddr_type_avail' in wifi_extra_dict:
+ ipaddr_type_avail_list.append(
+ wifi_extra_dict['ipaddr_type_avail'])
+ else:
+ ipaddr_type_avail_list.append('[BLANK]')
+
+ if 'network_auth_type' in wifi_extra_dict:
+ network_auth_type_list.append(
+ wifi_extra_dict['network_auth_type'])
+ else:
+ network_auth_type_list.append('[BLANK]')
+
+ if 'anqp_3gpp_cell_net' in wifi_extra_dict:
+ anqp_3gpp_cell_net_list.append(
+ wifi_extra_dict['anqp_3gpp_cell_net'])
+ else:
+ anqp_3gpp_cell_net_list.append('[BLANK]')
+
+ if 'ieee80211w' in wifi_extra_dict:
+ ieee80211w_list.append(wifi_extra_dict['ieee80211w'])
+ else:
+ ieee80211w_list.append('Optional')
+
+ '''
+ # wifi extra configuration
+ key_mgmt_list.append(key_mgmt)
+ pairwise_list.append(pairwise)
+ group_list.append(group)
+ psk_list.append(psk)
+ eap_list.append(eap)
+ identity_list.append(identity)
+ anonymous_identity_list.append(anonymous_identity)
+ phase1_list.append(phase1)
+ phase2_list.append(phase2)
+ passwd_list.append(passwd)
+ pin_list.append(pin)
+ pac_file_list.append(pac_file)
+ private_key_list.append(private)
+ pk_password_list.append(pk_password)
+ hessid_list.append(hssid)
+ realm_list.append(realm)
+ client_cert_list.append(client_cert)
+ imsi_list.append(imsi)
+ milenage_list.append(milenage)
+ domain_list.append(domain)
+ roaming_consortium_list.append(roaming_consortium)
+ venue_group_list.append(venue_group)
+ network_type_list.append(network_type)
+ ipaddr_type_avail_list.append(ipaddr_type_avail)
+ network_auth_type_list.append(network_ath_type)
+ anqp_3gpp_cell_net_list.append(anqp_3gpp_cell_net)
+
+ '''
+ # no wifi extra for this station
+ else:
+ key_mgmt_list.append('[BLANK]')
+ pairwise_list.append('[BLANK]')
+ group_list.append('[BLANK]')
+ psk_list.append('[BLANK]')
+ # for testing
+ # psk_list.append(radio_info_dict['ssid_pw'])
+ wep_key_list.append('[BLANK]')
+ ca_cert_list.append('[BLANK]')
+ eap_list.append('[BLANK]')
+ identity_list.append('[BLANK]')
+ anonymous_identity_list.append('[BLANK]')
+ phase1_list.append('[BLANK]')
+ phase2_list.append('[BLANK]')
+ passwd_list.append('[BLANK]')
+ pin_list.append('[BLANK]')
+ pac_file_list.append('[BLANK]')
+ private_key_list.append('[BLANK]')
+ pk_password_list.append('[BLANK]')
+ hessid_list.append("00:00:00:00:00:00")
+ realm_list.append('[BLANK]')
+ client_cert_list.append('[BLANK]')
+ imsi_list.append('[BLANK]')
+ milenage_list.append('[BLANK]')
+ domain_list.append('[BLANK]')
+ roaming_consortium_list.append('[BLANK]')
+ venue_group_list.append('[BLANK]')
+ network_type_list.append('[BLANK]')
+ ipaddr_type_avail_list.append('[BLANK]')
+ network_auth_type_list.append('[BLANK]')
+ anqp_3gpp_cell_net_list.append('[BLANK]')
+ ieee80211w_list.append('Optional')
+
+ # check for wifi_settings
+ wifi_settings_keys = ['wifi_settings']
+ wifi_settings_found = True
+ for key in wifi_settings_keys:
+ if key not in radio_info_dict:
+ logger.debug("wifi_settings_keys not enabled")
+ wifi_settings_found = False
+ break
+
+ if wifi_settings_found:
+ # Check for additional flags
+ if {'wifi_mode', 'enable_flags'}.issubset(
+ radio_info_dict.keys()):
+ logger.debug("wifi_settings flags set")
+ else:
+ logger.debug("wifi_settings is present wifi_mode, enable_flags need to be set "
+ "or remove the wifi_settings or set wifi_settings==False flag on "
+ "the radio for defaults")
+ return False
+ wifi_mode_list.append(radio_info_dict['wifi_mode'])
+ enable_flags_str = radio_info_dict['enable_flags'].replace(
+ '(', '').replace(')', '').replace('|', ',').replace('&&', ',')
+ enable_flags_list = list(enable_flags_str.split(","))
+ wifi_enable_flags_list.append(enable_flags_list)
+ else:
+ wifi_mode_list.append(0)
+ wifi_enable_flags_list.append(
+ ["wpa2_enable", "80211u_enable", "create_admin_down"])
+ # 8021x_radius is the same as Advanced/8021x on the gui
+
+ # check for optional radio key , currently only reset is enabled
+ # update for checking for reset_port_time_min, reset_port_time_max
+ optional_radio_reset_keys = ['reset_port_enable']
+ radio_reset_found = True
+ for key in optional_radio_reset_keys:
+ if key not in radio_info_dict:
+ # logger.debug("port reset test not enabled")
+ radio_reset_found = False
+ break
+
+ if radio_reset_found:
+ reset_port_enable_list.append(
+ radio_info_dict['reset_port_enable'])
+ reset_port_time_min_list.append(
+ radio_info_dict['reset_port_time_min'])
+ reset_port_time_max_list.append(
+ radio_info_dict['reset_port_time_max'])
+ else:
+ reset_port_enable_list.append(False)
+ reset_port_time_min_list.append('0s')
+ reset_port_time_max_list.append('0s')
+
+ index = 0
+ for (radio_name_, number_of_stations_per_radio_) in zip(
+ radio_name_list, number_of_stations_per_radio_list):
+ number_of_stations = int(number_of_stations_per_radio_)
+ if number_of_stations > MAX_NUMBER_OF_STATIONS:
+ logger.critical("number of stations per radio exceeded max of : {}".format(
+ MAX_NUMBER_OF_STATIONS))
+ quit(1)
+ station_list = LFUtils.portNameSeries(
+ prefix_="sta",
+ start_id_=0 + index * 1000 + int(args.sta_start_offset),
+ end_id_=number_of_stations - 1 + index *
+ 1000 + int(args.sta_start_offset),
+ padding_number_=10000,
+ radio=radio_name_)
+ station_lists.append(station_list)
+ index += 1
+
+ # create a secondary station_list
+ if args.use_existing_station_list:
+ if args.existing_station_list is not None:
+ # these are entered stations
+ for existing_sta_list in args.existing_station_list:
+ existing_stations = str(existing_sta_list).replace(
+ '"',
+ '').replace(
+ '[',
+ '').replace(
+ ']',
+ '').replace(
+ "'",
+ "").replace(
+ ",",
+ " ").split()
+
+ for existing_sta in existing_stations:
+ existing_station_lists.append(existing_sta)
+ else:
+ logger.error(
+ "--use_station_list set true, --station_list is None Exiting")
+ raise Exception(
+ "--use_station_list is used in conjunction with a --station_list")
+
+ logger.info("existing_station_lists: {sta}".format(
+ sta=existing_station_lists))
+
+ # logger.info("endp-types: %s"%(endp_types))
+ ul_rates = args.side_a_min_bps.replace(',', ' ').split()
+ dl_rates = args.side_b_min_bps.replace(',', ' ').split()
+ ul_pdus = args.side_a_min_pdu.replace(',', ' ').split()
+ dl_pdus = args.side_b_min_pdu.replace(',', ' ').split()
+ if args.attenuators == "":
+ attenuators = []
+ else:
+ attenuators = args.attenuators.split(",")
+ if args.atten_vals == "":
+ atten_vals = [-1]
+ else:
+ atten_vals = args.atten_vals.split(",")
+
+ if len(ul_rates) != len(dl_rates):
+ # todo make fill assignable
+ logger.info(
+ "ul_rates %s and dl_rates %s arrays are of different length will fill shorter list with 256000\n" %
+ (len(ul_rates), len(dl_rates)))
+ if len(ul_pdus) != len(dl_pdus):
+ logger.info(
+ "ul_pdus %s and dl_pdus %s arrays are of different lengths will fill shorter list with size AUTO \n" %
+ (len(ul_pdus), len(dl_pdus)))
+
+ # Configure reporting
+ logger.info("Configuring report")
+ report, kpi_csv, csv_outfile = configure_reporting(**vars(args))
+ ce = self.current_exec #seires
+ if ce == "parallel":
+ obj_name = "mcast_test"
+ else:
+ obj_no = 1
+ while f"mcast_test_{obj_no}" in self.mcast_obj_dict[ce]:
+ obj_no+=1
+ obj_name = f"mcast_test_{obj_no}"
+ self.mcast_obj_dict[ce][obj_name] = {"obj":None,"data":None}
+ logger.debug("Configure test object")
+ self.mcast_obj_dict[ce][obj_name]["obj"] = L3VariableTime(
+ endp_types=endp_types,
+ args=args,
+ tos=args.tos,
+ side_b=args.upstream_port,
+ side_a=args.downstream_port,
+ radio_name_list=radio_name_list,
+ number_of_stations_per_radio_list=number_of_stations_per_radio_list,
+ ssid_list=ssid_list,
+ ssid_password_list=ssid_password_list,
+ ssid_security_list=ssid_security_list,
+ wifi_mode_list=wifi_mode_list,
+ enable_flags_list=wifi_enable_flags_list,
+ station_lists=station_lists,
+ name_prefix="LT-",
+ outfile=csv_outfile,
+ reset_port_enable_list=reset_port_enable_list,
+ reset_port_time_min_list=reset_port_time_min_list,
+ reset_port_time_max_list=reset_port_time_max_list,
+ side_a_min_rate=ul_rates,
+ side_b_min_rate=dl_rates,
+ side_a_min_pdu=ul_pdus,
+ side_b_min_pdu=dl_pdus,
+ rates_are_totals=args.rates_are_totals,
+ mconn=args.multiconn,
+ attenuators=attenuators,
+ atten_vals=atten_vals,
+ number_template="00",
+ test_duration=args.test_duration,
+ polling_interval=args.polling_interval,
+ lfclient_host=args.lfmgr,
+ lfclient_port=args.lfmgr_port,
+ debug=args.debug,
+ kpi_csv=kpi_csv,
+ no_cleanup=args.no_cleanup,
+ use_existing_station_lists=args.use_existing_station_list,
+ existing_station_lists=existing_station_lists,
+ wait_for_ip_sec=args.wait_for_ip_sec,
+ exit_on_ip_acquired=args.exit_on_ip_acquired,
+ ap_read=args.ap_read,
+ ap_module=args.ap_module,
+ ap_test_mode=args.ap_test_mode,
+ ap_ip=args.ap_ip,
+ ap_user=args.ap_user,
+ ap_passwd=args.ap_passwd,
+ ap_scheme=args.ap_scheme,
+ ap_serial_port=args.ap_serial_port,
+ ap_ssh_port=args.ap_ssh_port,
+ ap_telnet_port=args.ap_telnet_port,
+ ap_serial_baud=args.ap_serial_baud,
+ ap_if_2g=args.ap_if_2g,
+ ap_if_5g=args.ap_if_5g,
+ ap_if_6g=args.ap_if_6g,
+ ap_report_dir="",
+ ap_file=args.ap_file,
+ ap_band_list=args.ap_band_list.split(','),
+
+ # for webgui execution
+ test_name=test_name,
+ dowebgui=args.dowebgui,
+ ip=ip,
+ get_live_view= args.get_live_view,
+ total_floors = args.total_floors,
+ # for uniformity from webGUI result_dir as variable is used insead of local_lf_report_dir
+ result_dir=args.local_lf_report_dir,
+
+ # wifi extra configuration
+ key_mgmt_list=key_mgmt_list,
+ pairwise_list=pairwise_list,
+ group_list=group_list,
+ psk_list=psk_list,
+ wep_key_list=wep_key_list,
+ ca_cert_list=ca_cert_list,
+ eap_list=eap_list,
+ identity_list=identity_list,
+ anonymous_identity_list=anonymous_identity_list,
+ phase1_list=phase1_list,
+ phase2_list=phase2_list,
+ passwd_list=passwd_list,
+ pin_list=pin_list,
+ pac_file_list=pac_file_list,
+ private_key_list=private_key_list,
+ pk_password_list=pk_password_list,
+ hessid_list=hessid_list,
+ realm_list=realm_list,
+ client_cert_list=client_cert_list,
+ imsi_list=imsi_list,
+ milenage_list=milenage_list,
+ domain_list=domain_list,
+ roaming_consortium_list=roaming_consortium_list,
+ venue_group_list=venue_group_list,
+ network_type_list=network_type_list,
+ ipaddr_type_avail_list=ipaddr_type_avail_list,
+ network_auth_type_list=network_auth_type_list,
+ anqp_3gpp_cell_net_list=anqp_3gpp_cell_net_list,
+ ieee80211w_list=ieee80211w_list,
+ interopt_mode=interopt_mode,
+ endp_input_list=endp_input_list,
+ graph_input_list=graph_input_list,
+ real=args.real,
+ expected_passfail_value=args.expected_passfail_value,
+ device_csv_name=args.device_csv_name,
+ group_name=args.group_name
+ )
+
+ # Perform pre-test cleanup, if configured to do so
+ if args.no_pre_cleanup:
+ logger.info("Skipping pre-test cleanup, '--no_pre_cleanup' specified")
+ elif args.use_existing_station_list:
+ logger.info("Skipping pre-test cleanup, '--use_existing_station_list' specified")
+ else:
+ logger.info("Performing pre-test cleanup")
+ self.mcast_obj_dict[ce][obj_name]["obj"].pre_cleanup()
+
+ # Build test configuration
+ logger.info("Building test configuration")
+ self.mcast_obj_dict[ce][obj_name]["obj"].build()
+ if not self.mcast_obj_dict[ce][obj_name]["obj"].passes():
+ logger.critical("Test configuration build failed")
+ logger.critical(self.mcast_obj_dict[ce][obj_name]["obj"].get_fail_message())
+ return False
+
+ # Run test
+ logger.info("Starting test")
+ self.mcast_obj_dict[ce][obj_name]["obj"].start(False)
+
+ if args.wait > 0:
+ logger.info(f"Pausing {args.wait} seconds for manual inspection before test conclusion and "
+ "possible traffic stop/post-test cleanup")
+ time.sleep(args.wait)
+
+ # Admin down the stations
+ if args.no_stop_traffic:
+ logger.info("Test complete, '--no_stop_traffic' specified, traffic continues to run")
+ else:
+ if args.quiesce_cx:
+ logger.info("Test complete, quiescing traffic")
+ self.mcast_obj_dict[ce][obj_name]["obj"].quiesce_cx()
+ time.sleep(3)
+ else:
+ logger.info("Test complete, stopping traffic")
+ self.mcast_obj_dict[ce][obj_name]["obj"].stop()
+
+ # Set DUT information for reporting
+ self.mcast_obj_dict[ce][obj_name]["obj"].set_dut_info(
+ dut_model_num=args.dut_model_num,
+ dut_hw_version=args.dut_hw_version,
+ dut_sw_version=args.dut_sw_version,
+ dut_serial_num=args.dut_serial_num)
+ self.mcast_obj_dict[ce][obj_name]["obj"].set_report_obj(report=report)
+ if args.dowebgui:
+ self.mcast_obj_dict[ce][obj_name]["obj"].webgui_finalize()
+ # Generate and write out test report
+ logger.info("Generating test report")
+ if args.real:
+ self.mcast_obj_dict[ce][obj_name]["obj"].generate_report(config_devices, group_device_map)
+ else:
+ self.mcast_obj_dict[ce][obj_name]["obj"].generate_report()
+ params = {
+ "config_devices" : None,
+ "group_device_map": None
+ }
+ params["group_device_map"] = group_device_map
+ params["config_devices"] = config_devices
+ self.mcast_obj_dict[ce][obj_name]["data"] = params.copy()
+ self.mcast_obj_dict[ce][obj_name]["obj"].write_report()
+
+ # TODO move to after reporting
+ if not self.mcast_obj_dict[ce][obj_name]["obj"].passes():
+ logger.warning("Test Ended: There were Failures")
+ logger.warning(self.mcast_obj_dict[ce][obj_name]["obj"].get_fail_message())
+
+ if args.no_cleanup:
+ logger.info("Skipping post-test cleanup, '--no_cleanup' specified")
+ elif args.no_stop_traffic:
+ logger.info("Skipping post-test cleanup, '--no_stop_traffic' specified")
+ else:
+ logger.info("Performing post-test cleanup")
+ self.mcast_obj_dict[ce][obj_name]["obj"].cleanup()
+
+ # TODO: This is redundant if '--no_cleanup' is not specified (already taken care of there)
+ if args.cleanup_cx:
+ logger.info("Performing post-test CX traffic pair cleanup")
+ self.mcast_obj_dict[ce][obj_name]["obj"].cleanup_cx()
+
+ if self.mcast_obj_dict[ce][obj_name]["obj"].passes():
+ test_passed = True
+ logger.info("Full test passed, all connections increased rx bytes")
+
+ # Run WebGUI-specific post test logic
+ if args.dowebgui:
+ self.mcast_obj_dict[ce][obj_name]["obj"].copy_reports_to_home_dir()
+
+ if test_passed:
+ self.mcast_obj_dict[ce][obj_name]["obj"].exit_success()
+ else:
+ self.mcast_obj_dict[ce][obj_name]["obj"].exit_fail()
+ if self.dowebgui:
+ self.webgui_test_done("mc")
+ return True
+
+
+ def run_mc_test1(
+ self,
+ local_lf_report_dir="",
+ results_dir_name="test_l3",
+ test_rig="",
+ test_tag="",
+ dut_hw_version="",
+ dut_sw_version="",
+ dut_model_num="",
+ dut_serial_num="",
+ test_priority="",
+ test_id="test l3",
+ csv_outfile="",
+ tty="",
+ baud="9600",
+ test_duration="3m",
+ tos="BE",
+ debug=False,
+ log_level=None,
+ interopt_mode=False,
+ endp_type="mc_udp",
+ upstream_port="eth1",
+ downstream_port=None,
+ polling_interval="5s",
+ radio=None,
+ side_a_min_bps="0",
+ side_a_min_pdu="MTU",
+ side_b_min_bps="256000",
+ side_b_min_pdu="MTU",
+ rates_are_totals=True,
+ multiconn=1,
+ attenuators="",
+ atten_vals="",
+ wait=0,
+ sta_start_offset="0",
+ no_pre_cleanup=False,
+ no_cleanup=True,
+ cleanup_cx=False,
+ csv_data_to_report=False,
+ no_stop_traffic=False,
+ quiesce_cx=False,
+ use_existing_station_list=False,
+ existing_station_list=None,
+ wait_for_ip_sec="120s",
+ exit_on_ip_acquired=False,
+ lf_logger_config_json=None,
+ ap_read=False,
+ ap_module=None,
+ ap_test_mode=True,
+ ap_scheme="serial",
+ ap_serial_port="/dev/ttyUSB0",
+ ap_serial_baud="115200",
+ ap_ip="192.168.50.1",
+ ap_ssh_port="1025",
+ ap_telnet_port="23",
+ ap_user="lanforge",
+ ap_passwd="lanforge",
+ ap_if_2g="wl0",
+ ap_if_5g="wl1",
+ ap_if_6g="wl2",
+ ap_file=None,
+ ap_band_list="2g,5g,6g",
+ dowebgui=False,
+ test_name=None,
+ ssid=None,
+ passwd=None,
+ security=None,
+ device_list=None,
+ expected_passfail_value=None,
+ device_csv_name=None,
+ file_name=None,
+ group_name=None,
+ profile_name=None,
+ eap_method="DEFAULT",
+ eap_identity="",
+ ieee8021x=False,
+ ieee80211u=False,
+ ieee80211w=1,
+ enable_pkc=False,
+ bss_transition=False,
+ power_save=False,
+ disable_ofdma=False,
+ roam_ft_ds=False,
+ key_management="DEFAULT",
+ pairwise="NA",
+ private_key="NA",
+ ca_cert="NA",
+ client_cert="NA",
+ pk_passwd="NA",
+ pac_file="NA",
+ config=False,
+ wait_time=60,
+ real=True,
+ get_live_view=False,
+ total_floors="0",
+ help_summary=False,
+ result_dir = ''
+ ):
+ args = SimpleNamespace(**locals())
+ args.lfmgr_port = self.port
+ args.lfmgr = self.lanforge_ip
+ args.local_lf_report_dir = os.getcwd() if not args.dowebgui else result_dir
+ return self.run_mc_test(args)
+
+
    def run_yt_test(
        self,
        url=None,
        duration=None,
        ap_name="TIP",
        sec="wpa2",
        band="5GHZ",
        test_name=None,
        upstream_port=None,
        resource_list=None,
        no_pre_cleanup=False,
        no_post_cleanup=False,
        debug=False,
        log_level=None,
        res="Auto",
        lf_logger_config_json=None,
        ui_report_dir=None,
        do_webUI=False,
        file_name=None,
        group_name=None,
        profile_name=None,
        ssid=None,
        passwd=None,
        encryp=None,
        eap_method="DEFAULT",
        eap_identity="DEFAULT",
        ieee8021x=False,
        ieee80211u=False,
        ieee80211w=1,
        enable_pkc=False,
        bss_transition=False,
        power_save=False,
        disable_ofdma=False,
        roam_ft_ds=False,
        key_management="DEFAULT",
        pairwise="NA",
        private_key="NA",
        ca_cert="NA",
        pac_file="NA",
        client_cert="NA",
        pk_passwd="NA",
        help_summary=None,
        expected_passfail_value=None,
        device_csv_name=None,
        config=False,
        exec_type=None
    ):
        """Run a YouTube streaming test on real devices via LANforge generic endpoints.

        Workflow, as implemented below:
          1. Normalize ``duration`` to minutes (ints pass through; strings
             may carry an s/m/h suffix).
          2. Discover real devices (``RealDevice``) and build a ``Youtube``
             test object; start its Flask stats server after freeing port
             5002.
          3. Select test devices either from configured groups/profiles
             (``group_name``/``profile_name``/``file_name``) or from an
             explicit ``resource_list`` (prompting interactively via
             ``input()`` when neither is supplied); optionally push the
             ssid/passwd/... Wi-Fi config first when ``config`` is True.
          4. Create and start generic endpoints, poll the stats API until
             the duration elapses and all cross-connects finish, then
             generate a report.
          5. In ``finally``: stop the test object, record it in
             ``self.yt_obj_dict`` (parallel vs. series slot), and run
             webGUI-specific teardown.

        Returns True on normal completion; False/None on early bail-outs
        (webGUI stop request, missing Generic tab, no devices).
        """
        try:
            print('duration',duration)
            # In webGUI mode, honour a user-requested stop before starting.
            if self.dowebgui:
                if not self.webgui_stop_check("yt"):
                    return False
            # Normalize duration to minutes. NOTE(review): isinstance()
            # would be the idiomatic check here; type()== kept as-is.
            if type(duration) == int:
                pass
            elif duration.endswith('s') or duration.endswith('S'):
                duration = int(duration[0:-1])/60
            elif duration.endswith('m') or duration.endswith('M'):
                duration = int(duration[0:-1])
            elif duration.endswith('h') or duration.endswith('H'):
                duration = int(duration[0:-1])*60
            else:
                duration = int(duration)

            # set the logger level to debug
            logger_config = lf_logger_config.lf_logger_config()

            if log_level:
                logger_config.set_level(level=log_level)

            if lf_logger_config_json:
                logger_config.lf_logger_config_json = lf_logger_config_json
                logger_config.load_lf_logger_config()

            mgr_ip = self.lanforge_ip
            mgr_port = self.port
            # NOTE(review): the self-assignments below are no-ops carried
            # over from the original script's argparse handoff.
            url = url
            duration = duration

            do_webUI = do_webUI
            ui_report_dir = ui_report_dir
            debug = debug
            # Print debug information if debugging is enabled
            if debug:
                logging.info('''Specified configuration:
                ip: {}
                port: {}
                Duration: {}
                debug: {}
                '''.format(mgr_ip, mgr_port, duration, debug))

            # Split the comma-separated group/profile selections (empty
            # lists when not given).
            if True:
                if group_name is not None:
                    group_name = group_name.strip()
                    selected_groups = group_name.split(',')
                else:
                    selected_groups = []

                if profile_name is not None:
                    profile_name = profile_name.strip()
                    selected_profiles = profile_name.split(',')
                else:
                    selected_profiles = []

            # Discover real devices attached to this LANforge manager.
            # NOTE(review): server_ip and the 'Test Configured' SSID
            # placeholders are hard-coded -- confirm they are intentional.
            Devices = RealDevice(manager_ip=mgr_ip,
                                 server_ip='192.168.1.61',
                                 ssid_2g='Test Configured',
                                 passwd_2g='',
                                 encryption_2g='',
                                 ssid_5g='Test Configured',
                                 passwd_5g='',
                                 encryption_5g='',
                                 ssid_6g='Test Configured',
                                 passwd_6g='',
                                 encryption_6g='',
                                 selected_bands=['5G'])
            Devices.get_devices()

            # Create a YouTube object with the specified parameters
            # NOTE(review): this overwrites the caller-supplied
            # upstream_port with a hard-coded IP -- confirm intentional.
            upstream_port = "10.253.8.126"
            self.yt_test_obj = Youtube(
                host=mgr_ip,
                port=mgr_port,
                url=url,
                duration=duration,
                lanforge_password='lanforge',
                sta_list=[],
                do_webUI=do_webUI,
                ui_report_dir=ui_report_dir,
                debug=debug,
                resolution=res,
                ap_name=ap_name,
                ssid=ssid,
                security=encryp,
                band=band,
                test_name=test_name,
                upstream_port=upstream_port,
                config=config,
                selected_groups=selected_groups,
                selected_profiles=selected_profiles,
                no_browser_precleanup=True,
                no_browser_postcleanup=True)

            # Free port 5002 before starting the stats Flask server.
            print('CHECKING PORT AVAILBILITY for YT TEST')
            self.port_clean_up(5002)
            self.yt_test_obj.start_flask_server()
            upstream_port = self.yt_test_obj.change_port_to_ip(upstream_port)

            resources = []
            self.yt_test_obj.Devices = Devices
            if file_name:
                new_filename = file_name.removesuffix(".csv")
            else:
                new_filename = file_name
            config_obj = DeviceConfig.DeviceConfig(lanforge_ip=self.lanforge_ip, file_name=new_filename)
            # if not expected_passfail_value and device_csv_name is None:
            #     config_obj.device_csv_file(csv_name="device.csv")
            # Group-based selection: map each selected group to a profile,
            # push connectivity, and resolve group members to EIDs.
            if group_name is not None and file_name is not None and profile_name is not None:
                selected_groups = group_name.split(',')
                selected_profiles = profile_name.split(',')
                config_devices = {}
                for i in range(len(selected_groups)):
                    config_devices[selected_groups[i]] = selected_profiles[i]

                config_obj.initiate_group()

                asyncio.run(config_obj.connectivity(config_devices))

                adbresponse = config_obj.adb_obj.get_devices()
                resource_manager = config_obj.laptop_obj.get_devices()
                all_res = {}
                df1 = config_obj.display_groups(config_obj.groups)
                groups_list = df1.to_dict(orient='list')
                group_devices = {}

                # Android devices are keyed by serial; laptops by hostname.
                for adb in adbresponse:
                    group_devices[adb['serial']] = adb['eid']
                # NOTE(review): this loop variable shadows the 'res'
                # (resolution) parameter; harmless here since the Youtube
                # object was already constructed above.
                for res in resource_manager:
                    all_res[res['hostname']] = res['shelf'] + '.' + res['resource']
                eid_list = []
                # Collect the EIDs of every member of each selected group.
                for grp_name in groups_list.keys():
                    for g_name in selected_groups:
                        if grp_name == g_name:
                            for j in groups_list[grp_name]:
                                if j in group_devices.keys():
                                    eid_list.append(group_devices[j])
                                elif j in all_res.keys():
                                    eid_list.append(all_res[j])
                resource_list = ",".join(id for id in eid_list)
            else:
                # Explicit-resource path: optionally push this Wi-Fi config
                # to the chosen devices before the test.
                config_dict = {
                    'ssid': ssid,
                    'passwd': passwd,
                    'enc': encryp,
                    'eap_method': eap_method,
                    'eap_identity': eap_identity,
                    'ieee80211': ieee8021x,
                    'ieee80211u': ieee80211u,
                    'ieee80211w': ieee80211w,
                    'enable_pkc': enable_pkc,
                    'bss_transition': bss_transition,
                    'power_save': power_save,
                    'disable_ofdma': disable_ofdma,
                    'roam_ft_ds': roam_ft_ds,
                    'key_management': key_management,
                    'pairwise': pairwise,
                    'private_key': private_key,
                    'ca_cert': ca_cert,
                    'client_cert': client_cert,
                    'pk_passwd': pk_passwd,
                    'pac_file': pac_file,
                    'server_ip': upstream_port,
                }
                if resource_list:
                    all_devices = config_obj.get_all_devices()
                    if group_name is None and file_name is None and profile_name is None:
                        dev_list = resource_list.split(',')
                        if config:
                            asyncio.run(config_obj.connectivity(device_list=dev_list, wifi_config=config_dict))
                else:
                    # No resources given: list what is available and prompt
                    # the operator interactively (blocks on stdin).
                    all_devices = config_obj.get_all_devices()
                    device_list = []
                    for device in all_devices:
                        if device["type"] != 'laptop':
                            device_list.append(device["shelf"] + '.' + device["resource"] + " " + device["serial"])
                        elif device["type"] == 'laptop':
                            device_list.append(device["shelf"] + '.' + device["resource"] + " " + device["hostname"])

                    print("Available devices:")
                    for device in device_list:
                        print(device)

                    resource_list = input("Enter the desired resources to run the test:")
                    dev1_list = resource_list.split(',')
                    if config:
                        asyncio.run(config_obj.connectivity(device_list=dev1_list, wifi_config=config_dict))

            # Hand the selected stations to the Youtube object. CLI mode
            # keeps full EIDs; webGUI mode trims to "shelf.resource".
            if not do_webUI:
                if resource_list:
                    resources = [r.strip() for r in resource_list.split(',')]
                    # Keep only entries that look like shelf.resource EIDs.
                    resources = [r for r in resources if len(r.split('.')) > 1]

                    self.yt_test_obj.select_real_devices(real_devices=Devices, real_sta_list=resources, base_interop_obj=Devices)

                else:
                    self.yt_test_obj.select_real_devices(real_devices=Devices)
            else:
                resources = [r.strip() for r in resource_list.split(',')]

                extracted_parts = [res.split('.')[:2] for res in resources]
                formatted_parts = ['.'.join(parts) for parts in extracted_parts]
                self.yt_test_obj.select_real_devices(real_devices=Devices, real_sta_list=formatted_parts, base_interop_obj=Devices)

            # Report device selection back to the webGUI's running JSON.
            if do_webUI:

                if len(self.yt_test_obj.real_sta_hostname) == 0:
                    logging.error("No device is available to run the test")
                    obj = {
                        "status": "Stopped",
                        "configuration_status": "configured"
                    }
                    self.yt_test_obj.updating_webui_runningjson(obj)
                    return
                else:
                    obj = {
                        "configured_devices": self.yt_test_obj.real_sta_hostname,
                        "configuration_status": "configured",
                        "no_of_devices": f' Total({len(self.yt_test_obj.real_sta_os_types)}) : W({self.yt_test_obj.windows}),L({self.yt_test_obj.linux}),M({self.yt_test_obj.mac})',
                        "device_list": self.yt_test_obj.hostname_os_combination
                    }
                    self.yt_test_obj.updating_webui_runningjson(obj)

            # Perform pre-test cleanup if not skipped
            if not no_pre_cleanup:
                self.yt_test_obj.cleanup()

            # Check if the required tab exists, and exit if not
            if not self.yt_test_obj.check_tab_exists():
                logging.error('Generic Tab is not available.\nAborting the test.')
                return False

            # Create generic endpoints for the selected stations.
            if len(self.yt_test_obj.real_sta_list) > 0:
                logging.info(f"checking real sta list while creating endpionts {self.yt_test_obj.real_sta_list}")
                print('HII',self.yt_test_obj.real_sta_list)
                self.yt_test_obj.create_generic_endp(self.yt_test_obj.real_sta_list)
            else:
                logging.info(f"checking real sta list while creating endpionts {self.yt_test_obj.real_sta_list}")
                logging.error("No Real Devies Available")
                return False

            logging.info("TEST STARTED")
            logging.info('Running the youtube Streaming test for {} minutes'.format(duration))

            # Give the endpoints a moment to settle before starting.
            time.sleep(10)

            self.yt_test_obj.start_time = datetime.datetime.now()
            self.yt_test_obj.start_generic()
            logging.info(f"yt_test_obj: {self.yt_test_obj}")
            logging.info(f"generic_endps_profile: {getattr(self.yt_test_obj, 'generic_endps_profile', None)}")
            logging.info(f"device_names: {getattr(self.yt_test_obj, 'device_names', None)}")
            logging.info(f"stats_api_response: {getattr(self.yt_test_obj, 'stats_api_response', None)}")

            # duration = duration
            end_time = datetime.datetime.now() + datetime.timedelta(minutes=duration)
            # Block until the stats API returns its first non-empty payload.
            initial_data = self.yt_test_obj.get_data_from_api()

            while not initial_data or len(initial_data) == 0:
                initial_data = self.yt_test_obj.get_data_from_api()
                time.sleep(1)
            if initial_data:
                end_time_webgui = []
                for i in range(len(self.yt_test_obj.device_names)):
                    end_time_webgui.append(initial_data['result'].get(self.yt_test_obj.device_names[i], {}).get('stop', False))
            else:
                # NOTE(review): unreachable -- the loop above only exits
                # when initial_data is truthy; and end_time_webgui would be
                # unbound here if it were reached.
                for i in range(len(self.yt_test_obj.device_names)):
                    end_time_webgui.append("")

            # NOTE(review): end_time is recomputed here, restarting the
            # clock after the wait-for-first-data loop above.
            end_time = datetime.datetime.now() + datetime.timedelta(minutes=duration)

            # Poll the stats API until the duration elapses AND all generic
            # cross-connects report finished.
            while datetime.datetime.now() < end_time or not self.yt_test_obj.check_gen_cx():
                self.yt_test_obj.get_data_from_api()
                time.sleep(1)

            if getattr(self.yt_test_obj, "generic_endps_profile", None):
                # NOTE(review): uses module-level 'logger' here, unlike the
                # 'logging' calls elsewhere in this method.
                logger.info("Stopping all the endpoints")
                self.yt_test_obj.generic_endps_profile.stop_cx()
            else:
                logging.warning("⚠️ generic_endps_profile is None — skipping stop_cx()")
            logging.info("Duration ended")

            logging.info('Stopping the test')
            # Build the report from the collected per-device stats.
            if do_webUI:
                print("hii here data",self.yt_test_obj.stats_api_response)
                self.yt_test_obj.create_report(self.yt_test_obj.stats_api_response, self.yt_test_obj.ui_report_dir)
            else:

                self.yt_test_obj.create_report(self.yt_test_obj.stats_api_response, '')

            # Perform post-test cleanup if not skipped
            # if not no_post_cleanup:
            #     self.yt_test_obj.generic_endps_profile.cleanup()
        except Exception as e:
            # Best-effort: log and fall through to teardown in 'finally'.
            logging.error(f"Error occured {e}")
            # traceback.print_exc()
        finally:
            if not ('--help' in sys.argv or '-h' in sys.argv):
                # traceback.print_exc()
                # NOTE(review): if the exception above fired before
                # self.yt_test_obj was created, this raises AttributeError.
                self.yt_test_obj.stop()
                # Record the finished test object in the parallel slot, or
                # the first free series slot.
                if self.current_exec == "parallel":
                    self.yt_obj_dict["parallel"]["yt_test"]["obj"] =self.yt_test_obj
                else:
                    for i in range(len(self.yt_obj_dict["series"])):
                        if self.yt_obj_dict["series"][f"yt_test_{i+1}"]["obj"] is None:
                            self.yt_obj_dict["series"][f"yt_test_{i+1}"]["obj"] = self.yt_test_obj
                            break
                # Stopping the Youtube test
                if do_webUI:
                    self.yt_test_obj.stop_test_yt()
                if self.dowebgui:
                    self.webgui_test_done("yt")
                logging.info("Waiting for Cleanup of Browsers in Devices")
                time.sleep(10)
        return True
+
    def run_zoom_test(
        self,
        duration: int,
        signin_email: str,
        signin_passwd: str,
        participants: int,
        audio: bool = False,
        video: bool = False,
        wait_time: int = 30,
        log_level: str = None,
        lf_logger_config_json: str = None,
        resource_list: str = None,
        do_webUI: bool = False,
        report_dir: str = None,
        testname: str = None,
        zoom_host: str = None,
        file_name: str = None,
        group_name: str = None,
        profile_name: str = None,
        ssid: str = None,
        passwd: str = None,
        encryp: str = None,
        eap_method: str = 'DEFAULT',
        eap_identity: str = 'DEFAULT',
        ieee8021x: bool = False,
        ieee80211u: bool = False,
        ieee80211w: int = 1,
        enable_pkc: bool = False,
        bss_transition: bool = False,
        power_save: bool = False,
        disable_ofdma: bool = False,
        roam_ft_ds: bool = False,
        key_management: str = 'DEFAULT',
        pairwise: str = 'NA',
        private_key: str = 'NA',
        ca_cert: str = 'NA',
        client_cert: str = 'NA',
        pk_passwd: str = 'NA',
        pac_file: str = 'NA',
        upstream_port: str = 'NA',
        help_summary: str = None,
        expected_passfail_value: str = None,
        device_csv_name: str = None,
        config: bool = False,
        exec_type: str = None
    ):
        """Run the Zoom automation test end to end.

        Selects/configures real devices (via group+profile mapping, an explicit
        ``resource_list``, or an interactive prompt), creates a ZoomAutomation
        session with ``zoom_host`` as the meeting host, runs the call for
        ``duration`` minutes and generates a report. The ``finally`` block
        always tears down the session state and records the test object in
        ``self.zoom_obj_dict``.

        Returns:
            bool: True when the flow ran to completion; False on an early
            abort (webGUI stop request, no devices, missing Generic tab).

        NOTE(review): this body was reconstructed from a whitespace-mangled
        diff — verify the nesting against the original file.
        """
        try:
            lanforge_ip = self.lanforge_ip
            # Honour a pending webGUI stop request before doing any work.
            if self.dowebgui:
                if not self.webgui_stop_check("zoom"):
                    return False
            if True:  # NOTE(review): vestigial guard, kept only to preserve structure

                # Normalise the comma-separated group / profile selections.
                if group_name is not None:
                    group_name = group_name.strip()
                    selected_groups = group_name.split(',')
                else:
                    selected_groups = []

                if profile_name is not None:
                    profile_name = profile_name.strip()
                    selected_profiles = profile_name.split(',')
                else:
                    selected_profiles = []

                # NOTE(review): hard-coded override — the caller-supplied
                # upstream_port argument is silently discarded here. Confirm
                # whether this lab address should be parameterised.
                upstream_port = "10.253.8.126"
                self.zoom_test_obj = ZoomAutomation(audio=audio, video=video, lanforge_ip=lanforge_ip, wait_time=wait_time, testname=testname,
                                                    upstream_port=upstream_port, config=config, selected_groups=selected_groups, selected_profiles=selected_profiles,no_browser_precleanup = True,no_browser_postcleanup = True)
                upstream_port = self.zoom_test_obj.change_port_to_ip(upstream_port)
                # NOTE(review): server_ip and the SSID/band values below are
                # hard-coded test-bed settings — verify before reuse elsewhere.
                realdevice = RealDevice(manager_ip=lanforge_ip,
                                        server_ip="192.168.1.61",
                                        ssid_2g='Test Configured',
                                        passwd_2g='',
                                        encryption_2g='',
                                        ssid_5g='Test Configured',
                                        passwd_5g='',
                                        encryption_5g='',
                                        ssid_6g='Test Configured',
                                        passwd_6g='',
                                        encryption_6g='',
                                        selected_bands=['5G'])
                laptops = realdevice.get_devices()
                print('CHECKING PORT AVAILBILITY for ZOOM TEST')
                # Free port 5000 (used by the zoom helper's local server) before starting.
                self.port_clean_up(5000)

                # DeviceConfig expects the CSV name without its ".csv" suffix.
                if file_name:
                    new_filename = file_name.removesuffix(".csv")
                else:
                    new_filename = file_name
                config_obj = DeviceConfig.DeviceConfig(lanforge_ip=lanforge_ip, file_name=new_filename)

                # if not expected_passfail_value and device_csv_name is None:
                #     config_obj.device_csv_file(csv_name="device.csv")
                if group_name is not None and file_name is not None and profile_name is not None:
                    # Group/profile driven path: map each selected group to its
                    # profile and let DeviceConfig bring the devices up.
                    selected_groups = group_name.split(',')
                    selected_profiles = profile_name.split(',')
                    config_devices = {}
                    for i in range(len(selected_groups)):
                        config_devices[selected_groups[i]] = selected_profiles[i]

                    config_obj.initiate_group()
                    asyncio.run(config_obj.connectivity(config_devices))

                    adbresponse = config_obj.adb_obj.get_devices()
                    resource_manager = config_obj.laptop_obj.get_devices()
                    all_res = {}
                    df1 = config_obj.display_groups(config_obj.groups)
                    groups_list = df1.to_dict(orient='list')
                    group_devices = {}

                    # Build lookup tables: Android serial -> eid, laptop hostname -> shelf.resource.
                    for adb in adbresponse:
                        group_devices[adb['serial']] = adb['eid']
                    for res in resource_manager:
                        all_res[res['hostname']] = res['shelf'] + '.' + res['resource']
                    eid_list = []
                    # Collect the eids of every device belonging to a selected group.
                    for grp_name in groups_list.keys():
                        for g_name in selected_groups:
                            if grp_name == g_name:
                                for j in groups_list[grp_name]:
                                    if j in group_devices.keys():
                                        eid_list.append(group_devices[j])
                                    elif j in all_res.keys():
                                        eid_list.append(all_res[j])
                    if zoom_host in eid_list:
                        # Remove the existing instance of zoom_host from the list
                        eid_list.remove(zoom_host)
                        # Insert zoom_host at the beginning of the list
                        eid_list.insert(0, zoom_host)

                    resource_list = ",".join(id for id in eid_list)
                else:
                    # Manual path: Wi-Fi parameters used when (re)configuring devices.
                    config_dict = {
                        'ssid': ssid,
                        'passwd': passwd,
                        'enc': encryp,
                        'eap_method': eap_method,
                        'eap_identity': eap_identity,
                        'ieee80211': ieee8021x,
                        'ieee80211u': ieee80211u,
                        'ieee80211w': ieee80211w,
                        'enable_pkc': enable_pkc,
                        'bss_transition': bss_transition,
                        'power_save': power_save,
                        'disable_ofdma': disable_ofdma,
                        'roam_ft_ds': roam_ft_ds,
                        'key_management': key_management,
                        'pairwise': pairwise,
                        'private_key': private_key,
                        'ca_cert': ca_cert,
                        'client_cert': client_cert,
                        'pk_passwd': pk_passwd,
                        'pac_file': pac_file,
                        'server_ip': upstream_port,
                    }
                    if resource_list:
                        all_devices = config_obj.get_all_devices()
                        if group_name is None and file_name is None and profile_name is None:
                            dev_list = resource_list.split(',')
                            if not do_webUI:
                                # Host must be first in the list; mirror the group path above.
                                zoom_host = zoom_host.strip()
                                if zoom_host in dev_list:
                                    dev_list.remove(zoom_host)
                                    dev_list.insert(0, zoom_host)
                                if config:
                                    asyncio.run(config_obj.connectivity(device_list=dev_list, wifi_config=config_dict))
                                resource_list = ",".join(id for id in dev_list)
                    else:
                        # If no resources provided, prompt user to select devices manually
                        if config:
                            all_devices = config_obj.get_all_devices()
                            device_list = []
                            for device in all_devices:
                                if device["type"] != 'laptop':
                                    device_list.append(device["shelf"] + '.' + device["resource"] + " " + device["serial"])
                                elif device["type"] == 'laptop':
                                    device_list.append(device["shelf"] + '.' + device["resource"] + " " + device["hostname"])
                            print("Available Devices For Testing")
                            for device in device_list:
                                print(device)
                            zm_host = input("Enter Host Resource for the Test : ")
                            zm_host = zm_host.strip()
                            resource_list = input("Enter client Resources to run the test :")
                            # Host goes first so the zoom helper treats it as the meeting host.
                            resource_list = zm_host + "," + resource_list
                            dev1_list = resource_list.split(',')
                            asyncio.run(config_obj.connectivity(device_list=dev1_list, wifi_config=config_dict))

                result_list = []
                if not do_webUI:
                    if resource_list:
                        resources = resource_list.split(',')
                        # Keep only well-formed "shelf.resource" eids.
                        resources = [r for r in resources if len(r.split('.')) > 1]
                        # resources = sorted(resources, key=lambda x: int(x.split('.')[1]))
                        get_data = self.zoom_test_obj.select_real_devices(real_device_obj=realdevice, real_sta_list=resources)
                        for item in get_data:
                            item = item.strip()
                            # Find and append the matching lap to result_list
                            matching_laps = [lap for lap in laptops if lap.startswith(item)]
                            result_list.extend(matching_laps)
                        if not result_list:
                            logging.info("Resources donot exist hence Terminating the test.")
                            return
                        if len(result_list) != len(get_data):
                            logging.info("Few Resources donot exist")
                    else:
                        resources = self.zoom_test_obj.select_real_devices(real_device_obj=realdevice)
                else:
                    if do_webUI:  # NOTE(review): redundant re-check inside the else branch
                        self.zoom_test_obj.path = report_dir
                        # webGUI sends "shelf.resource.port" — trim to "shelf.resource".
                        resources = resource_list.split(',')
                        extracted_parts = [res.split('.')[:2] for res in resources]
                        formatted_parts = ['.'.join(parts) for parts in extracted_parts]

                        self.zoom_test_obj.select_real_devices(real_device_obj=realdevice, real_sta_list=formatted_parts)
                if do_webUI:
                    # Report device availability back to the webGUI running-test JSON.
                    if len(self.zoom_test_obj.real_sta_hostname) == 0:
                        logging.info("No device is available to run the test")
                        obj = {
                            "status": "Stopped",
                            "configuration_status": "configured"
                        }
                        self.zoom_test_obj.updating_webui_runningjson(obj)
                        return False
                    else:
                        obj = {
                            "configured_devices": self.zoom_test_obj.real_sta_hostname,
                            "configuration_status": "configured",
                            "no_of_devices": f' Total({len(self.zoom_test_obj.real_sta_os_type)}) : W({self.zoom_test_obj.windows}),L({self.zoom_test_obj.linux}),M({self.zoom_test_obj.mac})',
                            "device_list": self.zoom_test_obj.hostname_os_combination,
                            # "zoom_host":self.zoom_test_obj.zoom_host
                        }
                        self.zoom_test_obj.updating_webui_runningjson(obj)

                # The zoom helper drives LANforge generic endpoints; abort if absent.
                if not self.zoom_test_obj.check_tab_exists():
                    logging.error('Generic Tab is not available.\nAborting the test.')
                    return False
                self.zoom_test_obj.run(duration, upstream_port, signin_email, signin_passwd, participants)
                self.zoom_test_obj.data_store.clear()
                self.zoom_test_obj.generate_report()
                logging.info("Test Completed Sucessfully")
        except Exception as e:
            logging.error(f"AN ERROR OCCURED WHILE RUNNING TEST {e}")
            # traceback.print_exc()
        finally:
            # Teardown runs even on error, but is skipped for --help invocations.
            if not ('--help' in sys.argv or '-h' in sys.argv):
                if do_webUI:
                    try:
                        # NOTE(review): endpoint name says "yt" but this is the
                        # zoom test — confirm the status URL is correct.
                        url = f"http://{lanforge_ip}:5454/update_status_yt"
                        headers = {
                            'Content-Type': 'application/json',
                        }

                        data = {
                            'status': 'Completed',
                            'name': testname
                        }

                        response = requests.post(url, json=data, headers=headers)

                        if response.status_code == 200:
                            logging.info("Successfully updated STOP status to 'Completed'")
                            pass
                        else:
                            logging.error(f"Failed to update STOP status: {response.status_code} - {response.text}")

                    except Exception as e:
                        # Print an error message if an exception occurs during the request
                        logging.error(f"An error occurred while updating status: {e}")

                # Release helper resources so a following test starts clean.
                self.zoom_test_obj.redis_client.set('login_completed', 0)
                self.zoom_test_obj.stop_signal = True
                self.zoom_test_obj.app = None
                self.zoom_test_obj.redis_client = None
                if self.dowebgui:
                    self.webgui_test_done("zoom")
                # Record the finished test object for later report rendering.
                if self.current_exec == "parallel":
                    self.zoom_obj_dict["parallel"]["zoom_test"]["obj"] =self.zoom_test_obj
                else:
                    # First empty series slot receives this run's object.
                    for i in range(len(self.zoom_obj_dict["series"])):
                        if self.zoom_obj_dict["series"][f"zoom_test_{i+1}"]["obj"] is None:
                            self.zoom_obj_dict["series"][f"zoom_test_{i+1}"]["obj"] = self.zoom_test_obj
                            break
                logging.info("Waiting for Browser Cleanup in Laptops")
                self.zoom_test_obj.generic_endps_profile.cleanup()
                # self.zoom_test_obj.generic_endps_profile.cleanup()
                time.sleep(10)

        return True
+
+
+ def run_rb_test1(self,args):
+ try:
+ if self.dowebgui:
+ if not self.webgui_stop_check("rb"):
+ return False
+ logger_config = lf_logger_config.lf_logger_config()
+
+ if args.log_level:
+ logger_config.set_level(level=args.log_level)
+
+ if args.lf_logger_config_json:
+ logger_config.lf_logger_config_json = args.lf_logger_config_json
+ logger_config.load_lf_logger_config()
+ if args.url.lower().startswith("www."):
+ args.url = "https://" + args.url
+ if args.url.lower().startswith("http://"):
+ args.url = "https://" + args.url.removeprefix("http://")
+ self.rb_test = RealBrowserTest(host=args.host,
+ ssid=args.ssid,
+ passwd=args.passwd,
+ encryp=args.encryp,
+ suporrted_release=["7.0", "10", "11", "12"],
+ max_speed=args.max_speed,
+ url=args.url, count=args.count,
+ duration=args.duration,
+ resource_ids=args.device_list,
+ dowebgui=args.dowebgui,
+ result_dir=args.result_dir,
+ test_name=args.test_name,
+ incremental=args.incremental,
+ no_postcleanup=args.no_postcleanup,
+ no_precleanup=args.no_precleanup,
+ file_name=args.file_name,
+ group_name=args.group_name,
+ profile_name=args.profile_name,
+ eap_method=args.eap_method,
+ eap_identity=args.eap_identity,
+ ieee80211=args.ieee80211,
+ ieee80211u=args.ieee80211u,
+ ieee80211w=args.ieee80211w,
+ enable_pkc=args.enable_pkc,
+ bss_transition=args.bss_transition,
+ power_save=args.power_save,
+ disable_ofdma=args.disable_ofdma,
+ roam_ft_ds=args.roam_ft_ds,
+ key_management=args.key_management,
+ pairwise=args.pairwise,
+ private_key=args.private_key,
+ ca_cert=args.ca_cert,
+ client_cert=args.client_cert,
+ pk_passwd=args.pk_passwd,
+ pac_file=args.pac_file,
+ upstream_port="10.253.8.126",
+ expected_passfail_value=args.expected_passfail_value,
+ device_csv_name=args.device_csv_name,
+ wait_time=args.wait_time,
+ config=args.config,
+ selected_groups=args.group_name,
+ selected_profiles=args.profile_name,
+ no_browser_precleanup=True,
+ no_browser_postcleanup=True
+ )
+ print('CHECKING PORT AVAILBILITY for RB TEST')
+ self.port_clean_up(5003)
+ self.rb_test.change_port_to_ip()
+ self.rb_test.validate_and_process_args()
+ self.rb_test.config_obj = DeviceConfig.DeviceConfig(lanforge_ip=self.rb_test.host, file_name=self.rb_test.file_name, wait_time=self.rb_test.wait_time)
+ # if not self.rb_test.expected_passfail_value and self.rb_test.device_csv_name is None:
+ # self.rb_test.config_self.rb_test.device_csv_file(csv_name="device.csv")
+ self.rb_test.run_flask_server()
+ if self.rb_test.group_name and self.rb_test.profile_name and self.rb_test.file_name:
+ available_resources = self.rb_test.process_group_profiles()
+ else:
+ # --- Build configuration dictionary for WiFi parameters ---
+ config_dict = {
+ 'ssid': args.ssid,
+ 'passwd': args.passwd,
+ 'enc': args.encryp,
+ 'eap_method': args.eap_method,
+ 'eap_identity': args.eap_identity,
+ 'ieee80211': args.ieee80211,
+ 'ieee80211u': args.ieee80211u,
+ 'ieee80211w': args.ieee80211w,
+ 'enable_pkc': args.enable_pkc,
+ 'bss_transition': args.bss_transition,
+ 'power_save': args.power_save,
+ 'disable_ofdma': args.disable_ofdma,
+ 'roam_ft_ds': args.roam_ft_ds,
+ 'key_management': args.key_management,
+ 'pairwise': args.pairwise,
+ 'private_key': args.private_key,
+ 'ca_cert': args.ca_cert,
+ 'client_cert': args.client_cert,
+ 'pk_passwd': args.pk_passwd,
+ 'pac_file': args.pac_file,
+ 'server_ip': self.rb_test.upstream_port,
+ }
+ available_resources = self.rb_test.process_resources(config_dict)
+ if len(available_resources) != 0:
+ available_resources = self.rb_test.filter_ios_devices(available_resources)
+ if len(available_resources) == 0:
+ logging.error("No devices available to run the test. Exiting...")
+ return False
+
+ # --- Print available resources ---
+ logging.info("Devices available: {}".format(available_resources))
+ if self.rb_test.expected_passfail_value or self.rb_test.device_csv_name:
+ self.rb_test.update_passfail_value(available_resources)
+ # --- Handle incremental values ---
+ self.rb_test.handle_incremental(args, self.rb_test, available_resources, available_resources)
+ self.rb_test.handle_duration()
+ self.rb_test.run_test(available_resources)
+
+ except Exception as e:
+ logging.error("Error occured", e)
+ # traceback.print_exc()
+ finally:
+ if '--help' not in sys.argv and '-h' not in sys.argv:
+ self.rb_test.create_report()
+ if self.rb_test.dowebgui:
+ self.rb_test.webui_stop()
+ self.rb_test.stop()
+
+ # if not args.no_postcleanup:
+ # self.rb_test_obj.postcleanup()
+ if self.dowebgui:
+ self.webgui_test_done("rb")
+ self.rb_test.app = None
+ if self.current_exec == "parallel":
+ self.rb_obj_dict["parallel"]["rb_test"]["obj"] =self.rb_test
+ else:
+ for i in range(len(self.rb_obj_dict["series"])):
+ if self.rb_obj_dict["series"][f"rb_test_{i+1}"]["obj"] is None:
+ self.rb_obj_dict["series"][f"rb_test_{i+1}"]["obj"] = self.rb_test
+ break
+
+
+
+ return True
+
+
+ def run_rb_test(
+ self,
+ ssid: str = None,
+ passwd: str = None,
+ encryp: str = None,
+ url: str = "https://google.com",
+ max_speed: int = 0,
+ count: int = 1,
+ duration: str = None,
+ test_name: str = None,
+ dowebgui: bool = False,
+ result_dir: str = '',
+ lf_logger_config_json: str = None,
+ log_level: str = None,
+ debug: bool = False,
+ device_list: str = None,
+ webgui_incremental: str = None,
+ incremental: bool = False,
+ no_laptops: bool = False,
+ no_postcleanup: bool = False,
+ no_precleanup: bool = False,
+ file_name: str = None,
+ group_name: str = None,
+ profile_name: str = None,
+ eap_method: str = 'DEFAULT',
+ eap_identity: str = 'DEFAULT',
+ ieee80211: bool = False,
+ ieee80211u: bool = False,
+ ieee80211w: int = 1,
+ enable_pkc: bool = False,
+ bss_transition: bool = False,
+ power_save: bool = False,
+ disable_ofdma: bool = False,
+ roam_ft_ds: bool = False,
+ key_management: str = 'DEFAULT',
+ pairwise: str = 'NA',
+ private_key: str = 'NA',
+ ca_cert: str = 'NA',
+ client_cert: str = 'NA',
+ pk_passwd: str = 'NA',
+ pac_file: str = 'NA',
+ upstream_port: str = 'NA',
+ help_summary: str = None,
+ expected_passfail_value: str = None,
+ device_csv_name: str = None,
+ wait_time: int = 60,
+ config: bool = False,
+ exec_type: str = None,
+ ):
+ args = SimpleNamespace(**locals())
+ args.host = self.lanforge_ip
+ return self.run_rb_test1(args)
+
+ def browser_cleanup(self,rb_test=False,yt_test=False):
+ # count = 0
+ # series_tests = args.series_tests.split(',') if args.series_tests else None
+ # parallel_tests = args.parallel_tests.split(',') if args.parallel_tests else None
+ # zoom_test = False
+ # yt_test = False
+ # rb_test = False
+ # if 'zoom_test' in parallel_tests:
+ # count += 1
+ # if 'yt_test' in parallel_tests:
+ # count += 1
+ # if 'rb_test' in parallel_tests:
+ # count += 1
+ # if count <=1:
+ # self.browser_kill = True
+ # if args.series_test and not parallel_tests:
+ # self.browser_kill = True
+ # return True
+ # if rb_test:
+ # cnt = 0
+ # flag = False
+ # while not self.rb_build_done:
+ # time.sleep(1)
+ # cnt+=1
+ # if cnt >= 30:
+ # flag = True
+ # break
+ # if flag:
+ # return False
+ print('calledddddd')
+ # time.sleep(20)
+ if rb_test:
+ print('inn000')
+ print('laptop_os_types',self.rb_test_obj.laptop_os_types)
+ print('endpsss',self.rb_test_obj.generic_endps_profile.created_endp)
+ for i in range(0, len(self.rb_test_obj.laptop_os_types)):
+ print('inn1111')
+ if self.rb_test_obj.laptop_os_types[i] == 'windows':
+ cmd = "echo Performing POST cleanup of browser processes... & taskkill /F /IM chrome.exe /T >nul 2>&1 & taskkill /F /IM chromedriver.exe /T >nul 2>&1 & echo Browser processes terminated."
+ self.rb_test_obj.generic_endps_profile.set_cmd(self.rb_test_obj.generic_endps_profile.created_endp[i], cmd)
+ elif self.rb_test_obj.laptop_os_types[i] == 'linux':
+ # cmd = "su -l lanforge ctrb.bash %s %s %s %s" % (self.rb_test_obj.new_port_list[i], self.rb_test_obj.url, self.rb_test_obj.upstream_port, self.rb_test_obj.duration)
+ cmd = "pkill -f chrome; pkill -f chromedriver"
+ self.rb_test_obj.generic_endps_profile.set_cmd(self.rb_test_obj.generic_endps_profile.created_endp[i], cmd)
+ elif self.rb_test_obj.laptop_os_types[i] == 'macos':
+ cmd = "pkill -f Google Chrome; pkill -f chromedriver;"
+ self.rb_test_obj.generic_endps_profile.set_cmd(self.rb_test_obj.generic_endps_profile.created_endp[i], cmd)
+ if self.rb_test_obj.browser_precleanup:
+ cmd+=" precleanup"
+ if self.rb_test_obj.browser_postcleanup:
+ cmd+=" postcleanup"
+
+ for i, cx_batch in enumerate(self.rb_test_obj.cx_order_list):
+ self.rb_test_obj.start_specific(cx_batch)
+ logging.info(f"browser cleanup on {cx_batch}")
+ print('realbrowser test laptop cleaing.....')
+ time.sleep(20)
+
+
+ if yt_test:
+ for i in range(0, len(self.yt_test_obj.real_sta_os_types)):
+ if self.yt_test_obj.real_sta_os_types[i] == 'windows':
+ cmd = "echo Performing POST cleanup of browser processes... & taskkill /F /IM chrome.exe /T >nul 2>&1 & taskkill /F /IM chromedriver.exe /T >nul 2>&1 & echo Browser processes terminated."
+ self.yt_test_obj.generic_endps_profile.set_cmd(self.yt_test_obj.generic_endps_profile.created_endp[i], cmd)
+ elif self.yt_test_obj.real_sta_os_types[i] == 'linux':
+ cmd = "pkill -f chrome; pkill -f chromedriver"
+ self.yt_test_obj.generic_endps_profile.set_cmd(self.yt_test_obj.generic_endps_profile.created_endp[i], cmd)
+
+ elif self.yt_test_obj.real_sta_os_types[i] == 'macos':
+ cmd = "pkill -f Google Chrome; pkill -f chromedriver;"
+ self.yt_test_obj.generic_endps_profile.set_cmd(self.yt_test_obj.generic_endps_profile.created_endp[i], cmd)
+
+ self.yt_test_obj.generic_endps_profile.start_cx()
+ print('youtube test laptop cleaing.....')
+ time.sleep(20)
+
+ # if zoom_test:
+ # for i in range(len(self.zoom_test_obj.real_sta_os_type)):
+ # if self.zoom_test_obj.real_sta_os_type[i] == "windows":
+ # cmd = f"py zoom_client.py --ip {self.zoom_test_obj.upstream_port}"
+ # self.zoom_test_obj.generic_endps_profile.set_cmd(self.zoom_test_obj.generic_endps_profile.created_endp[i], cmd)
+ # elif self.zoom_test_obj.real_sta_os_type[i] == 'linux':
+ # cmd = "su -l lanforge ctzoom.bash %s %s %s" % (self.zoom_test_obj.new_port_list[i], self.zoom_test_obj.upstream_port, "client")
+ # self.zoom_test_obj.generic_endps_profile.set_cmd(self.zoom_test_obj.generic_endps_profile.created_endp[i], cmd)
+ # elif self.zoom_test_obj.real_sta_os_type[i] == 'macos':
+ # cmd = "sudo bash ctzoom.bash %s %s" % (self.zoom_test_obj.upstream_port, "client")
+ # self.zoom_test_obj.generic_endps_profile.set_cmd(self.zoom_test_obj.generic_endps_profile.created_endp[i], cmd)
+
+ # self.zoom_test_obj.generic_endps_profile.start_cx()
+ def render_each_test(self,ce):
+ # ce = "series"
+ unq_tests = []
+ test_map = {}
+ print('self.rb_obj_dict',self.rb_obj_dict)
+ print('self.yt_obj_dict',self.yt_obj_dict)
+ if ce == "series":
+ series_tests = self.series_tests.copy()
+ for test in series_tests:
+ if test not in test_map:
+ test_map[test] = 1
+ unq_tests.append(test)
+ else:
+ test_map[test] += 1
+ else:
+ unq_tests = self.parallel_tests.copy()
+ print('self.series_tests',self.series_tests)
+ print('test_map',test_map)
+ print('unq_tests',unq_tests)
+ for test_name in unq_tests:
+ try:
+ if test_name == "http_test":
+ # obj = []
+ obj_no = 1
+ obj_name = 'http_test'
+ if ce == "series":
+ obj_name += "_1"
+ while obj_name in self.http_obj_dict[ce]:
+ if ce == "parallel":
+ obj_no = ''
+ # report_path = self.result_path
+ # print("Current working directory:", os.getcwd())
+ http_data = self.http_obj_dict[ce][obj_name]["data"]
+ if http_data["bands"] == "Both":
+ num_stations = num_stations * 2
+
+ # report.set_title("HTTP DOWNLOAD TEST")
+ # report.set_date(date)
+ # if 'http_test' not in self.test_count_dict:
+ # self.test_count_dict['http_test']=0
+ # self.test_count_dict['http_test']+=1
+ self.overall_report.set_obj_html(_obj_title=f'HTTP Test {obj_no}', _obj="")
+ self.overall_report.build_objective()
+ self.overall_report.set_table_title("Test Setup Information")
+ self.overall_report.build_table_title()
+ self.overall_report.test_setup_table(value="Test Setup Information", test_setup_data=http_data["test_setup_info"])
+
+ graph2 = self.http_obj_dict[ce][obj_name]["obj"].graph_2(http_data["dataset2"], lis=http_data["lis"], bands=http_data["bands"],graph_no=obj_no)
+ print("graph name {}".format(graph2))
+ self.overall_report.set_graph_image(graph2)
+ self.overall_report.set_csv_filename(graph2)
+ self.overall_report.move_csv_file()
+ self.overall_report.move_graph_image()
+ self.overall_report.build_graph()
+
+ self.overall_report.set_obj_html(
+ "Average time taken to download file ",
+ "The below graph represents average time taken to download for each client "
+ ". X- axis shows “Average time taken to download a file ” and Y-axis shows "
+ "Client names."
+ )
+ self.overall_report.build_objective()
+
+ graph = self.http_obj_dict[ce][obj_name]["obj"].generate_graph(dataset=http_data["dataset"], lis=http_data["lis"], bands=http_data["bands"],graph_no=obj_no)
+ self.overall_report.set_graph_image(graph)
+ self.overall_report.set_csv_filename(graph)
+ self.overall_report.move_csv_file()
+ self.overall_report.move_graph_image()
+ self.overall_report.build_graph()
+
+ self.overall_report.set_obj_html(
+ "Download Time Table Description",
+ "This Table will provide you information of the "
+ "minimum, maximum and the average time taken by clients to download a webpage in seconds"
+ )
+ self.overall_report.build_objective()
+
+ self.http_obj_dict[ce][obj_name]["obj"].response_port = self.http_obj_dict[ce][obj_name]["obj"].local_realm.json_get("/port/all")
+ self.http_obj_dict[ce][obj_name]["obj"].channel_list, self.http_obj_dict[ce][obj_name]["obj"].mode_list, self.http_obj_dict[ce][obj_name]["obj"].ssid_list = [], [], []
+
+ if self.http_obj_dict[ce][obj_name]["obj"].client_type == "Real":
+ self.http_obj_dict[ce][obj_name]["obj"].devices = self.http_obj_dict[ce][obj_name]["obj"].devices_list
+ for interface in self.http_obj_dict[ce][obj_name]["obj"].response_port['interfaces']:
+ for port, port_data in interface.items():
+ if port in self.http_obj_dict[ce][obj_name]["obj"].port_list:
+ self.http_obj_dict[ce][obj_name]["obj"].channel_list.append(str(port_data['channel']))
+ self.http_obj_dict[ce][obj_name]["obj"].mode_list.append(str(port_data['mode']))
+ self.http_obj_dict[ce][obj_name]["obj"].ssid_list.append(str(port_data['ssid']))
+ elif self.http_obj_dict[ce][obj_name]["obj"].client_type == "Virtual":
+ self.http_obj_dict[ce][obj_name]["obj"].devices = self.http_obj_dict[ce][obj_name]["obj"].station_list[0]
+ for interface in self.http_obj_dict[ce][obj_name]["obj"].response_port['interfaces']:
+ for port, port_data in interface.items():
+ if port in self.http_obj_dict[ce][obj_name]["obj"].station_list[0]:
+ self.http_obj_dict[ce][obj_name]["obj"].channel_list.append(str(port_data['channel']))
+ self.http_obj_dict[ce][obj_name]["obj"].mode_list.append(str(port_data['mode']))
+ self.http_obj_dict[ce][obj_name]["obj"].macid_list.append(str(port_data['mac']))
+ self.http_obj_dict[ce][obj_name]["obj"].ssid_list.append(str(port_data['ssid']))
+
+ # Processing result_data
+ z, z1, z2 = [], [], []
+ for fcc in list(http_data["result_data"].keys()):
+ z.extend([str(round(i / 1000, 1)) for i in http_data["result_data"][fcc]["min"]])
+ z1.extend([str(round(i / 1000, 1)) for i in http_data["result_data"][fcc]["max"]])
+ z2.extend([str(round(i / 1000, 1)) for i in http_data["result_data"][fcc]["avg"]])
+
+ download_table_value_dup = {"Minimum": z, "Maximum": z1, "Average": z2}
+ download_table_value = {"Band": http_data["bands"], "Minimum": z, "Maximum": z1, "Average": z2}
+
+ # KPI reporting
+ kpi_path = self.overall_report.get_report_path()
+ print("kpi_path :{kpi_path}".format(kpi_path=kpi_path))
+
+ kpi_csv = lf_kpi_csv.lf_kpi_csv(
+ _kpi_path=kpi_path,
+ _kpi_test_rig=http_data["test_rig"],
+ _kpi_test_tag=http_data["test_tag"],
+ _kpi_dut_hw_version=http_data["dut_hw_version"],
+ _kpi_dut_sw_version=http_data["dut_sw_version"],
+ _kpi_dut_model_num=http_data["dut_model_num"],
+ _kpi_dut_serial_num=http_data["dut_serial_num"],
+ _kpi_test_id=http_data["test_id"]
+ )
+ kpi_csv.kpi_dict['Units'] = "Mbps"
+ for band in range(len(download_table_value["Band"])):
+ kpi_csv.kpi_csv_get_dict_update_time()
+ kpi_csv.kpi_dict['Graph-Group'] = "Webpage Download {band}".format(
+ band=download_table_value['Band'][band])
+ kpi_csv.kpi_dict['short-description'] = "Webpage download {band} Minimum".format(
+ band=download_table_value['Band'][band])
+ kpi_csv.kpi_dict['numeric-score'] = "{min}".format(min=download_table_value['Minimum'][band])
+ kpi_csv.kpi_csv_write_dict(kpi_csv.kpi_dict)
+
+ kpi_csv.kpi_dict['short-description'] = "Webpage download {band} Maximum".format(
+ band=download_table_value['Band'][band])
+ kpi_csv.kpi_dict['numeric-score'] = "{max}".format(max=download_table_value['Maximum'][band])
+ kpi_csv.kpi_csv_write_dict(kpi_csv.kpi_dict)
+
+ kpi_csv.kpi_dict['short-description'] = "Webpage download {band} Average".format(
+ band=download_table_value['Band'][band])
+ kpi_csv.kpi_dict['numeric-score'] = "{avg}".format(avg=download_table_value['Average'][band])
+ kpi_csv.kpi_csv_write_dict(kpi_csv.kpi_dict)
+
+ if http_data["csv_outfile"] is not None:
+ current_time = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())
+ http_data["csv_outfile"] = "{}_{}-test_l3_longevity.csv".format(http_data["csv_outfile"], current_time)
+ http_data["csv_outfile"] = self.overall_report.file_add_path(http_data["csv_outfile"])
+ print("csv output file : {}".format(http_data["csv_outfile"]))
+
+ test_setup = pd.DataFrame(download_table_value_dup)
+ self.overall_report.set_table_dataframe(test_setup)
+ self.overall_report.build_table()
+
+ if self.http_obj_dict[ce][obj_name]["obj"].group_name:
+ self.overall_report.set_table_title("Overall Results for Groups")
+ else:
+ self.overall_report.set_table_title("Overall Results")
+ self.overall_report.build_table_title()
+
+ if self.http_obj_dict[ce][obj_name]["obj"].client_type == "Real":
+ if self.http_obj_dict[ce][obj_name]["obj"].expected_passfail_value or self.http_obj_dict[ce][obj_name]["obj"].device_csv_name:
+ test_input_list, pass_fail_list = self.http_obj_dict[ce][obj_name]["obj"].get_pass_fail_list(http_data["dataset2"])
+
+ if self.http_obj_dict[ce][obj_name]["obj"].group_name:
+ for key, val in self.http_obj_dict[ce][obj_name]["obj"].group_device_map.items():
+ if self.http_obj_dict[ce][obj_name]["obj"].expected_passfail_value or self.http_obj_dict[ce][obj_name]["obj"].device_csv_name:
+ dataframe = self.http_obj_dict[ce][obj_name]["obj"].generate_dataframe(
+ val, self.http_obj_dict[ce][obj_name]["obj"].devices, self.http_obj_dict[ce][obj_name]["obj"].macid_list, self.http_obj_dict[ce][obj_name]["obj"].channel_list,
+ self.http_obj_dict[ce][obj_name]["obj"].ssid_list, self.http_obj_dict[ce][obj_name]["obj"].mode_list, http_data["dataset2"], test_input_list,
+ http_data["dataset"], http_data["dataset1"], http_data["rx_rate"], pass_fail_list
+ )
+ else:
+ dataframe = self.http_obj_dict[ce][obj_name]["obj"].generate_dataframe(
+ val, self.http_obj_dict[ce][obj_name]["obj"].devices, self.http_obj_dict[ce][obj_name]["obj"].macid_list, self.http_obj_dict[ce][obj_name]["obj"].channel_list,
+ self.http_obj_dict[ce][obj_name]["obj"].ssid_list, self.http_obj_dict[ce][obj_name]["obj"].mode_list, http_data["dataset2"], [], http_data["dataset"],
+ http_data["dataset1"], http_data["rx_rate"], []
+ )
+ if dataframe:
+ self.overall_report.set_obj_html("", "Group: {}".format(key))
+ self.overall_report.build_objective()
+ dataframe1 = pd.DataFrame(dataframe)
+ self.overall_report.set_table_dataframe(dataframe1)
+ self.overall_report.build_table()
+ else:
+ dataframe = {
+ " Clients": self.http_obj_dict[ce][obj_name]["obj"].devices,
+ " MAC ": self.http_obj_dict[ce][obj_name]["obj"].macid_list,
+ " Channel": self.http_obj_dict[ce][obj_name]["obj"].channel_list,
+ " SSID ": self.http_obj_dict[ce][obj_name]["obj"].ssid_list,
+ " Mode": self.http_obj_dict[ce][obj_name]["obj"].mode_list,
+ " No of times File downloaded ": http_data["dataset2"],
+ " Average time taken to Download file (ms)": http_data["dataset"],
+ " Bytes-rd (Mega Bytes) ": http_data["dataset1"],
+ "Rx Rate (Mbps)": http_data["rx_rate"],
+ "Failed url's": self.http_obj_dict[ce][obj_name]["obj"].data["total_err"]
+ }
+ if self.http_obj_dict[ce][obj_name]["obj"].expected_passfail_value or self.http_obj_dict[ce][obj_name]["obj"].device_csv_name:
+ dataframe[" Expected value of no of times file downloaded"] = test_input_list
+ dataframe["Status"] = pass_fail_list
+ dataframe1 = pd.DataFrame(dataframe)
+ self.overall_report.set_table_dataframe(dataframe1)
+ self.overall_report.build_table()
+ else:
+ dataframe = {
+ " Clients": self.http_obj_dict[ce][obj_name]["obj"].devices,
+ " MAC ": self.http_obj_dict[ce][obj_name]["obj"].macid_list,
+ " Channel": self.http_obj_dict[ce][obj_name]["obj"].channel_list,
+ " SSID ": self.http_obj_dict[ce][obj_name]["obj"].ssid_list,
+ " Mode": self.http_obj_dict[ce][obj_name]["obj"].mode_list,
+ " No of times File downloaded ": http_data["dataset2"],
+ " Average time taken to Download file (ms)": http_data["dataset"],
+ " Bytes-rd (Mega Bytes) ": http_data["dataset1"]
+ }
+ dataframe1 = pd.DataFrame(dataframe)
+ self.overall_report.set_table_dataframe(dataframe1)
+ self.overall_report.build_table()
+
+ # self.http_obj_dict[ce]
+ if ce == "series":
+ obj_no += 1
+ obj_name = f"http_test_{obj_no}"
+ else:
+ break
+
+ elif test_name == "ftp_test":
+ obj_no=1
+ obj_name = "ftp_test"
+ if ce == "series":
+ obj_name += "_1"
+ while obj_name in self.ftp_obj_dict[ce]:
+ # obj_name = f"ftp_test_{obj_no}"
+ if ce == "parallel":
+ obj_no = ''
+ params = self.ftp_obj_dict[ce][obj_name]["data"].copy()
+ ftp_data = params["ftp_data"].copy() if isinstance(params["ftp_data"], (list, dict, set)) else params["ftp_data"]
+ date = params["date"].copy() if isinstance(params["date"], (list, dict, set)) else params["date"]
+ input_setup_info = params["input_setup_info"].copy() if isinstance(params["input_setup_info"], (list, dict, set)) else params["input_setup_info"]
+ test_rig = params["test_rig"].copy() if isinstance(params["test_rig"], (list, dict, set)) else params["test_rig"]
+ test_tag = params["test_tag"].copy() if isinstance(params["test_tag"], (list, dict, set)) else params["test_tag"]
+ dut_hw_version = params["dut_hw_version"].copy() if isinstance(params["dut_hw_version"], (list, dict, set)) else params["dut_hw_version"]
+ dut_sw_version = params["dut_sw_version"].copy() if isinstance(params["dut_sw_version"], (list, dict, set)) else params["dut_sw_version"]
+ dut_model_num = params["dut_model_num"].copy() if isinstance(params["dut_model_num"], (list, dict, set)) else params["dut_model_num"]
+ dut_serial_num = params["dut_serial_num"].copy() if isinstance(params["dut_serial_num"], (list, dict, set)) else params["dut_serial_num"]
+ test_id = params["test_id"].copy() if isinstance(params["test_id"], (list, dict, set)) else params["test_id"]
+ bands = params["bands"].copy() if isinstance(params["bands"], (list, dict, set)) else params["bands"]
+ csv_outfile = params["csv_outfile"].copy() if isinstance(params["csv_outfile"], (list, dict, set)) else params["csv_outfile"]
+ local_lf_report_dir = params["local_lf_report_dir"].copy() if isinstance(params["local_lf_report_dir"], (list, dict, set)) else params["local_lf_report_dir"]
+ report_path = params["report_path"].copy() if isinstance(params["report_path"], (list, dict, set)) else params["report_path"]
+
+ # Optional parameter
+ config_devices = ""
+ if "config_devices" in params:
+ config_devices = params["config_devices"].copy() if isinstance(params["config_devices"], (list, dict, set)) else params["config_devices"]
+
+ no_of_stations = ""
+ duration = ""
+ x_fig_size = 18
+ y_fig_size = len(self.ftp_obj_dict[ce][obj_name]["obj"].real_client_list1) * .5 + 4
+
+ if int(self.ftp_obj_dict[ce][obj_name]["obj"].traffic_duration) < 60:
+ duration = str(self.ftp_obj_dict[ce][obj_name]["obj"].traffic_duration) + "s"
+ elif int(self.ftp_obj_dict[ce][obj_name]["obj"].traffic_duration == 60) or (int(self.ftp_obj_dict[ce][obj_name]["obj"].traffic_duration) > 60 and int(self.ftp_obj_dict[ce][obj_name]["obj"].traffic_duration) < 3600):
+ duration = str(self.ftp_obj_dict[ce][obj_name]["obj"].traffic_duration / 60) + "m"
+ else:
+ if int(self.ftp_obj_dict[ce][obj_name]["obj"].traffic_duration == 3600) or (int(self.ftp_obj_dict[ce][obj_name]["obj"].traffic_duration) > 3600):
+ duration = str(self.ftp_obj_dict[ce][obj_name]["obj"].traffic_duration / 3600) + "h"
+
+ client_list = []
+ if self.ftp_obj_dict[ce][obj_name]["obj"].clients_type == "Real":
+ client_list = self.ftp_obj_dict[ce][obj_name]["obj"].real_client_list1
+ android_devices, windows_devices, linux_devices, mac_devices = 0, 0, 0, 0
+ all_devices_names = []
+ device_type = []
+ total_devices = ""
+ for i in self.ftp_obj_dict[ce][obj_name]["obj"].real_client_list:
+ split_device_name = i.split(" ")
+ if 'android' in split_device_name:
+ all_devices_names.append(split_device_name[2] + ("(Android)"))
+ device_type.append("Android")
+ android_devices += 1
+ elif 'Win' in split_device_name:
+ all_devices_names.append(split_device_name[2] + ("(Windows)"))
+ device_type.append("Windows")
+ windows_devices += 1
+ elif 'Lin' in split_device_name:
+ all_devices_names.append(split_device_name[2] + ("(Linux)"))
+ device_type.append("Linux")
+ linux_devices += 1
+ elif 'Mac' in split_device_name:
+ all_devices_names.append(split_device_name[2] + ("(Mac)"))
+ device_type.append("Mac")
+ mac_devices += 1
+
+ if android_devices > 0:
+ total_devices += f" Android({android_devices})"
+ if windows_devices > 0:
+ total_devices += f" Windows({windows_devices})"
+ if linux_devices > 0:
+ total_devices += f" Linux({linux_devices})"
+ if mac_devices > 0:
+ total_devices += f" Mac({mac_devices})"
+ else:
+ if self.ftp_obj_dict[ce][obj_name]["obj"].clients_type == "Virtual":
+ client_list = self.ftp_obj_dict[ce][obj_name]["obj"].station_list
+ if 'ftp_test' not in self.test_count_dict:
+ self.test_count_dict['ftp_test']=0
+ self.test_count_dict['ftp_test']+=1
+ self.overall_report.set_obj_html(_obj_title=f'FTP Test {obj_no}', _obj="")
+ self.overall_report.build_objective()
+ self.overall_report.set_table_title("Test Setup Information")
+ self.overall_report.build_table_title()
+
+ if self.ftp_obj_dict[ce][obj_name]["obj"].clients_type == "Virtual":
+ no_of_stations = str(len(self.ftp_obj_dict[ce][obj_name]["obj"].station_list))
+ else:
+ no_of_stations = str(len(self.ftp_obj_dict[ce][obj_name]["obj"].input_devices_list))
+
+ if self.ftp_obj_dict[ce][obj_name]["obj"].clients_type == "Real":
+ if config_devices == "":
+ test_setup_info = {
+ "AP Name": self.ftp_obj_dict[ce][obj_name]["obj"].ap_name,
+ "SSID": self.ftp_obj_dict[ce][obj_name]["obj"].ssid,
+ "Security": self.ftp_obj_dict[ce][obj_name]["obj"].security,
+ "Device List": ", ".join(all_devices_names),
+ "No of Devices": "Total" + f"({no_of_stations})" + total_devices,
+ "Failed CXs": self.ftp_obj_dict[ce][obj_name]["obj"].failed_cx if self.ftp_obj_dict[ce][obj_name]["obj"].failed_cx else "NONE",
+ "File size": self.ftp_obj_dict[ce][obj_name]["obj"].file_size,
+ "File location": "/home/lanforge",
+ "Traffic Direction": self.ftp_obj_dict[ce][obj_name]["obj"].direction,
+ "Traffic Duration ": duration
+ }
+ else:
+ group_names = ', '.join(config_devices.keys())
+ profile_names = ', '.join(config_devices.values())
+ configmap = "Groups:" + group_names + " -> Profiles:" + profile_names
+ test_setup_info = {
+ "AP Name": self.ftp_obj_dict[ce][obj_name]["obj"].ap_name,
+ 'Configuration': configmap,
+ "No of Devices": "Total" + f"({no_of_stations})" + total_devices,
+ "File size": self.ftp_obj_dict[ce][obj_name]["obj"].file_size,
+ "File location": "/home/lanforge",
+ "Traffic Direction": self.ftp_obj_dict[ce][obj_name]["obj"].direction,
+ "Traffic Duration ": duration
+ }
+ else:
+ test_setup_info = {
+ "AP Name": self.ftp_obj_dict[ce][obj_name]["obj"].ap_name,
+ "SSID": self.ftp_obj_dict[ce][obj_name]["obj"].ssid,
+ "Security": self.ftp_obj_dict[ce][obj_name]["obj"].security,
+ "No of Devices": no_of_stations,
+ "File size": self.ftp_obj_dict[ce][obj_name]["obj"].file_size,
+ "File location": "/home/lanforge",
+ "Traffic Direction": self.ftp_obj_dict[ce][obj_name]["obj"].direction,
+ "Traffic Duration ": duration
+ }
+
+ self.overall_report.test_setup_table(value="Test Setup Information", test_setup_data=test_setup_info)
+
+ self.overall_report.set_obj_html(
+ _obj_title=f"No of times file {self.ftp_obj_dict[ce][obj_name]['obj'].direction}",
+ _obj=f"The below graph represents number of times a file {self.ftp_obj_dict[ce][obj_name]['obj'].direction} for each client"
+ f"(WiFi) traffic. X- axis shows “No of times file {self.ftp_obj_dict[ce][obj_name]['obj'].direction}” and Y-axis shows "
+ f"Client names.")
+
+ self.overall_report.build_objective()
+ graph = lf_bar_graph_horizontal(_data_set=[self.ftp_obj_dict[ce][obj_name]["obj"].url_data], _xaxis_name=f"No of times file {self.ftp_obj_dict[ce][obj_name]['obj'].direction}",
+ _yaxis_name="Client names",
+ _yaxis_categories=[i for i in client_list],
+ _yaxis_label=[i for i in client_list],
+ _yaxis_step=1,
+ _yticks_font=8,
+ _yticks_rotation=None,
+ _graph_title=f"No of times file {self.ftp_obj_dict[ce][obj_name]['obj'].direction} (Count)",
+ _title_size=16,
+ _figsize=(x_fig_size, y_fig_size),
+ _legend_loc="best",
+ _legend_box=(1.0, 1.0),
+ _color_name=['orange'],
+ _show_bar_value=True,
+ _enable_csv=True,
+ _graph_image_name=f"Total-url_ftp_{obj_no}", _color_edge=['black'],
+ _color=['orange'],
+ _label=[self.ftp_obj_dict[ce][obj_name]["obj"].direction])
+ graph_png = graph.build_bar_graph_horizontal()
+ print("graph name {}".format(graph_png))
+ self.overall_report.set_graph_image(graph_png)
+ # need to move the graph image to the results
+ self.overall_report.move_graph_image()
+ self.overall_report.set_csv_filename(graph_png)
+ self.overall_report.move_csv_file()
+ self.overall_report.build_graph()
+ self.overall_report.set_obj_html(
+ _obj_title=f"Average time taken to {self.ftp_obj_dict[ce][obj_name]['obj'].direction} file ",
+ _obj=f"The below graph represents average time taken to {self.ftp_obj_dict[ce][obj_name]['obj'].direction} for each client "
+ f"(WiFi) traffic. X- axis shows “Average time taken to {self.ftp_obj_dict[ce][obj_name]['obj'].direction} a file ” and Y-axis shows "
+ f"Client names.")
+
+ self.overall_report.build_objective()
+ graph = lf_bar_graph_horizontal(_data_set=[self.ftp_obj_dict[ce][obj_name]["obj"].uc_avg], _xaxis_name=f"Average time taken to {self.ftp_obj_dict[ce][obj_name]['obj'].direction} file in ms",
+ _yaxis_name="Client names",
+ _yaxis_categories=[i for i in client_list],
+ _yaxis_label=[i for i in client_list],
+ _yaxis_step=1,
+ _yticks_font=8,
+ _yticks_rotation=None,
+ _graph_title=f"Average time taken to {self.ftp_obj_dict[ce][obj_name]['obj'].direction} file",
+ _title_size=16,
+ _figsize=(x_fig_size, y_fig_size),
+ _legend_loc="best",
+ _legend_box=(1.0, 1.0),
+ _color_name=['steelblue'],
+ _show_bar_value=True,
+ _enable_csv=True,
+ _graph_image_name=f"ucg-avg_ftp_{obj_no}", _color_edge=['black'],
+ _color=['steelblue'],
+ _label=[self.ftp_obj_dict[ce][obj_name]["obj"].direction])
+ graph_png = graph.build_bar_graph_horizontal()
+ print("graph name {}".format(graph_png))
+ self.overall_report.set_graph_image(graph_png)
+ self.overall_report.move_graph_image()
+ # need to move the graph image to the results
+ self.overall_report.set_csv_filename(graph_png)
+ self.overall_report.move_csv_file()
+ self.overall_report.build_graph()
+ if(self.ftp_obj_dict[ce][obj_name]["obj"].dowebgui and self.ftp_obj_dict[ce][obj_name]["obj"].get_live_view):
+ for floor in range(0,int(self.ftp_obj_dict[ce][obj_name]["obj"].total_floors)):
+ script_dir = os.path.dirname(os.path.abspath(__file__))
+ throughput_image_path = os.path.join(script_dir, "heatmap_images", f"ftp_{self.ftp_obj_dict[ce][obj_name]['obj'].test_name}_{floor+1}.png")
+ # rssi_image_path = os.path.join(script_dir, "heatmap_images", f"{self.test_name}_rssi_{floor+1}.png")
+ timeout = 60 # seconds
+ start_time = time.time()
+
+ while not (os.path.exists(throughput_image_path)):
+ if time.time() - start_time > timeout:
+ print("Timeout: Images not found within 60 seconds.")
+ break
+ time.sleep(1)
+ while not os.path.exists(throughput_image_path):
+ if os.path.exists(throughput_image_path):
+ break
+ # time.sleep(10)
+ if os.path.exists(throughput_image_path):
+ self.overall_report.set_custom_html('')
+ self.overall_report.build_custom()
+ # self.overall_report.set_custom_html("Average Throughput Heatmap:
")
+ # self.overall_report.build_custom()
+ self.overall_report.set_custom_html(f'
')
+ self.overall_report.build_custom()
+ # os.remove(throughput_image_path)
+ self.overall_report.set_obj_html("File Download Time (sec)", "The below table will provide information of "
+ "minimum, maximum and the average time taken by clients to download a file in seconds")
+ self.overall_report.build_objective()
+ dataframe2 = {
+ "Minimum": [str(round(min(self.ftp_obj_dict[ce][obj_name]["obj"].uc_min) / 1000, 1))],
+ "Maximum": [str(round(max(self.ftp_obj_dict[ce][obj_name]["obj"].uc_max) / 1000, 1))],
+ "Average": [str(round((sum(self.ftp_obj_dict[ce][obj_name]["obj"].uc_avg) / len(client_list)) / 1000, 1))]
+ }
+ dataframe3 = pd.DataFrame(dataframe2)
+ self.overall_report.set_table_dataframe(dataframe3)
+ self.overall_report.build_table()
+ self.overall_report.set_table_title("Overall Results")
+ self.overall_report.build_table_title()
+ if self.ftp_obj_dict[ce][obj_name]["obj"].clients_type == 'Real':
+ # Calculating the pass/fail criteria when either expected_passfail_val or csv_name is provided
+ if self.ftp_obj_dict[ce][obj_name]["obj"].expected_passfail_val or self.ftp_obj_dict[ce][obj_name]["obj"].csv_name:
+ self.ftp_obj_dict[ce][obj_name]["obj"].get_pass_fail_list(client_list)
+ # When groups are provided a seperate table will be generated for each group using generate_dataframe
+ if self.ftp_obj_dict[ce][obj_name]["obj"].group_name:
+ for key, val in self.ftp_obj_dict[ce][obj_name]["obj"].group_device_map.items():
+ if self.ftp_obj_dict[ce][obj_name]["obj"].expected_passfail_val or self.ftp_obj_dict[ce][obj_name]["obj"].csv_name:
+ dataframe = self.ftp_obj_dict[ce][obj_name]["obj"].generate_dataframe(val, client_list, self.ftp_obj_dict[ce][obj_name]["obj"].mac_id_list, self.ftp_obj_dict[ce][obj_name]["obj"].channel_list, self.ftp_obj_dict[ce][obj_name]["obj"].ssid_list, self.ftp_obj_dict[ce][obj_name]["obj"].mode_list,
+ self.ftp_obj_dict[ce][obj_name]["obj"].url_data, self.ftp_obj_dict[ce][obj_name]["obj"].test_input_list, self.ftp_obj_dict[ce][obj_name]["obj"].uc_avg, self.ftp_obj_dict[ce][obj_name]["obj"].bytes_rd, self.ftp_obj_dict[ce][obj_name]["obj"].rx_rate, self.ftp_obj_dict[ce][obj_name]["obj"].pass_fail_list)
+ else:
+ dataframe = self.ftp_obj_dict[ce][obj_name]["obj"].generate_dataframe(val, client_list, self.ftp_obj_dict[ce][obj_name]["obj"].mac_id_list, self.ftp_obj_dict[ce][obj_name]["obj"].channel_list, self.ftp_obj_dict[ce][obj_name]["obj"].ssid_list,
+ self.ftp_obj_dict[ce][obj_name]["obj"].mode_list, self.ftp_obj_dict[ce][obj_name]["obj"].url_data, [], self.ftp_obj_dict[ce][obj_name]["obj"].uc_avg, self.ftp_obj_dict[ce][obj_name]["obj"].bytes_rd, self.ftp_obj_dict[ce][obj_name]["obj"].rx_rate, [])
+
+ if dataframe:
+ self.overall_report.set_obj_html("", "Group: {}".format(key))
+ self.overall_report.build_objective()
+ dataframe1 = pd.DataFrame(dataframe)
+ self.overall_report.set_table_dataframe(dataframe1)
+ self.overall_report.build_table()
+ else:
+ dataframe = {
+ " Clients": client_list,
+ " MAC ": self.ftp_obj_dict[ce][obj_name]["obj"].mac_id_list,
+ " Channel": self.ftp_obj_dict[ce][obj_name]["obj"].channel_list,
+ " SSID ": self.ftp_obj_dict[ce][obj_name]["obj"].ssid_list,
+ " Mode": self.ftp_obj_dict[ce][obj_name]["obj"].mode_list,
+ " No of times File downloaded ": self.ftp_obj_dict[ce][obj_name]["obj"].url_data,
+ " Time Taken to Download file (ms)": self.ftp_obj_dict[ce][obj_name]["obj"].uc_avg,
+ " Bytes-rd (Mega Bytes)": self.ftp_obj_dict[ce][obj_name]["obj"].bytes_rd,
+ " RX RATE (Mbps) ": self.ftp_obj_dict[ce][obj_name]["obj"].rx_rate,
+ "Failed Urls": self.ftp_obj_dict[ce][obj_name]["obj"].total_err
+ }
+ if self.ftp_obj_dict[ce][obj_name]["obj"].expected_passfail_val or self.ftp_obj_dict[ce][obj_name]["obj"].csv_name:
+ dataframe[" Expected output "] = self.ftp_obj_dict[ce][obj_name]["obj"].test_input_list
+ dataframe[" Status "] = self.ftp_obj_dict[ce][obj_name]["obj"].pass_fail_list
+
+ dataframe1 = pd.DataFrame(dataframe)
+ self.overall_report.set_table_dataframe(dataframe1)
+ self.overall_report.build_table()
+
+ else:
+ dataframe = {
+ " Clients": client_list,
+ " MAC ": self.ftp_obj_dict[ce][obj_name]["obj"].mac_id_list,
+ " Channel": self.ftp_obj_dict[ce][obj_name]["obj"].channel_list,
+ " SSID ": self.ftp_obj_dict[ce][obj_name]["obj"].ssid_list,
+ " Mode": self.ftp_obj_dict[ce][obj_name]["obj"].mode_list,
+ " No of times File downloaded ": self.ftp_obj_dict[ce][obj_name]["obj"].url_data,
+ " Time Taken to Download file (ms)": self.ftp_obj_dict[ce][obj_name]["obj"].uc_avg,
+ " Bytes-rd (Mega Bytes)": self.ftp_obj_dict[ce][obj_name]["obj"].bytes_rd,
+ }
+ dataframe1 = pd.DataFrame(dataframe)
+ self.overall_report.set_table_dataframe(dataframe1)
+ self.overall_report.build_table()
+ # self.overall_report.build_footer()
+ # html_file = self.overall_report.write_html()
+ # logger.info("returned file {}".format(html_file))
+ # logger.info(html_file)
+ # self.overall_report.write_pdf()
+
+ if csv_outfile is not None:
+ current_time = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())
+ csv_outfile = "{}_{}-test_l4_ftp.csv".format(
+ csv_outfile, current_time)
+ csv_outfile = self.overall_report.file_add_path(csv_outfile)
+ logger.info("csv output file : {}".format(csv_outfile))
+ if ce == "series":
+ obj_no+=1
+ obj_name = f"ftp_test_{obj_no}"
+ else:
+ break
+
+ elif test_name == "thput_test":
+ obj_no=1
+ obj_name = "thput_test"
+ if ce == "series":
+ obj_name += "_1"
+ while obj_name in self.thput_obj_dict[ce]:
+ if ce == "parallel":
+ obj_no = ''
+ self.overall_report.set_obj_html(_obj_title=f'THROUGHPUT Test {obj_no}', _obj="")
+ self.overall_report.build_objective()
+ # obj_name = f"thput_test_{obj_no}"
+ params = self.thput_obj_dict[ce][obj_name]["data"].copy()
+ iterations_before_test_stopped_by_user = params["iterations_before_test_stopped_by_user"].copy() if isinstance(params["iterations_before_test_stopped_by_user"], (list, dict, set)) else params["iterations_before_test_stopped_by_user"]
+ incremental_capacity_list = params["incremental_capacity_list"].copy() if isinstance(params["incremental_capacity_list"], (list, dict, set)) else params["incremental_capacity_list"]
+ data = params["data"].copy() if isinstance(params["data"], (list, dict, set)) else params["data"]
+ data1 = params["data1"].copy() if isinstance(params["data1"], (list, dict, set)) else params["data1"]
+ report_path = params["report_path"].copy() if isinstance(params["report_path"], (list, dict, set)) else params["report_path"]
+
+ self.thput_obj_dict[ce][obj_name]["obj"].ssid_list = self.thput_obj_dict[ce][obj_name]["obj"].get_ssid_list(self.thput_obj_dict[ce][obj_name]["obj"].input_devices_list)
+ self.thput_obj_dict[ce][obj_name]["obj"].signal_list, self.thput_obj_dict[ce][obj_name]["obj"].channel_list, self.thput_obj_dict[ce][obj_name]["obj"].mode_list, self.thput_obj_dict[ce][obj_name]["obj"].link_speed_list, rx_rate_list = self.thput_obj_dict[ce][obj_name]["obj"].get_signal_and_channel_data(self.thput_obj_dict[ce][obj_name]["obj"].input_devices_list)
+ selected_real_clients_names = params["selected_real_clients_names"] if "selected_real_clients_names" in params else None
+ if selected_real_clients_names is not None:
+ self.thput_obj_dict[ce][obj_name]["obj"].num_stations = selected_real_clients_names
+
+ # Initialize the report object
+ if self.thput_obj_dict[ce][obj_name]["obj"].do_interopability == False:
+ # df.to_csv(os.path.join(report_path_date_time, 'throughput_data.csv'))
+ # For groups and profiles configuration through webgui
+
+ self.overall_report.set_obj_html(_obj_title="Input Parameters",
+ _obj="The below tables provides the input parameters for the test")
+ self.overall_report.build_objective()
+
+ # Initialize counts and lists for device types
+ android_devices, windows_devices, linux_devices, mac_devices, ios_devices = 0, 0, 0, 0, 0
+ all_devices_names = []
+ device_type = []
+ packet_size_text = ''
+ total_devices = ""
+ if self.thput_obj_dict[ce][obj_name]["obj"].cx_profile.side_a_min_pdu == -1:
+ packet_size_text = 'AUTO'
+ else:
+ packet_size_text = str(self.thput_obj_dict[ce][obj_name]["obj"].cx_profile.side_a_min_pdu) + ' Bytes'
+ # Determine load type name based on self.thput_obj_dict[ce][obj_name]["obj"].load_type
+ if self.thput_obj_dict[ce][obj_name]["obj"].load_type == "wc_intended_load":
+ load_type_name = "Intended Load"
+ else:
+ load_type_name = "Per Client Load"
+ for i in self.thput_obj_dict[ce][obj_name]["obj"].real_client_list:
+ split_device_name = i.split(" ")
+ if 'android' in split_device_name:
+ all_devices_names.append(split_device_name[2] + ("(Android)"))
+ device_type.append("Android")
+ android_devices += 1
+ elif 'Win' in split_device_name:
+ all_devices_names.append(split_device_name[2] + ("(Windows)"))
+ device_type.append("Windows")
+ windows_devices += 1
+ elif 'Lin' in split_device_name:
+ all_devices_names.append(split_device_name[2] + ("(Linux)"))
+ device_type.append("Linux")
+ linux_devices += 1
+ elif 'Mac' in split_device_name:
+ all_devices_names.append(split_device_name[2] + ("(Mac)"))
+ device_type.append("Mac")
+ mac_devices += 1
+ elif 'iOS' in split_device_name:
+ all_devices_names.append(split_device_name[2] + ("(iOS)"))
+ device_type.append("iOS")
+ ios_devices += 1
+
+ # Build total_devices string based on counts
+ if android_devices > 0:
+ total_devices += f" Android({android_devices})"
+ if windows_devices > 0:
+ total_devices += f" Windows({windows_devices})"
+ if linux_devices > 0:
+ total_devices += f" Linux({linux_devices})"
+ if mac_devices > 0:
+ total_devices += f" Mac({mac_devices})"
+ if ios_devices > 0:
+ total_devices += f" iOS({ios_devices})"
+
+ # Determine incremental_capacity_data based on self.thput_obj_dict[ce][obj_name]["obj"].incremental_capacity
+ if self.thput_obj_dict[ce][obj_name]["obj"].gave_incremental:
+ incremental_capacity_data = "No Incremental values provided"
+ elif len(self.thput_obj_dict[ce][obj_name]["obj"].incremental_capacity) == 1:
+ if len(incremental_capacity_list) == 1:
+ incremental_capacity_data = str(self.thput_obj_dict[ce][obj_name]["obj"].incremental_capacity[0])
+ else:
+ incremental_capacity_data = ','.join(map(str, incremental_capacity_list))
+ elif (len(self.thput_obj_dict[ce][obj_name]["obj"].incremental_capacity) > 1):
+ self.thput_obj_dict[ce][obj_name]["obj"].incremental_capacity = self.thput_obj_dict[ce][obj_name]["obj"].incremental_capacity.split(',')
+ incremental_capacity_data = ', '.join(self.thput_obj_dict[ce][obj_name]["obj"].incremental_capacity)
+ else:
+ incremental_capacity_data = "None"
+
+ # Construct test_setup_info dictionary for test setup table
+ if self.thput_obj_dict[ce][obj_name]["obj"].group_name:
+ group_names = ', '.join(self.thput_obj_dict[ce][obj_name]["obj"].configdevices.keys())
+ profile_names = ', '.join(self.thput_obj_dict[ce][obj_name]["obj"].configdevices.values())
+ configmap = "Groups:" + group_names + " -> Profiles:" + profile_names
+ test_setup_info = {
+ "Test name": self.thput_obj_dict[ce][obj_name]["obj"].test_name,
+ "Configuration": configmap,
+ "Configured Devices": ", ".join(all_devices_names),
+ "No of Devices": "Total" + f"({str(self.thput_obj_dict[ce][obj_name]['obj'].num_stations)})" + total_devices,
+ "Increment": incremental_capacity_data,
+ "Traffic Duration in minutes": round(int(self.thput_obj_dict[ce][obj_name]["obj"].test_duration) * len(incremental_capacity_list) / 60, 2),
+ "Traffic Type": (self.thput_obj_dict[ce][obj_name]["obj"].traffic_type.strip("lf_")).upper(),
+ "Traffic Direction": self.thput_obj_dict[ce][obj_name]["obj"].direction,
+ "Upload Rate(Mbps)": str(round(int(self.thput_obj_dict[ce][obj_name]["obj"].cx_profile.side_a_min_bps) / 1000000, 2)) + "Mbps",
+ "Download Rate(Mbps)": str(round(int(self.thput_obj_dict[ce][obj_name]["obj"].cx_profile.side_b_min_bps) / 1000000, 2)) + "Mbps",
+ "Load Type": load_type_name,
+ "Packet Size": packet_size_text
+ }
+ else:
+ test_setup_info = {
+ "Test name": self.thput_obj_dict[ce][obj_name]["obj"].test_name,
+ "Device List": ", ".join(all_devices_names),
+ "No of Devices": "Total" + f"({str(self.thput_obj_dict[ce][obj_name]['obj'].num_stations)})" + total_devices,
+ "Increment": incremental_capacity_data,
+ "Traffic Duration in minutes": round(int(self.thput_obj_dict[ce][obj_name]["obj"].test_duration) * len(incremental_capacity_list) / 60, 2),
+ "Traffic Type": (self.thput_obj_dict[ce][obj_name]["obj"].traffic_type.strip("lf_")).upper(),
+ "Traffic Direction": self.thput_obj_dict[ce][obj_name]["obj"].direction,
+ "Upload Rate(Mbps)": str(round(int(self.thput_obj_dict[ce][obj_name]["obj"].cx_profile.side_a_min_bps) / 1000000, 2)) + "Mbps",
+ "Download Rate(Mbps)": str(round(int(self.thput_obj_dict[ce][obj_name]["obj"].cx_profile.side_b_min_bps) / 1000000, 2)) + "Mbps",
+ "Load Type": load_type_name,
+ "Packet Size": packet_size_text
+ }
+ self.overall_report.test_setup_table(test_setup_data=test_setup_info, value="Test Configuration")
+
+ # Loop through iterations and build graphs, tables for each iteration
+ for i in range(len(iterations_before_test_stopped_by_user)):
+ # rssi_signal_data=[]
+ devices_on_running = []
+ download_data = []
+ upload_data = []
+ upload_drop = []
+ download_drop = []
+ devices_data_to_create_bar_graph = []
+ # signal_data=[]
+ direction_in_table = []
+ packet_size_in_table = []
+ upload_list, download_list = [], []
+ rssi_data = []
+ data_iter = data[data['Iteration'] == i + 1]
+ avg_rtt_data = []
+
+ # for sig in self.thput_obj_dict[ce][obj_name]["obj"].signal_list[0:int(incremental_capacity_list[i])]:
+ # signal_data.append(int(sig)*(-1))
+ # rssi_signal_data.append(signal_data)
+
+ # Fetch devices_on_running from real_client_list
+ for j in range(data1[i][-1]):
+ devices_on_running.append(self.thput_obj_dict[ce][obj_name]["obj"].real_client_list[j].split(" ")[-1])
+
+ # Fetch download_data and upload_data based on load_type and direction
+ for k in devices_on_running:
+ # individual_device_data=[]
+
+ # Checking individual device download and upload rate by searching device name in dataframe
+ columns_with_substring = [col for col in data_iter.columns if k in col]
+ filtered_df = data_iter[columns_with_substring]
+ dl_len = len(filtered_df[[col for col in filtered_df.columns if "Download" in col][0]].values.tolist()) - 1
+ ul_len = len(filtered_df[[col for col in filtered_df.columns if "Upload" in col][0]].values.tolist()) - 1
+ if self.thput_obj_dict[ce][obj_name]["obj"].load_type == "wc_intended_load":
+ if self.thput_obj_dict[ce][obj_name]["obj"].direction == "Bi-direction":
+
+ # Append average download and upload data from filtered dataframe
+ download_data.append(round((sum(filtered_df[[col for col in filtered_df.columns if "Download" in col][0]].values.tolist()[1:dl_len]) / (dl_len - 1)), 2))
+ upload_data.append(round((sum(filtered_df[[col for col in filtered_df.columns if "Upload" in col][0]].values.tolist()[1:ul_len]) / (ul_len - 1)), 2))
+ # Append average upload and download drop from filtered dataframe
+ upload_drop.append(round((sum(filtered_df[[col for col in filtered_df.columns if "Tx % Drop" in col][0]].values.tolist()[1:ul_len]) / (ul_len - 1)), 2))
+ download_drop.append(round((sum(filtered_df[[col for col in filtered_df.columns if "Rx % Drop " in col][0]].values.tolist()[1:dl_len]) / (dl_len - 1)), 2))
+ avg_rtt_data.append(filtered_df[[col for col in filtered_df.columns if "Average RTT " in col][0]].values.tolist()[-1])
+ rssi_data.append(int(round(sum(filtered_df[[col for col in filtered_df.columns if "RSSI" in col][0]].values.tolist()) /
+ len(filtered_df[[col for col in filtered_df.columns if "RSSI" in col][0]].values.tolist()), 2)) * -1)
+ # Calculate and append upload and download throughput to lists
+ upload_list.append(str(round((int(self.thput_obj_dict[ce][obj_name]["obj"].cx_profile.side_a_min_bps) / 1000000) / int(incremental_capacity_list[i]), 2)))
+ download_list.append(str(round((int(self.thput_obj_dict[ce][obj_name]["obj"].cx_profile.side_b_min_bps) / 1000000) / int(incremental_capacity_list[i]), 2)))
+ if self.thput_obj_dict[ce][obj_name]["obj"].cx_profile.side_a_min_pdu == -1:
+ packet_size_in_table.append('AUTO')
+ else:
+ packet_size_in_table.append(self.thput_obj_dict[ce][obj_name]["obj"].cx_profile.side_a_min_pdu)
+ direction_in_table.append(self.thput_obj_dict[ce][obj_name]["obj"].direction)
+
+ elif self.thput_obj_dict[ce][obj_name]["obj"].direction == 'Download':
+
+ # Append average download data from filtered dataframe
+ download_data.append(round((sum(filtered_df[[col for col in filtered_df.columns if "Download" in col][0]].values.tolist()[1:dl_len]) / (dl_len - 1)), 2))
+
+ # Append 0 for upload data
+ upload_data.append(0)
+
+ rssi_data.append(int(round(sum(filtered_df[[col for col in filtered_df.columns if "RSSI" in col][0]].values.tolist()) /
+ len(filtered_df[[col for col in filtered_df.columns if "RSSI" in col][0]].values.tolist()), 2)) * -1)
+
+ # Calculate and append upload and download throughput to lists
+ upload_list.append(str(round((int(self.thput_obj_dict[ce][obj_name]["obj"].cx_profile.side_a_min_bps) / 1000000) / int(incremental_capacity_list[i]), 2)))
+ download_list.append(str(round((int(self.thput_obj_dict[ce][obj_name]["obj"].cx_profile.side_b_min_bps) / 1000000) / int(incremental_capacity_list[i]), 2)))
+ avg_rtt_data.append(filtered_df[[col for col in filtered_df.columns if "Average RTT " in col][0]].values.tolist()[-1])
+ # Append average download drop data from filtered dataframe
+ download_drop.append(round((sum(filtered_df[[col for col in filtered_df.columns if "Rx % Drop " in col][0]].values.tolist()[1:dl_len]) / (dl_len - 1)), 2))
+ if self.thput_obj_dict[ce][obj_name]["obj"].cx_profile.side_a_min_pdu == -1:
+ packet_size_in_table.append('AUTO')
+ else:
+ packet_size_in_table.append(self.thput_obj_dict[ce][obj_name]["obj"].cx_profile.side_a_min_pdu)
+ direction_in_table.append(self.thput_obj_dict[ce][obj_name]["obj"].direction)
+
+ elif self.thput_obj_dict[ce][obj_name]["obj"].direction == 'Upload':
+
+ # Calculate and append upload and download throughput to lists
+ upload_list.append(str(round((int(self.thput_obj_dict[ce][obj_name]["obj"].cx_profile.side_a_min_bps) / 1000000) / int(incremental_capacity_list[i]), 2)))
+ download_list.append(str(round((int(self.thput_obj_dict[ce][obj_name]["obj"].cx_profile.side_b_min_bps) / 1000000) / int(incremental_capacity_list[i]), 2)))
+
+ rssi_data.append(int(round(sum(filtered_df[[col for col in filtered_df.columns if "RSSI" in col][0]].values.tolist()) /
+ len(filtered_df[[col for col in filtered_df.columns if "RSSI" in col][0]].values.tolist()), 2)) * -1)
+
+ # Append Average upload data from filtered dataframe
+ upload_data.append(round((sum(filtered_df[[col for col in filtered_df.columns if "Upload" in col][0]].values.tolist()[1:ul_len]) / (ul_len - 1)), 2))
+ # Append 0 for download data
+ download_data.append(0)
+ # Append average upload drop data from filtered dataframe
+ upload_drop.append(round((sum(filtered_df[[col for col in filtered_df.columns if "Tx % Drop" in col][0]].values.tolist()[1:ul_len]) / (ul_len - 1)), 2))
+ avg_rtt_data.append(filtered_df[[col for col in filtered_df.columns if "Average RTT " in col][0]].values.tolist()[-1])
+ if self.thput_obj_dict[ce][obj_name]["obj"].cx_profile.side_a_min_pdu == -1:
+ packet_size_in_table.append('AUTO')
+ else:
+ packet_size_in_table.append(self.thput_obj_dict[ce][obj_name]["obj"].cx_profile.side_a_min_pdu)
+ direction_in_table.append(self.thput_obj_dict[ce][obj_name]["obj"].direction)
+
+ else:
+
+ if self.thput_obj_dict[ce][obj_name]["obj"].direction == "Bi-direction":
+ # Append average download and upload data from filtered dataframe
+ download_data.append(round((sum(filtered_df[[col for col in filtered_df.columns if "Download" in col][0]].values.tolist()[1:dl_len]) / (dl_len - 1)), 2))
+ upload_data.append(round((sum(filtered_df[[col for col in filtered_df.columns if "Upload" in col][0]].values.tolist()[1:ul_len]) / (ul_len - 1)), 2))
+ # Append average download and upload drop data from filtered dataframe
+ upload_drop.append(round((sum(filtered_df[[col for col in filtered_df.columns if "Tx % Drop" in col][0]].values.tolist()[1:ul_len]) / (ul_len - 1)), 2))
+ download_drop.append(round((sum(filtered_df[[col for col in filtered_df.columns if "Rx % Drop " in col][0]].values.tolist()[1:dl_len]) / (dl_len - 1)), 2))
+ # upload_data.append(filtered_df[[col for col in filtered_df.columns if "Upload" in col][0]].values.tolist()[-1])
+ rssi_data.append(int(round(sum(filtered_df[[col for col in filtered_df.columns if "RSSI" in col][0]].values.tolist()) /
+ len(filtered_df[[col for col in filtered_df.columns if "RSSI" in col][0]].values.tolist()), 2)) * -1)
+ avg_rtt_data.append(filtered_df[[col for col in filtered_df.columns if "Average RTT " in col][0]].values.tolist()[-1])
+ # Calculate and append upload and download throughput to lists
+ upload_list.append(str(round(int(self.thput_obj_dict[ce][obj_name]["obj"].cx_profile.side_a_min_bps) / 1000000, 2)))
+ download_list.append(str(round(int(self.thput_obj_dict[ce][obj_name]["obj"].cx_profile.side_b_min_bps) / 1000000, 2)))
+
+ if self.thput_obj_dict[ce][obj_name]["obj"].cx_profile.side_a_min_pdu == -1:
+ packet_size_in_table.append('AUTO')
+ else:
+ packet_size_in_table.append(self.thput_obj_dict[ce][obj_name]["obj"].cx_profile.side_a_min_pdu)
+ direction_in_table.append(self.thput_obj_dict[ce][obj_name]["obj"].direction)
+ elif self.thput_obj_dict[ce][obj_name]["obj"].direction == 'Download':
+
+ # Append average download data from filtered dataframe
+ download_data.append(round((sum(filtered_df[[col for col in filtered_df.columns if "Download" in col][0]].values.tolist()[1:dl_len]) / (dl_len - 1)), 2))
+ # Append 0 for upload data
+ upload_data.append(0)
+ rssi_data.append(int(round(sum(filtered_df[[col for col in filtered_df.columns if "RSSI" in col][0]].values.tolist()) /
+ len(filtered_df[[col for col in filtered_df.columns if "RSSI" in col][0]].values.tolist()), 2)) * -1)
+ avg_rtt_data.append(filtered_df[[col for col in filtered_df.columns if "Average RTT " in col][0]].values.tolist()[-1])
+ # Calculate and append upload and download throughput to lists
+ upload_list.append(str(round(int(self.thput_obj_dict[ce][obj_name]["obj"].cx_profile.side_a_min_bps) / 1000000, 2)))
+ download_list.append(str(round(int(self.thput_obj_dict[ce][obj_name]["obj"].cx_profile.side_b_min_bps) / 1000000, 2)))
+ # Append average download drop data from filtered dataframe
+ download_drop.append(round((sum(filtered_df[[col for col in filtered_df.columns if "Rx % Drop " in col][0]].values.tolist()[1:dl_len]) / (dl_len - 1)), 2))
+ if self.thput_obj_dict[ce][obj_name]["obj"].cx_profile.side_a_min_pdu == -1:
+ packet_size_in_table.append('AUTO')
+ else:
+ packet_size_in_table.append(self.thput_obj_dict[ce][obj_name]["obj"].cx_profile.side_a_min_pdu)
+ direction_in_table.append(self.thput_obj_dict[ce][obj_name]["obj"].direction)
+ elif self.thput_obj_dict[ce][obj_name]["obj"].direction == 'Upload':
+
+ # Calculate and append upload and download throughput to lists
+ upload_list.append(str(round(int(self.thput_obj_dict[ce][obj_name]["obj"].cx_profile.side_a_min_bps) / 1000000, 2)))
+ download_list.append(str(round(int(self.thput_obj_dict[ce][obj_name]["obj"].cx_profile.side_b_min_bps) / 1000000, 2)))
+ rssi_data.append(int(round(sum(filtered_df[[col for col in filtered_df.columns if "RSSI" in col][0]].values.tolist()) /
+ len(filtered_df[[col for col in filtered_df.columns if "RSSI" in col][0]].values.tolist()), 2)) * -1)
+ avg_rtt_data.append(filtered_df[[col for col in filtered_df.columns if "Average RTT " in col][0]].values.tolist()[-1])
+ # Append average upload data from filtered dataframe
+ upload_data.append(round((sum(filtered_df[[col for col in filtered_df.columns if "Upload" in col][0]].values.tolist()[1:ul_len]) / (ul_len - 1)), 2))
+ # Append average upload drop data from filtered dataframe
+ upload_drop.append(round((sum(filtered_df[[col for col in filtered_df.columns if "Tx % Drop" in col][0]].values.tolist()[1:ul_len]) / (ul_len - 1)), 2))
+
+ # Append 0 for download data
+ download_data.append(0)
+
+ if self.thput_obj_dict[ce][obj_name]["obj"].cx_profile.side_a_min_pdu == -1:
+ packet_size_in_table.append('AUTO')
+ else:
+ packet_size_in_table.append(self.thput_obj_dict[ce][obj_name]["obj"].cx_profile.side_a_min_pdu)
+ direction_in_table.append(self.thput_obj_dict[ce][obj_name]["obj"].direction)
+
+ data_set_in_graph = []
+
+ # Depending on the test direction, retrieve corresponding throughput data,
+ # organize it into datasets for graphing, and calculate real-time average throughput values accordingly.
+ if self.thput_obj_dict[ce][obj_name]["obj"].direction == "Bi-direction":
+ download_values_list = data['Overall Download'][data['Iteration'] == i + 1].values.tolist()
+ upload_values_list = data['Overall Upload'][data['Iteration'] == i + 1].values.tolist()
+ data_set_in_graph.append(download_values_list)
+ data_set_in_graph.append(upload_values_list)
+ devices_data_to_create_bar_graph.append(download_data)
+ devices_data_to_create_bar_graph.append(upload_data)
+ label_data = ['Download', 'Upload']
+ real_time_data = (
+ f"Real Time Throughput: Achieved Throughput: Download: {round(sum(download_data[0:int(incremental_capacity_list[i])]), 2)} Mbps, "
+ f"Upload: {round(sum(upload_data[0:int(incremental_capacity_list[i])]), 2)} Mbps"
+ )
+
+ elif self.thput_obj_dict[ce][obj_name]["obj"].direction == 'Download':
+ download_values_list = data['Overall Download'][data['Iteration'] == i + 1].values.tolist()
+ data_set_in_graph.append(download_values_list)
+ devices_data_to_create_bar_graph.append(download_data)
+ label_data = ['Download']
+ real_time_data = f"Real Time Throughput: Achieved Throughput: Download : {round(((sum(download_data[0:int(incremental_capacity_list[i])]))), 2)} Mbps"
+
+ elif self.thput_obj_dict[ce][obj_name]["obj"].direction == 'Upload':
+ upload_values_list = data['Overall Upload'][data['Iteration'] == i + 1].values.tolist()
+ data_set_in_graph.append(upload_values_list)
+ devices_data_to_create_bar_graph.append(upload_data)
+ label_data = ['Upload']
+ real_time_data = f"Real Time Throughput: Achieved Throughput: Upload : {round((sum(upload_data[0:int(incremental_capacity_list[i])])), 2)} Mbps"
+
+ if len(incremental_capacity_list) > 1:
+ self.overall_report.set_custom_html(f"Iteration-{i + 1}: Number of Devices Running : {len(devices_on_running)}
")
+ self.overall_report.build_custom()
+
+ self.overall_report.set_obj_html(
+ _obj_title=f"{real_time_data}",
+ _obj=" ")
+ self.overall_report.build_objective()
+ graph_png = self.thput_obj_dict[ce][obj_name]["obj"].build_line_graph(
+ data_set=data_set_in_graph,
+ xaxis_name="Time",
+ yaxis_name="Throughput (Mbps)",
+ xaxis_categories=data['TIMESTAMP'][data['Iteration'] == i + 1].values.tolist(),
+ label=label_data,
+ graph_image_name=f"line_graph{i}"
+ )
+ logger.info("graph name {}".format(graph_png))
+ self.overall_report.set_graph_image(graph_png)
+ self.overall_report.move_graph_image()
+
+ self.overall_report.build_graph()
+ x_fig_size = 15
+ y_fig_size = len(devices_on_running) * .5 + 4
+ self.overall_report.set_obj_html(
+ _obj_title="Per Client Avg-Throughput",
+ _obj=" ")
+ self.overall_report.build_objective()
+ devices_on_running_trimmed = [n[:17] if len(n) > 17 else n for n in devices_on_running]
+ graph = lf_bar_graph_horizontal(_data_set=devices_data_to_create_bar_graph,
+ _xaxis_name="Avg Throughput(Mbps)",
+ _yaxis_name="Devices",
+ _graph_image_name=f"image_name{i}_{obj_no}",
+ _label=label_data,
+ _yaxis_categories=devices_on_running_trimmed,
+ _legend_loc="best",
+ _legend_box=(1.0, 1.0),
+ _show_bar_value=True,
+ _figsize=(x_fig_size, y_fig_size)
+ )
+
+ graph_png = graph.build_bar_graph_horizontal()
+ logger.info("graph name {}".format(graph_png))
+ graph.build_bar_graph_horizontal()
+ self.overall_report.set_graph_image(graph_png)
+ self.overall_report.move_graph_image()
+ self.overall_report.build_graph()
+ self.overall_report.set_obj_html(
+ _obj_title="RSSI Of The Clients Connected",
+ _obj=" ")
+ self.overall_report.build_objective()
+ graph = lf_bar_graph_horizontal(_data_set=[rssi_data],
+ _xaxis_name="Signal(-dBm)",
+ _yaxis_name="Devices",
+ _graph_image_name=f"signal_image_name{i}_{obj_no}",
+ _label=['RSSI'],
+ _yaxis_categories=devices_on_running_trimmed,
+ _legend_loc="best",
+ _legend_box=(1.0, 1.0),
+ _show_bar_value=True,
+ _figsize=(x_fig_size, y_fig_size)
+ # _color=['lightcoral']
+ )
+ graph_png = graph.build_bar_graph_horizontal()
+ logger.info("graph name {}".format(graph_png))
+ graph.build_bar_graph_horizontal()
+ self.overall_report.set_graph_image(graph_png)
+ self.overall_report.move_graph_image()
+ self.overall_report.build_graph()
+ if(self.thput_obj_dict[ce][obj_name]["obj"].dowebgui and self.thput_obj_dict[ce][obj_name]["obj"].get_live_view):
+ self.thput_obj_dict[ce][obj_name]["obj"].add_live_view_images_to_report(self.overall_report)
+
+ if self.thput_obj_dict[ce][obj_name]["obj"].group_name:
+ self.overall_report.set_obj_html(
+ _obj_title="Detailed Result Table For Groups ",
+ _obj="The below tables provides detailed information for the throughput test on each group.")
+ else:
+
+ self.overall_report.set_obj_html(
+ _obj_title="Detailed Result Table ",
+ _obj="The below tables provides detailed information for the throughput test on each device.")
+ self.overall_report.build_objective()
+ self.thput_obj_dict[ce][obj_name]["obj"].mac_id_list = [item.split()[-1] if ' ' in item else item for item in self.thput_obj_dict[ce][obj_name]["obj"].mac_id_list]
+ if self.thput_obj_dict[ce][obj_name]["obj"].expected_passfail_value or self.thput_obj_dict[ce][obj_name]["obj"].device_csv_name:
+ test_input_list, pass_fail_list = self.thput_obj_dict[ce][obj_name]["obj"].get_pass_fail_list(device_type, incremental_capacity_list[i], devices_on_running, download_data, upload_data)
+ if self.thput_obj_dict[ce][obj_name]["obj"].group_name:
+ for key, val in self.thput_obj_dict[ce][obj_name]["obj"].group_device_map.items():
+ if self.thput_obj_dict[ce][obj_name]["obj"].expected_passfail_value or self.thput_obj_dict[ce][obj_name]["obj"].device_csv_name:
+ # Generating Dataframe when Groups with their profiles and pass_fail case is specified
+ dataframe = self.thput_obj_dict[ce][obj_name]["obj"].generate_dataframe(val,
+ device_type[0:int(incremental_capacity_list[i])],
+ devices_on_running[0:int(incremental_capacity_list[i])],
+ self.thput_obj_dict[ce][obj_name]["obj"].ssid_list[0:int(incremental_capacity_list[i])],
+ self.thput_obj_dict[ce][obj_name]["obj"].mac_id_list[0:int(incremental_capacity_list[i])],
+ self.thput_obj_dict[ce][obj_name]["obj"].channel_list[0:int(incremental_capacity_list[i])],
+ self.thput_obj_dict[ce][obj_name]["obj"].mode_list[0:int(incremental_capacity_list[i])],
+ direction_in_table[0:int(incremental_capacity_list[i])],
+ download_list[0:int(incremental_capacity_list[i])],
+ [str(n) for n in avg_rtt_data[0:int(incremental_capacity_list[i])]],
+ [str(n) + " Mbps" for n in download_data[0:int(incremental_capacity_list[i])]],
+ upload_list[0:int(incremental_capacity_list[i])],
+ [str(n) + " Mbps" for n in upload_data[0:int(incremental_capacity_list[i])]],
+ ['' if n == 0 else '-' + str(n) + " dbm" for n in rssi_data[0:int(incremental_capacity_list[i])]],
+ test_input_list,
+ self.thput_obj_dict[ce][obj_name]["obj"].link_speed_list[0:int(incremental_capacity_list[i])],
+ [str(n) for n in packet_size_in_table[0:int(incremental_capacity_list[i])]],
+ pass_fail_list,
+ upload_drop,
+ download_drop)
+ # Generating Dataframe for groups when pass_fail case is not specified
+ else:
+ dataframe = self.thput_obj_dict[ce][obj_name]["obj"].generate_dataframe(val,
+ device_type[0:int(incremental_capacity_list[i])],
+ devices_on_running[0:int(incremental_capacity_list[i])],
+ self.thput_obj_dict[ce][obj_name]["obj"].ssid_list[0:int(incremental_capacity_list[i])],
+ self.thput_obj_dict[ce][obj_name]["obj"].mac_id_list[0:int(incremental_capacity_list[i])],
+ self.thput_obj_dict[ce][obj_name]["obj"].channel_list[0:int(incremental_capacity_list[i])],
+ self.thput_obj_dict[ce][obj_name]["obj"].mode_list[0:int(incremental_capacity_list[i])],
+ direction_in_table[0:int(incremental_capacity_list[i])],
+ download_list[0:int(incremental_capacity_list[i])],
+ [str(n) for n in avg_rtt_data[0:int(incremental_capacity_list[i])]],
+ [str(n) + " Mbps" for n in download_data[0:int(incremental_capacity_list[i])]],
+ upload_list[0:int(incremental_capacity_list[i])],
+ [str(n) + " Mbps" for n in upload_data[0:int(incremental_capacity_list[i])]],
+ ['' if n == 0 else '-' + str(n) + " dbm" for n in rssi_data[0:int(incremental_capacity_list[i])]],
+ [],
+ self.thput_obj_dict[ce][obj_name]["obj"].link_speed_list[0:int(incremental_capacity_list[i])],
+ [str(n) for n in packet_size_in_table[0:int(incremental_capacity_list[i])]],
+ [],
+ upload_drop,
+ download_drop)
+ if dataframe:
+ self.overall_report.set_obj_html("", "Group: {}".format(key))
+ self.overall_report.build_objective()
+ dataframe1 = pd.DataFrame(dataframe)
+ self.overall_report.set_table_dataframe(dataframe1)
+ self.overall_report.build_table()
+ else:
+ bk_dataframe = {
+ " Device Type ": device_type[0:int(incremental_capacity_list[i])],
+ " Username": devices_on_running[0:int(incremental_capacity_list[i])],
+ " SSID ": self.thput_obj_dict[ce][obj_name]["obj"].ssid_list[0:int(incremental_capacity_list[i])],
+ " MAC ": self.thput_obj_dict[ce][obj_name]["obj"].mac_id_list[0:int(incremental_capacity_list[i])],
+ " Channel ": self.thput_obj_dict[ce][obj_name]["obj"].channel_list[0:int(incremental_capacity_list[i])],
+ " Mode": self.thput_obj_dict[ce][obj_name]["obj"].mode_list[0:int(incremental_capacity_list[i])],
+ # " Direction":direction_in_table[0:int(incremental_capacity_list[i])],
+ " Offered download rate (Mbps) ": download_list[0:int(incremental_capacity_list[i])],
+ " Observed Average download rate (Mbps) ": [str(n) for n in download_data[0:int(incremental_capacity_list[i])]],
+ " Offered upload rate (Mbps) ": upload_list[0:int(incremental_capacity_list[i])],
+ " Observed Average upload rate (Mbps) ": [str(n) for n in upload_data[0:int(incremental_capacity_list[i])]],
+ " RSSI (dBm) ": ['' if n == 0 else '-' + str(n) for n in rssi_data[0:int(incremental_capacity_list[i])]],
+ # " Link Speed ":self.thput_obj_dict[ce][obj_name]["obj"].link_speed_list[0:int(incremental_capacity_list[i])],
+ " Average RTT (ms)" : avg_rtt_data[0:int(incremental_capacity_list[i])],
+ " Packet Size(Bytes) ": [str(n) for n in packet_size_in_table[0:int(incremental_capacity_list[i])]],
+ }
+ if self.thput_obj_dict[ce][obj_name]["obj"].direction == "Bi-direction":
+ bk_dataframe[" Average Tx Drop % "] = upload_drop
+ bk_dataframe[" Average Rx Drop % "] = download_drop
+ elif self.thput_obj_dict[ce][obj_name]["obj"].direction == 'Download':
+ bk_dataframe[" Average Rx Drop % "] = download_drop
+ # adding rx drop while uploading as 0
+ bk_dataframe[" Average Tx Drop % "] = [0.0] * len(download_drop)
+
+ else:
+ bk_dataframe[" Average Tx Drop % "] = upload_drop
+ # adding rx drop while downloading as 0
+ bk_dataframe[" Average Rx Drop % "] = [0.0] * len(upload_drop)
+ if self.thput_obj_dict[ce][obj_name]["obj"].expected_passfail_value or self.thput_obj_dict[ce][obj_name]["obj"].device_csv_name:
+ bk_dataframe[" Expected " + self.thput_obj_dict[ce][obj_name]["obj"].direction + " rate "] = [str(n) + " Mbps" for n in test_input_list]
+ bk_dataframe[" Status "] = pass_fail_list
+ dataframe1 = pd.DataFrame(bk_dataframe)
+ self.overall_report.set_table_dataframe(dataframe1)
+ self.overall_report.build_table()
+
+ self.overall_report.set_custom_html('
')
+ self.overall_report.build_custom()
+
+ elif self.thput_obj_dict[ce][obj_name]["obj"].do_interopability:
+
+ self.overall_report.set_obj_html(_obj_title="Input Parameters",
+ _obj="The below tables provides the input parameters for the test")
+ self.overall_report.build_objective()
+
+ # Initialize counts and lists for device types
+ android_devices, windows_devices, linux_devices, mac_devices, ios_devices = 0, 0, 0, 0, 0
+ all_devices_names = []
+ device_type = []
+ total_devices = ""
+
+ for i in self.thput_obj_dict[ce][obj_name]["obj"].real_client_list:
+ split_device_name = i.split(" ")
+ if 'android' in split_device_name:
+ all_devices_names.append(split_device_name[2] + ("(Android)"))
+ device_type.append("Android")
+ android_devices += 1
+ elif 'Win' in split_device_name:
+ all_devices_names.append(split_device_name[2] + ("(Windows)"))
+ device_type.append("Windows")
+ windows_devices += 1
+ elif 'Lin' in split_device_name:
+ all_devices_names.append(split_device_name[2] + ("(Linux)"))
+ device_type.append("Linux")
+ linux_devices += 1
+ elif 'Mac' in split_device_name:
+ all_devices_names.append(split_device_name[2] + ("(Mac)"))
+ device_type.append("Mac")
+ mac_devices += 1
+ elif 'iOS' in split_device_name:
+ all_devices_names.append(split_device_name[2] + ("(iOS)"))
+ device_type.append("iOS")
+ ios_devices += 1
+
+ # Build total_devices string based on counts
+ if android_devices > 0:
+ total_devices += f" Android({android_devices})"
+ if windows_devices > 0:
+ total_devices += f" Windows({windows_devices})"
+ if linux_devices > 0:
+ total_devices += f" Linux({linux_devices})"
+ if mac_devices > 0:
+ total_devices += f" Mac({mac_devices})"
+ if ios_devices > 0:
+ total_devices += f" iOS({ios_devices})"
+
+ # Construct test_setup_info dictionary for test setup table
+ test_setup_info = {
+ "Test name": self.thput_obj_dict[ce][obj_name]["obj"].test_name,
+ "Device List": ", ".join(all_devices_names),
+ "No of Devices": "Total" + f"({str(self.thput_obj_dict[ce][obj_name]['obj'].num_stations)})" + total_devices,
+ "Traffic Duration in minutes": round(int(self.thput_obj_dict[ce][obj_name]["obj"].test_duration) * len(incremental_capacity_list) / 60, 2),
+ "Traffic Type": (self.thput_obj_dict[ce][obj_name]["obj"].traffic_type.strip("lf_")).upper(),
+ "Traffic Direction": self.thput_obj_dict[ce][obj_name]["obj"].direction,
+ "Upload Rate(Mbps)": str(round(int(self.thput_obj_dict[ce][obj_name]["obj"].cx_profile.side_a_min_bps) / 1000000, 2)) + "Mbps",
+ "Download Rate(Mbps)": str(round(int(self.thput_obj_dict[ce][obj_name]["obj"].cx_profile.side_b_min_bps) / 1000000, 2)) + "Mbps",
+ # "Packet Size" : str(self.thput_obj_dict[ce][obj_name]["obj"].cx_profile.side_a_min_pdu) + " Bytes"
+ }
+ self.overall_report.test_setup_table(test_setup_data=test_setup_info, value="Test Configuration")
+
+ if(not self.thput_obj_dict[ce][obj_name]["obj"].default_config):
+
+ self.overall_report.set_obj_html(_obj_title="Configuration Status of Devices",
+ _obj="The table below shows the configuration status of each device (except iOS) with respect to the SSID connection.")
+ self.overall_report.build_objective()
+
+ configured_dataframe = self.thput_obj_dict[ce][obj_name]["obj"].convert_to_table(self.thput_obj_dict[ce][obj_name]["obj"].configured_devices_check)
+ dataframe1 = pd.DataFrame(configured_dataframe)
+ self.overall_report.set_table_dataframe(dataframe1)
+ self.overall_report.build_table()
+
+ # Loop through iterations and build graphs, tables for each device
+ for i in range(len(iterations_before_test_stopped_by_user)):
+ rssi_signal_data = []
+ devices_on_running = []
+ download_data = []
+ upload_data = []
+ devices_data_to_create_bar_graph = []
+ signal_data = []
+ upload_drop = []
+ download_drop = []
+ direction_in_table = []
+ # packet_size_in_table=[]
+ upload_list, download_list = [], []
+ rssi_data = []
+ data_iter = data[data['Iteration'] == i + 1]
+ avg_rtt_data = []
+
+ # Fetch devices_on_running from real_client_list
+ devices_on_running.append(self.thput_obj_dict[ce][obj_name]["obj"].real_client_list[data1[i][-1] - 1].split(" ")[-1])
+
+ if not self.thput_obj_dict[ce][obj_name]["obj"].default_config and devices_on_running[0] in self.thput_obj_dict[ce][obj_name]["obj"].configured_devices_check and not self.thput_obj_dict[ce][obj_name]["obj"].configured_devices_check[devices_on_running[0]]:
+ continue
+
+ for k in devices_on_running:
+ # individual_device_data=[]
+
+ # Checking individual device download and upload rate by searching device name in dataframe
+ columns_with_substring = [col for col in data_iter.columns if k in col]
+ filtered_df = data_iter[columns_with_substring]
+ dl_len = len(filtered_df[[col for col in filtered_df.columns if "Download" in col][0]].values.tolist()) - 1
+ ul_len = len(filtered_df[[col for col in filtered_df.columns if "Upload" in col][0]].values.tolist()) - 1
+ if self.thput_obj_dict[ce][obj_name]["obj"].direction == "Bi-direction":
+
+ # Append download and upload data from filtered dataframe
+ download_data.append(round((sum(filtered_df[[col for col in filtered_df.columns if "Download" in col][0]].values.tolist()[1:dl_len]) / (dl_len - 1)), 2))
+ upload_data.append(round((sum(filtered_df[[col for col in filtered_df.columns if "Upload" in col][0]].values.tolist()[1:ul_len]) / (ul_len - 1)), 2))
+ upload_drop.append(round((sum(filtered_df[[col for col in filtered_df.columns if "Tx % Drop" in col][0]].values.tolist()[1:ul_len]) / (ul_len - 1)), 2))
+ download_drop.append(round((sum(filtered_df[[col for col in filtered_df.columns if "Rx % Drop " in col][0]].values.tolist()[1:dl_len]) / (dl_len - 1)), 2))
+ rssi_data.append(int(round(sum(filtered_df[[col for col in filtered_df.columns if "RSSI" in col][0]].values.tolist()) /
+ len(filtered_df[[col for col in filtered_df.columns if "RSSI" in col][0]].values.tolist()), 2)) * -1)
+ avg_rtt_data.append(filtered_df[[col for col in filtered_df.columns if "Average RTT " in col][0]].values.tolist()[-1])
+ # Calculate and append upload and download throughput to lists
+ upload_list.append(str(round(int(self.thput_obj_dict[ce][obj_name]["obj"].cx_profile.side_a_min_bps) / 1000000, 2)))
+ download_list.append(str(round(int(self.thput_obj_dict[ce][obj_name]["obj"].cx_profile.side_b_min_bps) / 1000000, 2)))
+
+ direction_in_table.append(self.thput_obj_dict[ce][obj_name]["obj"].direction)
+ elif self.thput_obj_dict[ce][obj_name]["obj"].direction == 'Download':
+
+ # Append download data from filtered dataframe
+ download_data.append(round((sum(filtered_df[[col for col in filtered_df.columns if "Download" in col][0]].values.tolist()[1:dl_len]) / (dl_len - 1)), 2))
+
+ # Append 0 for upload data
+ upload_data.append(0)
+ rssi_data.append(int(round(sum(filtered_df[[col for col in filtered_df.columns if "RSSI" in col][0]].values.tolist()) /
+ len(filtered_df[[col for col in filtered_df.columns if "RSSI" in col][0]].values.tolist()), 2)) * -1)
+ download_drop.append(round((sum(filtered_df[[col for col in filtered_df.columns if "Rx % Drop " in col][0]].values.tolist()[1:dl_len]) / (dl_len - 1)), 2))
+ avg_rtt_data.append(filtered_df[[col for col in filtered_df.columns if "Average RTT " in col][0]].values.tolist()[-1])
+ # Calculate and append upload and download throughput to lists
+ upload_list.append(str(round(int(self.thput_obj_dict[ce][obj_name]["obj"].cx_profile.side_a_min_bps) / 1000000, 2)))
+ download_list.append(str(round(int(self.thput_obj_dict[ce][obj_name]["obj"].cx_profile.side_b_min_bps) / 1000000, 2)))
+
+ direction_in_table.append(self.thput_obj_dict[ce][obj_name]["obj"].direction)
+ elif self.thput_obj_dict[ce][obj_name]["obj"].direction == 'Upload':
+
+ # Calculate and append upload and download throughput to lists
+ upload_list.append(str(round(int(self.thput_obj_dict[ce][obj_name]["obj"].cx_profile.side_a_min_bps) / 1000000, 2)))
+ download_list.append(str(round(int(self.thput_obj_dict[ce][obj_name]["obj"].cx_profile.side_b_min_bps) / 1000000, 2)))
+ rssi_data.append(int(round(sum(filtered_df[[col for col in filtered_df.columns if "RSSI" in col][0]].values.tolist()) /
+ len(filtered_df[[col for col in filtered_df.columns if "RSSI" in col][0]].values.tolist()), 2)) * -1)
+ upload_drop.append(round((sum(filtered_df[[col for col in filtered_df.columns if "Tx % Drop" in col][0]].values.tolist()[1:ul_len]) / (ul_len - 1)), 2))
+ avg_rtt_data.append(filtered_df[[col for col in filtered_df.columns if "Average RTT " in col][0]].values.tolist()[-1])
+ # Append upload data from filtered dataframe
+ upload_data.append(round((sum(filtered_df[[col for col in filtered_df.columns if "Upload" in col][0]].values.tolist()[1:ul_len]) / (ul_len - 1)), 2))
+
+ # Append 0 for download data
+ download_data.append(0)
+
+ direction_in_table.append(self.thput_obj_dict[ce][obj_name]["obj"].direction)
+
+ data_set_in_graph = []
+
+ # Depending on the test direction, retrieve corresponding throughput data,
+ # organize it into datasets for graphing, and calculate real-time average throughput values accordingly.
+ if self.thput_obj_dict[ce][obj_name]["obj"].direction == "Bi-direction":
+ download_values_list = data['Overall Download'][data['Iteration'] == i + 1].values.tolist()
+ upload_values_list = data['Overall Upload'][data['Iteration'] == i + 1].values.tolist()
+ data_set_in_graph.append(download_values_list)
+ data_set_in_graph.append(upload_values_list)
+ devices_data_to_create_bar_graph.append(download_data)
+ devices_data_to_create_bar_graph.append(upload_data)
+ label_data = ['Download', 'Upload']
+ real_time_data = (
+ f"Real Time Throughput: Achieved Throughput: Download: "
+ f"{round(sum(download_data[0:int(incremental_capacity_list[i])]) / len(download_data[0:int(incremental_capacity_list[i])]), 2)} Mbps, "
+ f"Upload: {round(sum(upload_data[0:int(incremental_capacity_list[i])]) / len(upload_data[0:int(incremental_capacity_list[i])]), 2)} Mbps"
+ )
+
+ elif self.thput_obj_dict[ce][obj_name]["obj"].direction == 'Download':
+ download_values_list = data['Overall Download'][data['Iteration'] == i + 1].values.tolist()
+ data_set_in_graph.append(download_values_list)
+ devices_data_to_create_bar_graph.append(download_data)
+ label_data = ['Download']
+ real_time_data = (
+ f"Real Time Throughput: Achieved Throughput: Download: "
+ f"{round(sum(download_data[0:int(incremental_capacity_list[i])]) / len(download_data[0:int(incremental_capacity_list[i])]), 2)} Mbps"
+ )
+
+ elif self.thput_obj_dict[ce][obj_name]["obj"].direction == 'Upload':
+ upload_values_list = data['Overall Upload'][data['Iteration'] == i + 1].values.tolist()
+ data_set_in_graph.append(upload_values_list)
+ devices_data_to_create_bar_graph.append(upload_data)
+ label_data = ['Upload']
+ real_time_data = (
+ f"Real Time Throughput: Achieved Throughput: Upload: "
+ f"{round(sum(upload_data[0:int(incremental_capacity_list[i])]) / len(upload_data[0:int(incremental_capacity_list[i])]), 2)} Mbps"
+ )
+
+ self.overall_report.set_custom_html(f"{i + 1}. Test On Device {', '.join(devices_on_running)}:
")
+ self.overall_report.build_custom()
+
+ self.overall_report.set_obj_html(
+ _obj_title=f"{real_time_data}",
+ _obj=" ")
+ self.overall_report.build_objective()
+ graph_png = self.thput_obj_dict[ce][obj_name]["obj"].build_line_graph(
+ data_set=data_set_in_graph,
+ xaxis_name="Time",
+ yaxis_name="Throughput (Mbps)",
+ xaxis_categories=data['TIMESTAMP'][data['Iteration'] == i + 1].values.tolist(),
+ label=label_data,
+ graph_image_name=f"line_graph{i}"
+ )
+ logger.info("graph name {}".format(graph_png))
+ self.overall_report.set_graph_image(graph_png)
+ self.overall_report.move_graph_image()
+
+ self.overall_report.build_graph()
+ x_fig_size = 15
+ y_fig_size = len(devices_on_running) * .5 + 4
+ self.overall_report.set_obj_html(
+ _obj_title="Per Client Avg-Throughput",
+ _obj=" ")
+ self.overall_report.build_objective()
+ devices_on_running_trimmed = [n[:17] if len(n) > 17 else n for n in devices_on_running]
+ graph = lf_bar_graph_horizontal(_data_set=devices_data_to_create_bar_graph,
+ _xaxis_name="Avg Throughput(Mbps)",
+ _yaxis_name="Devices",
+ _graph_image_name=f"image_name{i}_{obj_no}",
+ _label=label_data,
+ _yaxis_categories=devices_on_running_trimmed,
+ _legend_loc="best",
+ _legend_box=(1.0, 1.0),
+ _show_bar_value=True,
+ _figsize=(x_fig_size, y_fig_size)
+ )
+
+ graph_png = graph.build_bar_graph_horizontal()
+ logger.info("graph name {}".format(graph_png))
+ graph.build_bar_graph_horizontal()
+ self.overall_report.set_graph_image(graph_png)
+ self.overall_report.move_graph_image()
+ self.overall_report.build_graph()
+ self.overall_report.set_obj_html(
+ _obj_title="RSSI Of The Clients Connected",
+ _obj=" ")
+ self.overall_report.build_objective()
+ graph = lf_bar_graph_horizontal(_data_set=[rssi_data],
+ _xaxis_name="Signal(-dBm)",
+ _yaxis_name="Devices",
+ _graph_image_name=f"signal_image_name{i}_{obj_no}",
+ _label=['RSSI'],
+ _yaxis_categories=devices_on_running_trimmed,
+ _legend_loc="best",
+ _legend_box=(1.0, 1.0),
+ _show_bar_value=True,
+ _figsize=(x_fig_size, y_fig_size)
+ # _color=['lightcoral']
+ )
+ graph_png = graph.build_bar_graph_horizontal()
+ logger.info("graph name {}".format(graph_png))
+ graph.build_bar_graph_horizontal()
+ self.overall_report.set_graph_image(graph_png)
+ self.overall_report.move_graph_image()
+ self.overall_report.build_graph()
+
+ self.overall_report.set_obj_html(
+ _obj_title="Detailed Result Table ",
+ _obj="The below tables provides detailed information for the throughput test on each device.")
+ self.overall_report.build_objective()
+ self.thput_obj_dict[ce][obj_name]["obj"].mac_id_list = [item.split()[-1] if ' ' in item else item for item in self.thput_obj_dict[ce][obj_name]["obj"].mac_id_list]
+ if self.thput_obj_dict[ce][obj_name]["obj"].expected_passfail_value or self.thput_obj_dict[ce][obj_name]["obj"].device_csv_name:
+ test_input_list, pass_fail_list = self.thput_obj_dict[ce][obj_name]["obj"].get_pass_fail_list(device_type, incremental_capacity_list[i], devices_on_running, download_data, upload_data)
+ bk_dataframe = {}
+
+ # Dataframe changes with respect to groups and profiles in case of interopability
+ if self.thput_obj_dict[ce][obj_name]["obj"].group_name:
+ interop_tab_data = self.thput_obj_dict[ce][obj_name]["obj"].json_get('/adb/')["devices"]
+ res_list = []
+ grp_name = []
+ if device_type[int(incremental_capacity_list[i]) - 1] != 'Android':
+ res_list.append(devices_on_running[-1])
+ else:
+ for dev in interop_tab_data:
+ for item in dev.values():
+ if item['user-name'] == devices_on_running[-1]:
+ res_list.append(item['name'].split('.')[2])
+ break
+ for key, value in self.thput_obj_dict[ce][obj_name]["obj"].group_device_map.items():
+ if res_list[-1] in value:
+ grp_name.append(key)
+ break
+ bk_dataframe["Group Name"] = grp_name[-1]
+
+ bk_dataframe[" Device Type "] = device_type[int(incremental_capacity_list[i]) - 1]
+ bk_dataframe[" Username"] = devices_on_running[-1]
+ bk_dataframe[" SSID "] = self.thput_obj_dict[ce][obj_name]["obj"].ssid_list[int(incremental_capacity_list[i]) - 1]
+ bk_dataframe[" MAC "] = self.thput_obj_dict[ce][obj_name]["obj"].mac_id_list[int(incremental_capacity_list[i]) - 1]
+ bk_dataframe[" Channel "] = self.thput_obj_dict[ce][obj_name]["obj"].channel_list[int(incremental_capacity_list[i]) - 1]
+ bk_dataframe[" Mode"] = self.thput_obj_dict[ce][obj_name]["obj"].mode_list[int(incremental_capacity_list[i]) - 1]
+ bk_dataframe[" Offered download rate (Mbps)"] = download_list[-1]
+ bk_dataframe[" Observed Average download rate (Mbps)"] = [str(download_data[-1])]
+ bk_dataframe[" Offered upload rate (Mbps)"] = upload_list[-1]
+ bk_dataframe[" Observed Average upload rate (Mbps)"] = [str(upload_data[-1])]
+ bk_dataframe[" Average RTT (ms) "] = avg_rtt_data[-1]
+ bk_dataframe[" RSSI (dBm)"] = ['' if rssi_data[-1] == 0 else '-' + str(rssi_data[-1])]
+ if self.thput_obj_dict[ce][obj_name]["obj"].direction == "Bi-direction":
+ bk_dataframe[" Average Tx Drop % "] = upload_drop
+ bk_dataframe[" Average Rx Drop % "] = download_drop
+ elif self.thput_obj_dict[ce][obj_name]["obj"].direction == 'Download':
+ bk_dataframe[" Average Rx Drop % "] = download_drop
+ bk_dataframe[" Average Tx Drop % "] = [0.0] * len(download_drop)
+ else:
+ bk_dataframe[" Average Tx Drop % "] = upload_drop
+ bk_dataframe[" Average Rx Drop % "] = [0.0] * len(upload_drop)
+ # When pass fail criteria is specified
+ if self.thput_obj_dict[ce][obj_name]["obj"].expected_passfail_value or self.thput_obj_dict[ce][obj_name]["obj"].device_csv_name:
+ bk_dataframe[" Expected " + self.thput_obj_dict[ce][obj_name]["obj"].direction + " rate "] = test_input_list
+ bk_dataframe[" Status "] = pass_fail_list
+ dataframe1 = pd.DataFrame(bk_dataframe)
+ self.overall_report.set_table_dataframe(dataframe1)
+ self.overall_report.build_table()
+
+ self.overall_report.set_custom_html('
')
+ self.overall_report.build_custom()
+
+ if(self.thput_obj_dict[ce][obj_name]["obj"].dowebgui and self.thput_obj_dict[ce][obj_name]["obj"].get_live_view and self.thput_obj_dict[ce][obj_name]["obj"].do_interopability):
+ self.thput_obj_dict[ce][obj_name]["obj"].add_live_view_images_to_report(self.overall_report)
+ if ce == "series":
+ obj_no += 1
+ obj_name = f"ftp_test_{obj_no}"
+ else:
+ break
+
+ elif test_name == "ping_test":
+ obj_no = 1
+ obj_name = 'ping_test'
+ if ce == "series":
+ obj_name += "_1"
+ while obj_name in self.ping_obj_dict[ce]:
+ if ce == "parallel":
+ obj_no = ''
+ params = self.ping_obj_dict[ce][obj_name]["data"].copy()
+ result_json = params["result_json"]
+ result_dir = params["result_dir"]
+ report_path = params["report_path"]
+ config_devices = params["config_devices"]
+ group_device_map = params["group_device_map"]
+ if result_json is not None:
+ self.ping_obj_dict[ce][obj_name]["obj"].result_json = result_json
+ self.overall_report.set_obj_html(_obj_title=f'PING Test {obj_no}', _obj="")
+ self.overall_report.build_objective()
+ # Test setup information table for devices in device list
+ if config_devices == '':
+ test_setup_info = {
+ 'SSID': self.ping_obj_dict[ce][obj_name]["obj"].ssid,
+ 'Security': self.ping_obj_dict[ce][obj_name]["obj"].security,
+ 'Website / IP': self.ping_obj_dict[ce][obj_name]["obj"].target,
+ 'No of Devices': '{} (V:{}, A:{}, W:{}, L:{}, M:{})'.format(len(self.ping_obj_dict[ce][obj_name]["obj"].sta_list), len(self.ping_obj_dict[ce][obj_name]["obj"].sta_list) - len(self.ping_obj_dict[ce][obj_name]["obj"].real_sta_list), self.ping_obj_dict[ce][obj_name]["obj"].android, self.ping_obj_dict[ce][obj_name]["obj"].windows, self.ping_obj_dict[ce][obj_name]["obj"].linux, self.ping_obj_dict[ce][obj_name]["obj"].mac),
+ 'Duration (in minutes)': self.ping_obj_dict[ce][obj_name]["obj"].duration
+ }
+ # Test setup information table for devices in groups
+ else:
+ group_names = ', '.join(config_devices.keys())
+ profile_names = ', '.join(config_devices.values())
+ configmap = "Groups:" + group_names + " -> Profiles:" + profile_names
+ test_setup_info = {
+ 'Configuration': configmap,
+ 'Website / IP': self.ping_obj_dict[ce][obj_name]["obj"].target,
+ 'No of Devices': '{} (V:{}, A:{}, W:{}, L:{}, M:{})'.format(len(self.ping_obj_dict[ce][obj_name]["obj"].sta_list), len(self.ping_obj_dict[ce][obj_name]["obj"].sta_list) - len(self.ping_obj_dict[ce][obj_name]["obj"].real_sta_list), self.ping_obj_dict[ce][obj_name]["obj"].android, self.ping_obj_dict[ce][obj_name]["obj"].windows, self.ping_obj_dict[ce][obj_name]["obj"].linux, self.ping_obj_dict[ce][obj_name]["obj"].mac),
+ 'Duration (in minutes)': self.ping_obj_dict[ce][obj_name]["obj"].duration
+ }
+ self.overall_report.test_setup_table(
+ test_setup_data=test_setup_info, value='Test Setup Information')
+
+ # packets sent vs received vs dropped
+ self.overall_report.set_table_title(
+ 'Packets sent vs packets received vs packets dropped')
+ self.overall_report.build_table_title()
+ # graph for the above
+ self.ping_obj_dict[ce][obj_name]["obj"].packets_sent = []
+ self.ping_obj_dict[ce][obj_name]["obj"].packets_received = []
+ self.ping_obj_dict[ce][obj_name]["obj"].packets_dropped = []
+ self.ping_obj_dict[ce][obj_name]["obj"].device_names = []
+ self.ping_obj_dict[ce][obj_name]["obj"].device_modes = []
+ self.ping_obj_dict[ce][obj_name]["obj"].device_channels = []
+ self.ping_obj_dict[ce][obj_name]["obj"].device_min = []
+ self.ping_obj_dict[ce][obj_name]["obj"].device_max = []
+ self.ping_obj_dict[ce][obj_name]["obj"].device_avg = []
+ self.ping_obj_dict[ce][obj_name]["obj"].device_mac = []
+ self.ping_obj_dict[ce][obj_name]["obj"].device_names_with_errors = []
+ self.ping_obj_dict[ce][obj_name]["obj"].devices_with_errors = []
+ self.ping_obj_dict[ce][obj_name]["obj"].report_names = []
+ self.ping_obj_dict[ce][obj_name]["obj"].remarks = []
+ self.ping_obj_dict[ce][obj_name]["obj"].device_ssid = []
+ # packet_count_data = {}
+ os_type = []
+ for device, device_data in self.ping_obj_dict[ce][obj_name]["obj"].result_json.items():
+ logging.info('Device data: {} {}'.format(device, device_data))
+ os_type.append(device_data['os'])
+ self.ping_obj_dict[ce][obj_name]["obj"].packets_sent.append(int(device_data['sent']))
+ self.ping_obj_dict[ce][obj_name]["obj"].packets_received.append(int(device_data['recv']))
+ self.ping_obj_dict[ce][obj_name]["obj"].packets_dropped.append(int(device_data['dropped']))
+ self.ping_obj_dict[ce][obj_name]["obj"].device_names.append(device_data['name'] + ' ' + device_data['os'])
+ self.ping_obj_dict[ce][obj_name]["obj"].device_modes.append(device_data['mode'])
+ self.ping_obj_dict[ce][obj_name]["obj"].device_channels.append(device_data['channel'])
+ self.ping_obj_dict[ce][obj_name]["obj"].device_mac.append(device_data['mac'])
+ self.ping_obj_dict[ce][obj_name]["obj"].device_ssid.append(device_data['ssid'])
+ self.ping_obj_dict[ce][obj_name]["obj"].device_min.append(float(device_data['min_rtt'].replace(',', '')))
+ self.ping_obj_dict[ce][obj_name]["obj"].device_max.append(float(device_data['max_rtt'].replace(',', '')))
+ self.ping_obj_dict[ce][obj_name]["obj"].device_avg.append(float(device_data['avg_rtt'].replace(',', '')))
+ if (device_data['os'] == 'Virtual'):
+ self.ping_obj_dict[ce][obj_name]["obj"].report_names.append('{} {}'.format(device, device_data['os'])[0:25])
+ else:
+ self.ping_obj_dict[ce][obj_name]["obj"].report_names.append('{} {} {}'.format(device, device_data['os'], device_data['name']))
+ if (device_data['remarks'] != []):
+ self.ping_obj_dict[ce][obj_name]["obj"].device_names_with_errors.append(device_data['name'])
+ self.ping_obj_dict[ce][obj_name]["obj"].devices_with_errors.append(device)
+ self.ping_obj_dict[ce][obj_name]["obj"].remarks.append(','.join(device_data['remarks']))
+ x_fig_size = 15
+ y_fig_size = len(self.ping_obj_dict[ce][obj_name]["obj"].device_names) * .5 + 4
+ graph = lf_bar_graph_horizontal(_data_set=[self.ping_obj_dict[ce][obj_name]["obj"].packets_dropped, self.ping_obj_dict[ce][obj_name]["obj"].packets_received, self.ping_obj_dict[ce][obj_name]["obj"].packets_sent],
+ _xaxis_name='Packets Count',
+ _yaxis_name='Wireless Clients',
+ _label=[
+ 'Packets Loss', 'Packets Received', 'Packets Sent'],
+ _graph_image_name=f'Packets sent vs received vs dropped {obj_no}',
+ _yaxis_label=self.ping_obj_dict[ce][obj_name]["obj"].report_names,
+ _yaxis_categories=self.ping_obj_dict[ce][obj_name]["obj"].report_names,
+ _yaxis_step=1,
+ _yticks_font=8,
+ _graph_title='Packets sent vs received vs dropped',
+ _title_size=16,
+ _color=['lightgrey',
+ 'orange', 'steelblue'],
+ _color_edge=['black'],
+ _bar_height=0.15,
+ _figsize=(x_fig_size, y_fig_size),
+ _legend_loc="best",
+ _legend_box=(1.0, 1.0),
+ _dpi=96,
+ _show_bar_value=False,
+ _enable_csv=True,
+ _color_name=['lightgrey', 'orange', 'steelblue'])
+
+ graph_png = graph.build_bar_graph_horizontal()
+ logging.info('graph name {}'.format(graph_png))
+ self.overall_report.set_graph_image(graph_png)
+ # need to move the graph image to the results directory
+ self.overall_report.move_graph_image()
+ self.overall_report.set_csv_filename(graph_png)
+ self.overall_report.move_csv_file()
+ self.overall_report.build_graph()
+ if self.ping_obj_dict[ce][obj_name]["obj"].real:
+ # Calculating the pass/fail criteria when either expected_passfail_val or csv_name is provided
+ if self.ping_obj_dict[ce][obj_name]["obj"].expected_passfail_val or self.ping_obj_dict[ce][obj_name]["obj"].csv_name:
+ self.ping_obj_dict[ce][obj_name]["obj"].get_pass_fail_list(os_type)
+ # When groups are provided a seperate table will be generated for each group using generate_dataframe
+ if self.ping_obj_dict[ce][obj_name]["obj"].group_name:
+ for key, val in group_device_map.items():
+ if self.ping_obj_dict[ce][obj_name]["obj"].expected_passfail_val or self.ping_obj_dict[ce][obj_name]["obj"].csv_name:
+ dataframe = self.ping_obj_dict[ce][obj_name]["obj"].generate_dataframe(
+ val,
+ self.ping_obj_dict[ce][obj_name]["obj"].device_names,
+ self.ping_obj_dict[ce][obj_name]["obj"].device_mac,
+ self.ping_obj_dict[ce][obj_name]["obj"].device_channels,
+ self.ping_obj_dict[ce][obj_name]["obj"].device_ssid,
+ self.ping_obj_dict[ce][obj_name]["obj"].device_modes,
+ self.ping_obj_dict[ce][obj_name]["obj"].packets_sent,
+ self.ping_obj_dict[ce][obj_name]["obj"].packets_received,
+ self.ping_obj_dict[ce][obj_name]["obj"].packets_dropped,
+ self.ping_obj_dict[ce][obj_name]["obj"].percent_pac_loss,
+ self.ping_obj_dict[ce][obj_name]["obj"].test_input_list,
+ self.ping_obj_dict[ce][obj_name]["obj"].pass_fail_list)
+ else:
+ dataframe = self.ping_obj_dict[ce][obj_name]["obj"].generate_dataframe(val, self.ping_obj_dict[ce][obj_name]["obj"].device_names, self.ping_obj_dict[ce][obj_name]["obj"].device_mac, self.ping_obj_dict[ce][obj_name]["obj"].device_channels, self.ping_obj_dict[ce][obj_name]["obj"].device_ssid,
+ self.ping_obj_dict[ce][obj_name]["obj"].device_modes, self.ping_obj_dict[ce][obj_name]["obj"].packets_sent, self.ping_obj_dict[ce][obj_name]["obj"].packets_received, self.ping_obj_dict[ce][obj_name]["obj"].packets_dropped, [], [], [])
+ if dataframe:
+ self.overall_report.set_obj_html("", "Group: {}".format(key))
+ self.overall_report.build_objective()
+ dataframe1 = pd.DataFrame(dataframe)
+ self.overall_report.set_table_dataframe(dataframe1)
+ self.overall_report.build_table()
+
+ else:
+ dataframe1 = pd.DataFrame({
+ 'Wireless Client': self.ping_obj_dict[ce][obj_name]["obj"].device_names,
+ 'MAC': self.ping_obj_dict[ce][obj_name]["obj"].device_mac,
+ 'Channel': self.ping_obj_dict[ce][obj_name]["obj"].device_channels,
+ 'SSID ': self.ping_obj_dict[ce][obj_name]["obj"].device_ssid,
+ 'Mode': self.ping_obj_dict[ce][obj_name]["obj"].device_modes,
+ 'Packets Sent': self.ping_obj_dict[ce][obj_name]["obj"].packets_sent,
+ 'Packets Received': self.ping_obj_dict[ce][obj_name]["obj"].packets_received,
+ 'Packets Loss': self.ping_obj_dict[ce][obj_name]["obj"].packets_dropped,
+ })
+ if self.ping_obj_dict[ce][obj_name]["obj"].expected_passfail_val or self.ping_obj_dict[ce][obj_name]["obj"].csv_name:
+ dataframe1[" Percentage of Packet loss %"] = self.ping_obj_dict[ce][obj_name]["obj"].percent_pac_loss
+ dataframe1['Expected Packet loss %'] = self.ping_obj_dict[ce][obj_name]["obj"].test_input_list
+ dataframe1['Status'] = self.ping_obj_dict[ce][obj_name]["obj"].pass_fail_list
+ self.overall_report.set_table_dataframe(dataframe1)
+ self.overall_report.build_table()
+ else:
+ dataframe1 = pd.DataFrame({
+ 'Wireless Client': self.ping_obj_dict[ce][obj_name]["obj"].device_names,
+ 'MAC': self.ping_obj_dict[ce][obj_name]["obj"].device_mac,
+ 'Channel': self.ping_obj_dict[ce][obj_name]["obj"].device_channels,
+ 'SSID ': self.ping_obj_dict[ce][obj_name]["obj"].device_ssid,
+ 'Mode': self.ping_obj_dict[ce][obj_name]["obj"].device_modes,
+ 'Packets Sent': self.ping_obj_dict[ce][obj_name]["obj"].packets_sent,
+ 'Packets Received': self.ping_obj_dict[ce][obj_name]["obj"].packets_received,
+ 'Packets Loss': self.ping_obj_dict[ce][obj_name]["obj"].packets_dropped,
+ })
+ self.overall_report.set_table_dataframe(dataframe1)
+ self.overall_report.build_table()
+
+ # packets latency graph
+ self.overall_report.set_table_title('Ping Latency Graph')
+ self.overall_report.build_table_title()
+
+ graph = lf_bar_graph_horizontal(_data_set=[self.ping_obj_dict[ce][obj_name]["obj"].device_min, self.ping_obj_dict[ce][obj_name]["obj"].device_avg, self.ping_obj_dict[ce][obj_name]["obj"].device_max],
+ _xaxis_name='Time (ms)',
+ _yaxis_name='Wireless Clients',
+ _label=[
+ 'Min Latency (ms)', 'Average Latency (ms)', 'Max Latency (ms)'],
+ _graph_image_name=f'Ping Latency per client {obj_no}',
+ _yaxis_label=self.ping_obj_dict[ce][obj_name]["obj"].report_names,
+ _yaxis_categories=self.ping_obj_dict[ce][obj_name]["obj"].report_names,
+ _yaxis_step=1,
+ _yticks_font=8,
+ _graph_title='Ping Latency per client',
+ _title_size=16,
+ _color=['lightgrey',
+ 'orange', 'steelblue'],
+ _color_edge='black',
+ _bar_height=0.15,
+ _figsize=(x_fig_size, y_fig_size),
+ _legend_loc="best",
+ _legend_box=(1.0, 1.0),
+ _dpi=96,
+ _show_bar_value=False,
+ _enable_csv=True,
+ _color_name=['lightgrey', 'orange', 'steelblue'])
+
+ graph_png = graph.build_bar_graph_horizontal()
+ logging.info('graph name {}'.format(graph_png))
+ self.overall_report.set_graph_image(graph_png)
+ # need to move the graph image to the results directory
+ self.overall_report.move_graph_image()
+ self.overall_report.set_csv_filename(graph_png)
+ self.overall_report.move_csv_file()
+ self.overall_report.build_graph()
+
+ dataframe2 = pd.DataFrame({
+ 'Wireless Client': self.ping_obj_dict[ce][obj_name]["obj"].device_names,
+ 'MAC': self.ping_obj_dict[ce][obj_name]["obj"].device_mac,
+ 'Channel': self.ping_obj_dict[ce][obj_name]["obj"].device_channels,
+ 'SSID ': self.ping_obj_dict[ce][obj_name]["obj"].device_ssid,
+ 'Mode': self.ping_obj_dict[ce][obj_name]["obj"].device_modes,
+ 'Min Latency (ms)': self.ping_obj_dict[ce][obj_name]["obj"].device_min,
+ 'Average Latency (ms)': self.ping_obj_dict[ce][obj_name]["obj"].device_avg,
+ 'Max Latency (ms)': self.ping_obj_dict[ce][obj_name]["obj"].device_max
+ })
+ self.overall_report.set_table_dataframe(dataframe2)
+ self.overall_report.build_table()
+
+ # check if there are remarks for any device. If there are remarks, build table else don't
+ if (self.ping_obj_dict[ce][obj_name]["obj"].remarks != []):
+ self.overall_report.set_table_title('Notes')
+ self.overall_report.build_table_title()
+ dataframe3 = pd.DataFrame({
+ 'Wireless Client': self.ping_obj_dict[ce][obj_name]["obj"].device_names_with_errors,
+ 'Port': self.ping_obj_dict[ce][obj_name]["obj"].devices_with_errors,
+ 'Remarks': self.ping_obj_dict[ce][obj_name]["obj"].remarks
+ })
+ self.overall_report.set_table_dataframe(dataframe3)
+ self.overall_report.build_table()
+
+ # closing
+ # self.overall_report.build_custom()
+ if ce == "series":
+ obj_no += 1
+ obj_name = f"ping_test_{obj_no}"
+ else:
+ break
+ elif test_name == "qos_test":
+ obj_no = 1
+ obj_name = 'qos_test'
+ if ce == "series":
+ obj_name += "_1"
+ while obj_name in self.qos_obj_dict[ce]:
+ if ce == "parallel":
+ obj_no = ''
+ params = self.qos_obj_dict[ce][obj_name]["data"]
+ data = params["data"].copy() if isinstance(params["data"], (list, dict, set)) else params["data"]
+ input_setup_info = params["input_setup_info"].copy() if isinstance(params["input_setup_info"], (list, dict, set)) else params["input_setup_info"]
+ connections_download_avg = params["connections_download_avg"].copy() if isinstance(params["connections_download_avg"], (list, dict, set)) else params["connections_download_avg"]
+ connections_upload_avg = params["connections_upload_avg"].copy() if isinstance(params["connections_upload_avg"], (list, dict, set)) else params["connections_upload_avg"]
+ avg_drop_a = params["avg_drop_a"].copy() if isinstance(params["avg_drop_a"], (list, dict, set)) else params["avg_drop_a"]
+ avg_drop_b = params["avg_drop_b"].copy() if isinstance(params["avg_drop_b"], (list, dict, set)) else params["avg_drop_b"]
+ report_path = params["report_path"].copy() if isinstance(params["report_path"], (list, dict, set)) else params["report_path"]
+ result_dir_name = params["result_dir_name"].copy() if isinstance(params["result_dir_name"], (list, dict, set)) else params["result_dir_name"]
+ selected_real_clients_names = params["selected_real_clients_names"].copy() if isinstance(params["selected_real_clients_names"], (list, dict, set)) else params["selected_real_clients_names"]
+ config_devices = params["config_devices"].copy() if isinstance(params["config_devices"], (list, dict, set)) else params["config_devices"]
+ self.qos_obj_dict[ce][obj_name]["obj"].ssid_list = self.qos_obj_dict[ce][obj_name]["obj"].get_ssid_list(self.qos_obj_dict[ce][obj_name]["obj"].input_devices_list)
+ if selected_real_clients_names is not None:
+ self.qos_obj_dict[ce][obj_name]["obj"].num_stations = selected_real_clients_names
+ data_set, load, res = self.qos_obj_dict[ce][obj_name]["obj"].generate_graph_data_set(data)
+ # Initialize counts and lists for device types
+ android_devices, windows_devices, linux_devices, ios_devices, ios_mob_devices = 0, 0, 0, 0, 0
+ all_devices_names = []
+ device_type = []
+ total_devices = ""
+ for i in self.qos_obj_dict[ce][obj_name]["obj"].real_client_list:
+ split_device_name = i.split(" ")
+ if 'android' in split_device_name:
+ all_devices_names.append(split_device_name[2] + ("(Android)"))
+ device_type.append("Android")
+ android_devices += 1
+ elif 'Win' in split_device_name:
+ all_devices_names.append(split_device_name[2] + ("(Windows)"))
+ device_type.append("Windows")
+ windows_devices += 1
+ elif 'Lin' in split_device_name:
+ all_devices_names.append(split_device_name[2] + ("(Linux)"))
+ device_type.append("Linux")
+ linux_devices += 1
+ elif 'Mac' in split_device_name:
+ all_devices_names.append(split_device_name[2] + ("(Mac)"))
+ device_type.append("Mac")
+ ios_devices += 1
+ elif 'iOS' in split_device_name:
+ all_devices_names.append(split_device_name[2] + ("(iOS)"))
+ device_type.append("iOS")
+ ios_mob_devices += 1
+
+ # Build total_devices string based on counts
+ if android_devices > 0:
+ total_devices += f" Android({android_devices})"
+ if windows_devices > 0:
+ total_devices += f" Windows({windows_devices})"
+ if linux_devices > 0:
+ total_devices += f" Linux({linux_devices})"
+ if ios_devices > 0:
+ total_devices += f" Mac({ios_devices})"
+ if ios_mob_devices > 0:
+ total_devices += f" iOS({ios_mob_devices})"
+
+ # Test setup information table for devices in device list
+ if config_devices == "":
+ test_setup_info = {
+ "Device List": ", ".join(all_devices_names),
+ "Number of Stations": "Total" + f"({self.qos_obj_dict[ce][obj_name]['obj'].num_stations})" + total_devices,
+ "AP Model": self.qos_obj_dict[ce][obj_name]["obj"].ap_name,
+ "SSID": self.qos_obj_dict[ce][obj_name]["obj"].ssid,
+ "Traffic Duration in hours": round(int(self.qos_obj_dict[ce][obj_name]["obj"].test_duration) / 3600, 2),
+ "Security": self.qos_obj_dict[ce][obj_name]["obj"].security,
+ "Protocol": (self.qos_obj_dict[ce][obj_name]["obj"].traffic_type.strip("lf_")).upper(),
+ "Traffic Direction": self.qos_obj_dict[ce][obj_name]["obj"].direction,
+ "TOS": self.qos_obj_dict[ce][obj_name]["obj"].tos,
+ "Per TOS Load in Mbps": load
+ }
+ # Test setup information table for devices in groups
+ else:
+ group_names = ', '.join(config_devices.keys())
+ profile_names = ', '.join(config_devices.values())
+ configmap = "Groups:" + group_names + " -> Profiles:" + profile_names
+ test_setup_info = {
+ "AP Model": self.qos_obj_dict[ce][obj_name]["obj"].ap_name,
+ 'Configuration': configmap,
+ "Traffic Duration in hours": round(int(self.qos_obj_dict[ce][obj_name]["obj"].test_duration) / 3600, 2),
+ "Security": self.qos_obj_dict[ce][obj_name]["obj"].security,
+ "Protocol": (self.qos_obj_dict[ce][obj_name]["obj"].traffic_type.strip("lf_")).upper(),
+ "Traffic Direction": self.qos_obj_dict[ce][obj_name]["obj"].direction,
+ "TOS": self.qos_obj_dict[ce][obj_name]["obj"].tos,
+ "Per TOS Load in Mbps": load
+ }
+ print(res["throughput_table_df"])
+ self.overall_report.set_obj_html(_obj_title=f'QOS Test {obj_no}', _obj="")
+ self.overall_report.build_objective()
+ self.overall_report.test_setup_table(test_setup_data=test_setup_info, value="Test Configuration")
+ self.overall_report.set_table_title(
+ f"Overall {self.qos_obj_dict[ce][obj_name]['obj'].direction} Throughput for all TOS i.e BK | BE | Video (VI) | Voice (VO)")
+ self.overall_report.build_table_title()
+ df_throughput = pd.DataFrame(res["throughput_table_df"])
+ self.overall_report.set_table_dataframe(df_throughput)
+ self.overall_report.build_table()
+ for key in res["graph_df"]:
+ self.overall_report.set_obj_html(
+ _obj_title=f"Overall {self.qos_obj_dict[ce][obj_name]['obj'].direction} throughput for {len(self.qos_obj_dict[ce][obj_name]['obj'].input_devices_list)} clients with different TOS.",
+ _obj=f"The below graph represents overall {self.qos_obj_dict[ce][obj_name]['obj'].direction} throughput for all "
+ "connected stations running BK, BE, VO, VI traffic with different "
+ f"intended loads{load} per tos")
+ self.overall_report.build_objective()
+ graph = lf_bar_graph(_data_set=data_set,
+ _xaxis_name="Load per Type of Service",
+ _yaxis_name="Throughput (Mbps)",
+ _xaxis_categories=["BK,BE,VI,VO"],
+ _xaxis_label=['1 Mbps', '2 Mbps', '3 Mbps', '4 Mbps', '5 Mbps'],
+ _graph_image_name=f"tos_download_{key}Hz {obj_no}",
+ _label=["BK", "BE", "VI", "VO"],
+ _xaxis_step=1,
+ _graph_title=f"Overall {self.qos_obj_dict[ce][obj_name]['obj'].direction} throughput – BK,BE,VO,VI traffic streams",
+ _title_size=16,
+ _color=['orange', 'lightcoral', 'steelblue', 'lightgrey'],
+ _color_edge='black',
+ _bar_width=0.15,
+ _figsize=(18, 6),
+ _legend_loc="best",
+ _legend_box=(1.0, 1.0),
+ _dpi=96,
+ _show_bar_value=True,
+ _enable_csv=True,
+ _color_name=['orange', 'lightcoral', 'steelblue', 'lightgrey'])
+ graph_png = graph.build_bar_graph()
+ print("graph name {}".format(graph_png))
+ self.overall_report.set_graph_image(graph_png)
+ # need to move the graph image to the results directory
+ self.overall_report.move_graph_image()
+ self.overall_report.set_csv_filename(graph_png)
+ self.overall_report.move_csv_file()
+ self.overall_report.build_graph()
+ self.qos_obj_dict[ce][obj_name]["obj"].generate_individual_graph(res, self.overall_report, connections_download_avg, connections_upload_avg, avg_drop_a, avg_drop_b,obj_no)
+ self.overall_report.test_setup_table(test_setup_data=input_setup_info, value="Information")
+ if ce == "series":
+ obj_no += 1
+ obj_name = f"qos_test_{obj_no}"
+ else:
+ break
+
+ elif test_name == "mcast_test":
+ obj_no=1
+ obj_name = "mcast_test"
+ if ce == "series":
+ obj_name += "_1"
+ while obj_name in self.mcast_obj_dict[ce]:
+ if ce == "parallel":
+ obj_no = ''
+ print('is error',self.mcast_obj_dict)
+ params = self.mcast_obj_dict[ce][obj_name]["data"].copy()
+ config_devices = params["config_devices"].copy() if isinstance(params["config_devices"], (list, dict, set)) else params["config_devices"]
+ group_device_map = params["group_device_map"].copy() if isinstance(params["group_device_map"], (list, dict, set)) else params["group_device_map"]
+
+ # self.mcast_obj_dict[ce][obj_name]["obj"].update_a()
+ # self.mcast_obj_dict[ce][obj_name]["obj"].update_b()
+ test_setup_info = {
+ "DUT Name": self.mcast_obj_dict[ce][obj_name]["obj"].dut_model_num,
+ "DUT Hardware Version": self.mcast_obj_dict[ce][obj_name]["obj"].dut_hw_version,
+ "DUT Software Version": self.mcast_obj_dict[ce][obj_name]["obj"].dut_sw_version,
+ "DUT Serial Number": self.mcast_obj_dict[ce][obj_name]["obj"].dut_serial_num,
+ }
+ self.overall_report.set_obj_html(_obj_title=f'MULTICAST Test {obj_no}', _obj="")
+ self.overall_report.build_objective()
+ self.overall_report.set_table_title("Device Under Test Information")
+ self.overall_report.build_table_title()
+ self.overall_report.test_setup_table(value="Device Under Test",
+ test_setup_data=test_setup_info)
+ # For real devices when groups specified for configuration
+ if self.mcast_obj_dict[ce][obj_name]["obj"].real and self.mcast_obj_dict[ce][obj_name]["obj"].group_name:
+ group_names = ', '.join(config_devices.keys())
+ profile_names = ', '.join(config_devices.values())
+ configmap = "Groups:" + group_names + " -> Profiles:" + profile_names
+ test_input_info = {
+ "LANforge ip": self.mcast_obj_dict[ce][obj_name]["obj"].lfmgr,
+ "LANforge port": self.mcast_obj_dict[ce][obj_name]["obj"].lfmgr_port,
+ "Upstream": self.mcast_obj_dict[ce][obj_name]["obj"].upstream_port,
+ "Test Duration": self.mcast_obj_dict[ce][obj_name]["obj"].test_duration,
+ "Test Configuration": configmap,
+ "Polling Interval": self.mcast_obj_dict[ce][obj_name]["obj"].polling_interval,
+ "Total No. of Devices": self.mcast_obj_dict[ce][obj_name]["obj"].station_count,
+ }
+ else:
+ test_input_info = {
+ "LANforge ip": self.mcast_obj_dict[ce][obj_name]["obj"].lfmgr,
+ "LANforge port": self.mcast_obj_dict[ce][obj_name]["obj"].lfmgr_port,
+ "Upstream": self.mcast_obj_dict[ce][obj_name]["obj"].upstream_port,
+ "Test Duration": self.mcast_obj_dict[ce][obj_name]["obj"].test_duration,
+ "Polling Interval": self.mcast_obj_dict[ce][obj_name]["obj"].polling_interval,
+ "Total No. of Devices": self.mcast_obj_dict[ce][obj_name]["obj"].station_count,
+ }
+
+ self.overall_report.set_table_title("Test Configuration")
+ self.overall_report.build_table_title()
+ self.overall_report.test_setup_table(value="Test Configuration",
+ test_setup_data=test_input_info)
+
+ self.overall_report.set_table_title("Radio Configuration")
+ self.overall_report.build_table_title()
+
+ wifi_mode_dict = {
+ 0: 'AUTO', # 802.11g
+ 1: '802.11a', # 802.11a
+ 2: '802.11b', # 802.11b
+ 3: '802.11g', # 802.11g
+ 4: '802.11abg', # 802.11abg
+ 5: '802.11abgn', # 802.11abgn
+ 6: '802.11bgn', # 802.11bgn
+ 7: '802.11bg', # 802.11bg
+ 8: '802.11abgnAC', # 802.11abgn-AC
+ 9: '802.11anAC', # 802.11an-AC
+ 10: '802.11an', # 802.11an
+ 11: '802.11bgnAC', # 802.11bgn-AC
+ 12: '802.11abgnAX', # 802.11abgn-A+
+ # a/b/g/n/AC/AX (dual-band AX) support
+ 13: '802.11bgnAX', # 802.11bgn-AX
+ 14: '802.11anAX', # 802.11an-AX
+ 15: '802.11aAX', # 802.11a-AX (6E disables /n and /ac)
+ 16: '802.11abgnEHT', # 802.11abgn-EHT a/b/g/n/AC/AX/EHT (dual-band AX) support
+ 17: '802.11bgnEHT', # 802.11bgn-EHT
+ 18: '802.11anEHT', # 802.11an-ETH
+ 19: '802.11aBE', # 802.11a-EHT (6E disables /n and /ac)
+ }
+
+ for (
+ radio_,
+ ssid_,
+ _ssid_password_, # do not print password
+ ssid_security_,
+ mode_,
+ wifi_enable_flags_list_,
+ _reset_port_enable_,
+ _reset_port_time_min_,
+ _reset_port_time_max_) in zip(
+ self.mcast_obj_dict[ce][obj_name]["obj"].radio_name_list,
+ self.mcast_obj_dict[ce][obj_name]["obj"].ssid_list,
+ self.mcast_obj_dict[ce][obj_name]["obj"].ssid_password_list,
+ self.mcast_obj_dict[ce][obj_name]["obj"].ssid_security_list,
+ self.mcast_obj_dict[ce][obj_name]["obj"].wifi_mode_list,
+ self.mcast_obj_dict[ce][obj_name]["obj"].enable_flags_list,
+ self.mcast_obj_dict[ce][obj_name]["obj"].reset_port_enable_list,
+ self.mcast_obj_dict[ce][obj_name]["obj"].reset_port_time_min_list,
+ self.mcast_obj_dict[ce][obj_name]["obj"].reset_port_time_max_list):
+
+ mode_value = wifi_mode_dict[int(mode_)]
+
+ radio_info = {
+ "SSID": ssid_,
+ "Security": ssid_security_,
+ "Wifi mode set": mode_value,
+ 'Wifi Enable Flags': wifi_enable_flags_list_
+ }
+ self.overall_report.test_setup_table(value=radio_, test_setup_data=radio_info)
+
+ # TODO move the graphing to the class so it may be called as a service
+
+ # Graph TOS data
+ # Once the data is stopped can collect the data for the cx's both multi cast and uni cast
+ # if the traffic is still running will gather the running traffic
+ # self.mcast_obj_dict[ce][obj_name]["obj"].evaluate_qos()
+
+ # graph BK A
+ # try to do as a loop
+ logger.info(f"BEFORE REAL A {self.mcast_obj_dict[ce][obj_name]['obj'].client_dict_A}")
+ tos_list = ['BK', 'BE', 'VI', 'VO']
+ if self.mcast_obj_dict[ce][obj_name]["obj"].real:
+ tos_types = ['BE', 'BK', 'VI', 'VO']
+ print("BOOLLLLL",self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_B is self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A)
+ for tos_key in tos_types:
+ if tos_key in self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A:
+ tos_data = self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos_key]
+
+ # Filter A side
+ traffic_proto_A = tos_data.get("traffic_protocol_A", [])
+ indices_to_keep_A = [i for i, proto in enumerate(traffic_proto_A) if proto == "Mcast"]
+
+ # Filter B side
+ traffic_proto_B = tos_data.get("traffic_protocol_B", [])
+ indices_to_keep_B = [i for i, proto in enumerate(traffic_proto_B) if proto == "Mcast"]
+
+ for key in list(tos_data.keys()):
+ if key in ["colors", "labels"]:
+ continue # Keep as-is
+
+ if key.endswith('_A'):
+ filtered_list = [tos_data[key][i] for i in indices_to_keep_A if i < len(tos_data[key])]
+ tos_data[key] = filtered_list
+
+ elif key.endswith('_B'):
+ filtered_list = [tos_data[key][i] for i in indices_to_keep_B if i < len(tos_data[key])]
+ tos_data[key] = filtered_list
+ for tos_key in tos_types:
+ if tos_key in self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_B:
+ tos_data = self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_B[tos_key]
+
+ # Filter A side
+ traffic_proto_A = tos_data.get("traffic_protocol_A", [])
+ indices_to_keep_A = [i for i, proto in enumerate(traffic_proto_A) if proto == "Mcast"]
+
+ # Filter B side
+ traffic_proto_B = tos_data.get("traffic_protocol_B", [])
+ indices_to_keep_B = [i for i, proto in enumerate(traffic_proto_B) if proto == "Mcast"]
+
+ for key in list(tos_data.keys()):
+ if key in ["colors", "labels"]:
+ continue # Keep as-is
+
+ if key.endswith('_A'):
+ filtered_list = [tos_data[key][i] for i in indices_to_keep_A if i < len(tos_data[key])]
+ tos_data[key] = filtered_list
+
+ elif key.endswith('_B'):
+ filtered_list = [tos_data[key][i] for i in indices_to_keep_B if i < len(tos_data[key])]
+ tos_data[key] = filtered_list
+ # logger.info(f"AFTER REAL A {self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A}")
+ for tos in tos_list:
+ print(self.mcast_obj_dict[ce][obj_name]["obj"].tos)
+ if tos not in self.mcast_obj_dict[ce][obj_name]["obj"].tos:
+ continue
+ if (self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]["ul_A"] and self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]["dl_A"]):
+ min_bps_a = self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A["min_bps_a"]
+ min_bps_b = self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A["min_bps_b"]
+
+ dataset_list = [self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]["ul_A"], self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]["dl_A"]]
+ # TODO possibly explain the wording for upload and download
+ dataset_length = len(self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]["ul_A"])
+ x_fig_size = 20
+ y_fig_size = len(self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]["clients_A"]) * .4 + 5
+ logger.debug("length of clients_A {clients} resource_alias_A {alias_A}".format(
+ clients=len(self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]["clients_A"]), alias_A=len(self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]["resource_alias_A"])))
+ logger.debug("clients_A {clients}".format(clients=self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]["clients_A"]))
+ logger.debug("resource_alias_A {alias_A}".format(alias_A=self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]["resource_alias_A"]))
+
+ if int(min_bps_a) != 0:
+ self.overall_report.set_obj_html(
+ _obj_title=f"Individual throughput measured upload tcp or udp bps: {min_bps_a}, download tcp, udp, or mcast bps: {min_bps_b} station for traffic {tos} (WiFi).",
+ _obj=f"The below graph represents individual throughput for {dataset_length} clients running {tos} "
+ f"(WiFi) traffic. Y- axis shows “Client names“ and X-axis shows “"
+ f"Throughput in Mbps”.")
+ else:
+ self.overall_report.set_obj_html(
+ _obj_title=f"Individual throughput mcast download bps: {min_bps_b} traffic {tos} (WiFi).",
+ _obj=f"The below graph represents individual throughput for {dataset_length} clients running {tos} "
+ f"(WiFi) traffic. Y- axis shows “Client names“ and X-axis shows “"
+ f"Throughput in Mbps”.")
+
+ self.overall_report.build_objective()
+
+ graph = lf_bar_graph_horizontal(_data_set=dataset_list,
+ _xaxis_name="Throughput in bps",
+ _yaxis_name="Client names",
+ # _yaxis_categories=self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]["clients_A"],
+ _yaxis_categories=self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]["resource_alias_A"],
+ _graph_image_name=f"{tos}_A{obj_no}",
+ _label=self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['labels'],
+ _color_name=self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['colors'],
+ _color_edge=['black'],
+ # traditional station side -A
+ _graph_title=f"Individual {tos} client side traffic measurement - side a (downstream)",
+ _title_size=10,
+ _figsize=(x_fig_size, y_fig_size),
+ _show_bar_value=True,
+ _enable_csv=True,
+ _text_font=8,
+ _legend_loc="best",
+ _legend_box=(1.0, 1.0)
+ )
+ graph_png = graph.build_bar_graph_horizontal()
+ self.overall_report.set_graph_image(graph_png)
+ self.overall_report.move_graph_image()
+ self.overall_report.build_graph()
+ self.overall_report.set_csv_filename(graph_png)
+ self.overall_report.move_csv_file()
+ if(self.mcast_obj_dict[ce][obj_name]["obj"].dowebgui and self.mcast_obj_dict[ce][obj_name]["obj"].get_live_view):
+ for floor in range(0,int(self.mcast_obj_dict[ce][obj_name]["obj"].total_floors)):
+ script_dir = os.path.dirname(os.path.abspath(__file__))
+ throughput_image_path = os.path.join(script_dir, "heatmap_images", f"{self.mcast_obj_dict[ce][obj_name]['obj'].test_name}_throughput_{floor+1}.png")
+ rssi_image_path = os.path.join(script_dir, "heatmap_images", f"{self.mcast_obj_dict[ce][obj_name]['obj'].test_name}_rssi_{floor+1}.png")
+ timeout = 60 # seconds
+ start_time = time.time()
+
+ while not (os.path.exists(throughput_image_path) and os.path.exists(rssi_image_path)):
+ if time.time() - start_time > timeout:
+ print("Timeout: Images not found within 60 seconds.")
+ break
+ time.sleep(1)
+ while not os.path.exists(throughput_image_path) and not os.path.exists(rssi_image_path):
+ if os.path.exists(throughput_image_path) and os.path.exists(rssi_image_path):
+ break
+ # time.sleep(10)
+ if os.path.exists(throughput_image_path):
+ self.overall_report.set_custom_html('')
+ self.overall_report.build_custom()
+ # self.overall_report.set_custom_html("Average Throughput Heatmap:
")
+ # self.overall_report.build_custom()
+ self.overall_report.set_custom_html(f'
')
+ self.overall_report.build_custom()
+ # os.remove(throughput_image_path)
+
+ if os.path.exists(rssi_image_path):
+ self.overall_report.set_custom_html('')
+ self.overall_report.build_custom()
+ # self.overall_report.set_custom_html("Average RSSI Heatmap:
")
+ # self.overall_report.build_custom()
+ self.overall_report.set_custom_html(f'
')
+ self.overall_report.build_custom()
+ # os.remove(rssi_image_path)
+
+ # For real devices appending the required data for pass fail criteria
+ if self.mcast_obj_dict[ce][obj_name]["obj"].real:
+ up, down, off_up, off_down = [], [], [], []
+ for i in self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['ul_A']:
+ up.append(int(i) / 1000000)
+ for i in self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['dl_A']:
+ down.append(int(i) / 1000000)
+ for i in self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['offered_upload_rate_A']:
+ off_up.append(int(i) / 1_000_000)
+ for i in self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['offered_download_rate_A']:
+ off_down.append(int(i) / 1000000)
+ # if either 'expected_passfail_value' or 'device_csv_name' is provided for pass/fail evaluation
+ if self.mcast_obj_dict[ce][obj_name]["obj"].expected_passfail_value or self.mcast_obj_dict[ce][obj_name]["obj"].device_csv_name:
+ test_input_list, pass_fail_list = self.mcast_obj_dict[ce][obj_name]["obj"].get_pass_fail_list(tos, up, down)
+
+ if self.mcast_obj_dict[ce][obj_name]["obj"].real:
+ # When groups and profiles specifed for configuration
+ if self.mcast_obj_dict[ce][obj_name]["obj"].group_name:
+ for key, val in group_device_map.items():
+ # Generating Dataframe when Groups with their profiles and pass_fail case is specified
+ if self.mcast_obj_dict[ce][obj_name]["obj"].expected_passfail_value or self.mcast_obj_dict[ce][obj_name]["obj"].device_csv_name:
+ dataframe = self.mcast_obj_dict[ce][obj_name]["obj"].generate_dataframe(
+ val,
+ self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['resource_alias_A'],
+ self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['resource_eid_A'],
+ self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['resource_host_A'],
+ self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['resource_hw_ver_A'],
+ self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]["clients_A"],
+ self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['port_A'],
+ self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['mode_A'],
+ self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['mac_A'],
+ self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['ssid_A'],
+ self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['channel_A'],
+ self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['traffic_type_A'],
+ self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['traffic_protocol_A'],
+ off_up,
+ off_down,
+ up,
+ down,
+ test_input_list,
+ self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['download_rx_drop_percent_A'],
+ pass_fail_list)
+ # Generating Dataframe for groups when pass_fail case is not specified
+ else:
+ dataframe = self.mcast_obj_dict[ce][obj_name]["obj"].generate_dataframe(
+ val,
+ self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['resource_alias_A'],
+ self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['resource_eid_A'],
+ self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['resource_host_A'],
+ self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['resource_hw_ver_A'],
+ self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]["clients_A"],
+ self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['port_A'],
+ self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['mode_A'],
+ self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['mac_A'],
+ self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['ssid_A'],
+ self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['channel_A'],
+ self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['traffic_type_A'],
+ self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['traffic_protocol_A'],
+ off_up,
+ off_down,
+ up,
+ down,
+ [],
+ self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['download_rx_drop_percent_A'],
+ [],)
+ # When the client exists in either group.
+ if dataframe:
+ self.overall_report.set_obj_html("", "Group: {}".format(key))
+ self.overall_report.build_objective()
+ dataframe1 = pd.DataFrame(dataframe)
+ self.overall_report.set_table_dataframe(dataframe1)
+ self.overall_report.build_table()
+ else:
+ tos_dataframe_A = {
+ " Client Alias ": self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['resource_alias_A'],
+ " Host eid ": self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['resource_eid_A'],
+ " Host Name ": self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['resource_host_A'],
+ " Device Type / Hw Ver ": self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['resource_hw_ver_A'],
+ " Endp Name": self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]["clients_A"],
+ # TODO : port A being set to many times
+ " Port Name ": self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['port_A'],
+ " Mode ": self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['mode_A'],
+ " Mac ": self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['mac_A'],
+ " SSID ": self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['ssid_A'],
+ " Channel ": self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['channel_A'],
+ " Type of traffic ": self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['traffic_type_A'],
+ " Traffic Protocol ": self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['traffic_protocol_A'],
+ " Offered Upload Rate Per Client": self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['offered_upload_rate_A'],
+ " Offered Download Rate Per Client": self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['offered_download_rate_A'],
+ " Upload Rate Per Client": self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['ul_A'],
+ " Download Rate Per Client": self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['dl_A'],
+ " Drop Percentage (%)": self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['download_rx_drop_percent_A'],
+ }
+ # When pass_Fail criteria specified
+ if self.mcast_obj_dict[ce][obj_name]["obj"].expected_passfail_value or self.mcast_obj_dict[ce][obj_name]["obj"].device_csv_name:
+ tos_dataframe_A[" Expected " + 'Download' + " Rate"] = [float(x) * 10**6 for x in test_input_list]
+ tos_dataframe_A[" Status "] = pass_fail_list
+
+ dataframe3 = pd.DataFrame(tos_dataframe_A)
+ self.overall_report.set_table_dataframe(dataframe3)
+ self.overall_report.build_table()
+
+ # For virtual clients
+ else:
+ tos_dataframe_A = {
+ " Client Alias ": self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['resource_alias_A'],
+ " Host eid ": self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['resource_eid_A'],
+ " Host Name ": self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['resource_host_A'],
+ " Device Type / Hw Ver ": self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['resource_hw_ver_A'],
+ " Endp Name": self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]["clients_A"],
+ " Port Name ": self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['port_A'],
+ " Mode ": self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['mode_A'],
+ " Mac ": self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['mac_A'],
+ " SSID ": self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['ssid_A'],
+ " Channel ": self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['channel_A'],
+ " Type of traffic ": self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['traffic_type_A'],
+ " Traffic Protocol ": self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['traffic_protocol_A'],
+ " Offered Upload Rate Per Client": self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['offered_upload_rate_A'],
+ " Offered Download Rate Per Client": self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['offered_download_rate_A'],
+ " Upload Rate Per Client": self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['ul_A'],
+ " Download Rate Per Client": self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['dl_A'],
+ " Drop Percentage (%)": self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['download_rx_drop_percent_A'],
+ }
+ dataframe3 = pd.DataFrame(tos_dataframe_A)
+ self.overall_report.set_table_dataframe(dataframe3)
+ self.overall_report.build_table()
+
+ # TODO both client_dict_A and client_dict_B contains the same information
+ for tos in tos_list:
+ if (self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_B[tos]["ul_B"] and self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_B[tos]["dl_B"]):
+ min_bps_a = self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_B["min_bps_a"]
+ min_bps_b = self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_B["min_bps_b"]
+
+ dataset_list = [self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_B[tos]["ul_B"], self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_B[tos]["dl_B"]]
+ dataset_length = len(self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_B[tos]["ul_B"])
+
+ x_fig_size = 20
+ y_fig_size = len(self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_B[tos]["clients_B"]) * .4 + 5
+
+ self.overall_report.set_obj_html(
+ _obj_title=f"Individual throughput upstream endp, offered upload bps: {min_bps_a} offered download bps: {min_bps_b} /station for traffic {tos} (WiFi).",
+ _obj=f"The below graph represents individual throughput for {dataset_length} clients running {tos} "
+ f"(WiFi) traffic. Y- axis shows “Client names“ and X-axis shows “"
+ f"Throughput in Mbps”.")
+ self.overall_report.build_objective()
+
+ graph = lf_bar_graph_horizontal(_data_set=dataset_list,
+ _xaxis_name="Throughput in bps",
+ _yaxis_name="Client names",
+ # _yaxis_categories=self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_B[tos]["clients_B"],
+ _yaxis_categories=self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_B[tos]["resource_alias_B"],
+ _graph_image_name=f"{tos}_B{obj_no}",
+ _label=self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_B[tos]['labels'],
+ _color_name=self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_B[tos]['colors'],
+ _color_edge=['black'],
+ _graph_title=f"Individual {tos} upstream side traffic measurement - side b (WIFI) traffic",
+ _title_size=10,
+ _figsize=(x_fig_size, y_fig_size),
+ _show_bar_value=True,
+ _enable_csv=True,
+ _text_font=8,
+ _legend_loc="best",
+ _legend_box=(1.0, 1.0)
+ )
+ graph_png = graph.build_bar_graph_horizontal()
+ self.overall_report.set_graph_image(graph_png)
+ self.overall_report.move_graph_image()
+ self.overall_report.build_graph()
+ self.overall_report.set_csv_filename(graph_png)
+ self.overall_report.move_csv_file()
+
+ tos_dataframe_B = {
+ " Client Alias ": self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_B[tos]['resource_alias_B'],
+ " Host eid ": self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_B[tos]['resource_eid_B'],
+ " Host Name ": self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_B[tos]['resource_host_B'],
+ " Device Type / HW Ver ": self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_B[tos]['resource_hw_ver_B'],
+ " Endp Name": self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_B[tos]["clients_B"],
+ # TODO get correct size
+ " Port Name ": self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_B[tos]['port_B'],
+ " Mode ": self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_B[tos]['mode_B'],
+ " Mac ": self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_B[tos]['mac_B'],
+ " SSID ": self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_B[tos]['ssid_B'],
+ " Channel ": self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_B[tos]['channel_B'],
+ " Type of traffic ": self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_B[tos]['traffic_type_B'],
+ " Traffic Protocol ": self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_B[tos]['traffic_protocol_B'],
+ " Offered Upload Rate Per Client": self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_B[tos]['offered_upload_rate_B'],
+ " Offered Download Rate Per Client": self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_B[tos]['offered_download_rate_B'],
+ " Upload Rate Per Client": self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_B[tos]['ul_B'],
+ " Download Rate Per Client": self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_B[tos]['dl_B'],
+ " Drop Percentage (%)": self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_B[tos]['download_rx_drop_percent_B']
+ }
+
+ dataframe3 = pd.DataFrame(tos_dataframe_B)
+ self.overall_report.set_table_dataframe(dataframe3)
+ self.overall_report.build_table()
+
+ # L3 total traffic # TODO csv_results_file present yet not readable
+ # self.overall_report.set_table_title("Total Layer 3 Cross-Connect Traffic across all Stations")
+ # self.overall_report.build_table_title()
+ # self.overall_report.set_table_dataframe_from_csv(self.mcast_obj_dict[ce][obj_name]["obj"].csv_results_file)
+ # self.overall_report.build_table()
+
+ # empty dictionarys evaluate to false , placing tables in output
+ if bool(self.mcast_obj_dict[ce][obj_name]["obj"].dl_port_csv_files):
+ for key, value in self.mcast_obj_dict[ce][obj_name]["obj"].dl_port_csv_files.items():
+ if self.mcast_obj_dict[ce][obj_name]["obj"].csv_data_to_report:
+ # read the csv file
+ self.overall_report.set_table_title("Layer 3 Cx Traffic {key}".format(key=key))
+ self.overall_report.build_table_title()
+ self.overall_report.set_table_dataframe_from_csv(value.name)
+ self.overall_report.build_table()
+
+ # read in column heading and last line
+ df = pd.read_csv(value.name)
+ last_row = df.tail(1)
+ self.overall_report.set_table_title(
+ "Layer 3 Cx Traffic Last Reporting Interval {key}".format(key=key))
+ self.overall_report.build_table_title()
+ self.overall_report.set_table_dataframe(last_row)
+ self.overall_report.build_table()
+ if ce == "series":
+ obj_no += 1
+ obj_name = f"mcast_test_{obj_no}"
+ else:
+ break
+
+ elif test_name == "vs_test":
+ obj_no=1
+ obj_name = "vs_test"
+ if ce == "series":
+ obj_name += "_1"
+ while obj_name in self.vs_obj_dict[ce]:
+ if ce == "parallel":
+ obj_no = ''
+ params = self.vs_obj_dict[ce][obj_name]["data"].copy()
+ date = params["date"]
+
+ iterations_before_test_stopped_by_user = (
+ params["iterations_before_test_stopped_by_user"].copy()
+ if isinstance(params["iterations_before_test_stopped_by_user"], (list, dict, set))
+ else params["iterations_before_test_stopped_by_user"]
+ )
+
+ test_setup_info = (
+ params["test_setup_info"].copy()
+ if isinstance(params["test_setup_info"], (list, dict, set))
+ else params["test_setup_info"]
+ )
+
+ realtime_dataset = (
+ params["realtime_dataset"].copy()
+ if isinstance(params["realtime_dataset"], (list, dict, set))
+ else params["realtime_dataset"]
+ )
+
+ report_path = (
+ params["report_path"].copy()
+ if isinstance(params["report_path"], (list, dict, set))
+ else params["report_path"]
+ )
+
+ cx_order_list = (
+ params["cx_order_list"].copy()
+ if isinstance(params["cx_order_list"], (list, dict, set))
+ else params["cx_order_list"]
+ )
+ self.overall_report.set_obj_html(_obj_title=f'Video Streaming Test {obj_no}', _obj="")
+ self.overall_report.build_objective()
+ created_incremental_values = self.vs_obj_dict[ce][obj_name]["obj"].get_incremental_capacity_list()
+ keys = list(self.vs_obj_dict[ce][obj_name]["obj"].http_profile.created_cx.keys())
+
+ self.overall_report.set_table_title("Input Parameters")
+ self.overall_report.build_table_title()
+ if self.vs_obj_dict[ce][obj_name]["obj"].config:
+ test_setup_info["SSID"] = self.vs_obj_dict[ce][obj_name]["obj"].ssid
+ test_setup_info["Password"] = self.vs_obj_dict[ce][obj_name]["obj"].passwd
+ test_setup_info["ENCRYPTION"] = self.vs_obj_dict[ce][obj_name]["obj"].encryp
+ elif len(self.vs_obj_dict[ce][obj_name]["obj"].selected_groups) > 0 and len(self.vs_obj_dict[ce][obj_name]["obj"].selected_profiles) > 0:
+ # Map each group with a profile
+ gp_pairs = zip(self.vs_obj_dict[ce][obj_name]["obj"].selected_groups, self.vs_obj_dict[ce][obj_name]["obj"].selected_profiles)
+ # Create a string by joining the mapped pairs
+ gp_map = ", ".join(f"{group} -> {profile}" for group, profile in gp_pairs)
+ test_setup_info["Configuration"] = gp_map
+
+ self.overall_report.test_setup_table(value="Test Setup Information", test_setup_data=test_setup_info)
+
+ device_type = []
+ username = []
+ ssid = []
+ mac = []
+ channel = []
+ mode = []
+ rssi = []
+ channel = []
+ tx_rate = []
+ resource_ids = list(map(int, self.vs_obj_dict[ce][obj_name]["obj"].resource_ids.split(',')))
+ try:
+ eid_data = self.vs_obj_dict[ce][obj_name]["obj"].json_get("ports?fields=alias,mac,mode,Parent Dev,rx-rate,tx-rate,ssid,signal,channel")
+ except KeyError:
+ logger.error("Error: 'interfaces' key not found in port data")
+ exit(1)
+
+ # Loop through interfaces
+ for alias in eid_data["interfaces"]:
+ for i in alias:
+ # Check interface index and alias
+ if int(i.split(".")[1]) > 1 and alias[i]["alias"] == 'wlan0':
+
+ # Get resource data for specific interface
+ resource_hw_data = self.vs_obj_dict[ce][obj_name]["obj"].json_get("/resource/" + i.split(".")[0] + "/" + i.split(".")[1])
+ hw_version = resource_hw_data['resource']['hw version']
+
+ # Filter based on OS and resource ID
+ if not hw_version.startswith(('Win', 'Linux', 'Apple')) and int(resource_hw_data['resource']['eid'].split('.')[1]) in resource_ids:
+ device_type.append('Android')
+ username.append(resource_hw_data['resource']['user'])
+ ssid.append(alias[i]['ssid'])
+ mac.append(alias[i]['mac'])
+ mode.append(alias[i]['mode'])
+ rssi.append(alias[i]['signal'])
+ channel.append(alias[i]['channel'])
+ tx_rate.append(alias[i]['tx-rate'])
+ total_urls = self.vs_obj_dict[ce][obj_name]["obj"].data["total_urls"]
+ total_err = self.vs_obj_dict[ce][obj_name]["obj"].data["total_err"]
+ total_buffer = self.vs_obj_dict[ce][obj_name]["obj"].data["total_buffer"]
+ max_bytes_rd_list = []
+ avg_rx_rate_list = []
+ # Iterate through the length of cx_order_list
+ for iter in range(len(iterations_before_test_stopped_by_user)):
+ data_set_in_graph, wait_time_data, devices_on_running_state, device_names_on_running = [], [], [], []
+ devices_data_to_create_wait_time_bar_graph = []
+ max_video_rate, min_video_rate, avg_video_rate = [], [], []
+ total_url_data, rssi_data = [], []
+ trimmed_data_set_in_graph = []
+ max_bytes_rd_list = []
+ avg_rx_rate_list = []
+ # Retrieve data for the previous iteration, if it's not the first iteration
+ if iter != 0:
+ before_data_iter = realtime_dataset[realtime_dataset['iteration'] == iter]
+ # Retrieve data for the current iteration
+ data_iter = realtime_dataset[realtime_dataset['iteration'] == iter + 1]
+
+ # Populate the list of devices on running state and their corresponding usernames
+ for j in range(created_incremental_values[iter]):
+ devices_on_running_state.append(keys[j])
+ device_names_on_running.append(username[j])
+
+ # Iterate through each device currently running
+ for k in devices_on_running_state:
+ # Filter columns related to the current device
+ columns_with_substring = [col for col in data_iter.columns if k in col]
+ filtered_df = data_iter[columns_with_substring]
+ min_val = self.vs_obj_dict[ce][obj_name]["obj"].process_list(filtered_df[[col for col in filtered_df.columns if "video_format_bitrate" in col][0]].values.tolist())
+ if iter != 0:
+ # Filter columns related to the current device from the previous iteration
+ before_iter_columns_with_substring = [col for col in before_data_iter.columns if k in col]
+ before_filtered_df = before_data_iter[before_iter_columns_with_substring]
+
+ # Extract and compute max, min, and average video rates
+ max_video_rate.append(max(filtered_df[[col for col in filtered_df.columns if "video_format_bitrate" in col][0]].values.tolist()))
+ min_video_rate.append(min_val)
+ avg_video_rate.append(round(sum(filtered_df[[col for col in filtered_df.columns if "video_format_bitrate" in col][0]].values.tolist()) /
+ len(filtered_df[[col for col in filtered_df.columns if "video_format_bitrate" in col][0]].values.tolist()), 2))
+ wait_time_data.append(filtered_df[[col for col in filtered_df.columns if "total_wait_time" in col][0]].values.tolist()[-1])
+ rssi_data.append(int(round(sum(filtered_df[[col for col in filtered_df.columns if "RSSI" in col][0]].values.tolist()) /
+ len(filtered_df[[col for col in filtered_df.columns if "RSSI" in col][0]].values.tolist()), 2)) * -1)
+ # Extract maximum bytes read for the device
+ max_bytes_rd = max(filtered_df[[col for col in filtered_df.columns if "bytes_rd" in col][0]].values.tolist())
+ max_bytes_rd_list.append(max_bytes_rd)
+
+ # Calculate and append the average RX rate in Mbps
+ rx_rate_values = filtered_df[[col for col in filtered_df.columns if "rx rate" in col][0]].values.tolist()
+ avg_rx_rate_list.append(round((sum(rx_rate_values) / len(rx_rate_values)) / 1_000_000, 2)) # Convert bps to Mbps
+
+ if iter != 0:
+ # Calculate the difference in total URLs between the current and previous iterations
+ total_url_data.append(abs(filtered_df[[col for col in filtered_df.columns if "total_urls" in col][0]].values.tolist()[-1] -
+ before_filtered_df[[col for col in before_filtered_df.columns if "total_urls" in col][0]].values.tolist()[-1]))
+ else:
+ # Append the total URLs for the first iteration
+ total_url_data.append(filtered_df[[col for col in filtered_df.columns if "total_urls" in col][0]].values.tolist()[-1])
+
+ # Append the wait time data to the list for creating the wait time bar graph
+ devices_data_to_create_wait_time_bar_graph.append(wait_time_data)
+
+ # Extract overall video format bitrate values for the current iteration and append to data_set_in_graph
+ video_streaming_values_list = realtime_dataset['overall_video_format_bitrate'][realtime_dataset['iteration'] == iter + 1].values.tolist()
+ data_set_in_graph.append(video_streaming_values_list)
+
+ # Trim the data in data_set_in_graph and append to trimmed_data_set_in_graph
+ for _ in range(len(data_set_in_graph)):
+ trimmed_data_set_in_graph.append(self.vs_obj_dict[ce][obj_name]["obj"].trim_data(len(data_set_in_graph[_]), data_set_in_graph[_]))
+
+ # If there are multiple incremental values, add custom HTML content to the report for the current iteration
+ if len(created_incremental_values) > 1:
+ self.overall_report.set_custom_html(f"Iteration-{iter + 1}
")
+ self.overall_report.build_custom()
+
+ self.overall_report.set_obj_html(
+ _obj_title=f"Realtime Video Rate: Number of devices running: {len(device_names_on_running)}",
+ _obj="")
+ self.overall_report.build_objective()
+
+ # Create a line graph for video rate over time
+ graph = lf_line_graph(_data_set=trimmed_data_set_in_graph,
+ _xaxis_name="Time",
+ _yaxis_name="Video Rate (Mbps)",
+ _xaxis_categories=self.vs_obj_dict[ce][obj_name]["obj"].trim_data(len(realtime_dataset['timestamp'][realtime_dataset['iteration'] == iter + 1].values.tolist()),
+ realtime_dataset['timestamp'][realtime_dataset['iteration'] == iter + 1].values.tolist()),
+ _label=['Rate'],
+ _graph_image_name=f"vs_line_graph{iter}{obj_no}"
+ )
+ graph_png = graph.build_line_graph()
+ logger.info("graph name {}".format(graph_png))
+ self.overall_report.set_graph_image(graph_png)
+ self.overall_report.move_graph_image()
+
+ self.overall_report.build_graph()
+
+ # Define figure size for horizontal bar graphs
+ x_fig_size = 15
+ y_fig_size = len(devices_on_running_state) * .5 + 4
+
+ self.overall_report.set_obj_html(
+ _obj_title="Total Urls Per Device",
+ _obj="")
+ self.overall_report.build_objective()
+ # Create a horizontal bar graph for total URLs per device
+ graph = lf_bar_graph_horizontal(_data_set=[total_urls[:created_incremental_values[iter]]],
+ _xaxis_name="Total Urls",
+ _yaxis_name="Devices",
+ _graph_image_name=f"total_urls_image_name{iter}{obj_no}",
+ _label=["Total Urls"],
+ _yaxis_categories=device_names_on_running,
+ _legend_loc="best",
+ _legend_box=(1.0, 1.0),
+ _show_bar_value=True,
+ _figsize=(x_fig_size, y_fig_size)
+ # _color=['lightcoral']
+ )
+ graph_png = graph.build_bar_graph_horizontal()
+ logger.info("wait time graph name {}".format(graph_png))
+ graph.build_bar_graph_horizontal()
+ self.overall_report.set_graph_image(graph_png)
+ self.overall_report.move_graph_image()
+ self.overall_report.build_graph()
+
+ self.overall_report.set_obj_html(
+ _obj_title="Max/Min Video Rate Per Device",
+ _obj="")
+ self.overall_report.build_objective()
+
+ # Create a horizontal bar graph for max and min video rates per device
+ graph = lf_bar_graph_horizontal(_data_set=[max_video_rate, min_video_rate],
+ _xaxis_name="Max/Min Video Rate(Mbps)",
+ _yaxis_name="Devices",
+ _graph_image_name=f"max-min-video-rate_image_name{iter}{obj_no}",
+ _label=['Max Video Rate', 'Min Video Rate'],
+ _yaxis_categories=device_names_on_running,
+ _legend_loc="best",
+ _legend_box=(1.0, 1.0),
+ _show_bar_value=True,
+ _figsize=(x_fig_size, y_fig_size)
+ # _color=['lightcoral']
+ )
+ graph_png = graph.build_bar_graph_horizontal()
+ logger.info("max/min graph name {}".format(graph_png))
+ graph.build_bar_graph_horizontal()
+ self.overall_report.set_graph_image(graph_png)
+ self.overall_report.move_graph_image()
+ self.overall_report.build_graph()
+
+ self.overall_report.set_obj_html(
+ _obj_title="Wait Time Per Device",
+ _obj="")
+ self.overall_report.build_objective()
+
+ # Create a horizontal bar graph for wait time per device
+ graph = lf_bar_graph_horizontal(_data_set=devices_data_to_create_wait_time_bar_graph,
+ _xaxis_name="Wait Time(seconds)",
+ _yaxis_name="Devices",
+ _graph_image_name=f"wait_time_image_name{iter}{obj_no}",
+ _label=['Wait Time'],
+ _yaxis_categories=device_names_on_running,
+ _legend_loc="best",
+ _legend_box=(1.0, 1.0),
+ _show_bar_value=True,
+ _figsize=(x_fig_size, y_fig_size)
+ # _color=['lightcoral']
+ )
+ graph_png = graph.build_bar_graph_horizontal()
+ logger.info("wait time graph name {}".format(graph_png))
+ graph.build_bar_graph_horizontal()
+ self.overall_report.set_graph_image(graph_png)
+ self.overall_report.move_graph_image()
+ self.overall_report.build_graph()
+
+ if self.vs_obj_dict[ce][obj_name]["obj"].dowebgui and self.vs_obj_dict[ce][obj_name]["obj"].get_live_view:
+ script_dir = os.path.dirname(os.path.abspath(__file__))
+
+ self.overall_report.set_custom_html("No of Buffers and Wait Time %
")
+ self.overall_report.build_custom()
+
+ for floor in range(int(self.vs_obj_dict[ce][obj_name]["obj"].floors)):
+ # Construct expected image paths
+ vs_buffer_image = os.path.join(script_dir, "heatmap_images", f"{self.vs_obj_dict[ce][obj_name]['obj'].test_name}_vs_buffer_{floor+1}.png")
+ vs_wait_time_image = os.path.join(script_dir, "heatmap_images", f"{self.vs_obj_dict[ce][obj_name]['obj'].test_name}_vs_wait_time_{floor+1}.png")
+
+
+ # Wait for all required images to be generated (up to timeout)
+ timeout = 60 # seconds
+ start_time = time.time()
+
+ while not (os.path.exists(vs_buffer_image) and os.path.exists(vs_wait_time_image)):
+ if time.time() - start_time > timeout:
+ print(f"Timeout: Heatmap images for floor {floor + 1} not found within {timeout} seconds.")
+ break
+ time.sleep(1)
+
+ # Generate report sections for each image if it exists
+ for image_path in [vs_buffer_image, vs_wait_time_image,]:
+ if os.path.exists(image_path):
+ self.overall_report.set_custom_html(f'
')
+ self.overall_report.build_custom()
+
+ # Table 1
+ self.overall_report.set_obj_html("Overall - Detailed Result Table", "The below tables provides detailed information for the Video Streaming test.")
+ self.overall_report.build_objective()
+ test_data = {
+ "iter": iter,
+ "created_incremental_values": created_incremental_values,
+ "device_type": device_type,
+ "username": username,
+ "ssid": ssid,
+ "mac": mac,
+ "channel": channel,
+ "mode": mode,
+ "total_buffer": total_buffer,
+ "wait_time_data": wait_time_data,
+ "min_video_rate": min_video_rate,
+ "avg_video_rate": avg_video_rate,
+ "max_video_rate": max_video_rate,
+ "total_urls": total_urls,
+ "total_err": total_err,
+ "rssi_data": rssi_data,
+ "tx_rate": tx_rate,
+ "max_bytes_rd_list": max_bytes_rd_list,
+ "avg_rx_rate_list": avg_rx_rate_list
+ }
+
+ dataframe = self.vs_obj_dict[ce][obj_name]["obj"].handle_passfail_criteria(test_data)
+
+ dataframe1 = pd.DataFrame(dataframe)
+ self.overall_report.set_table_dataframe(dataframe1)
+ self.overall_report.build_table()
+
+ # Set and build title for the overall results table
+ self.overall_report.set_obj_html("Detailed Total Errors Table", "The below tables provides detailed information of total errors for the web browsing test.")
+ self.overall_report.build_objective()
+ dataframe2 = {
+ " DEVICE": username[:created_incremental_values[iter]],
+ " TOTAL ERRORS ": total_err[:created_incremental_values[iter]],
+ }
+ dataframe3 = pd.DataFrame(dataframe2)
+ self.overall_report.set_table_dataframe(dataframe3)
+ self.overall_report.build_table()
+ if ce == "series":
+ obj_no += 1
+ obj_name = f"vs_test_{obj_no}"
+ else:
+ break
+
+ elif test_name =="rb_test":
+ # Real Browser test: emit one report section per completed rb_test object.
+ # "parallel" runs hold a single unnumbered object; "series" runs are named
+ # rb_test_1, rb_test_2, ... and iterated until a name is no longer present.
+ obj_no=1
+ obj_name = "rb_test"
+ if ce == "series":
+ obj_name += "_1"
+ while obj_name in self.rb_obj_dict[ce]:
+ if ce == "parallel":
+ # Parallel mode shows no index in the section title.
+ obj_no = ''
+ self.overall_report.set_obj_html(_obj_title=f'Real Browser Test {obj_no}', _obj="")
+ self.overall_report.build_objective()
+ self.overall_report.set_table_title("Test Parameters:")
+ self.overall_report.build_table_title()
+
+ # Per-device accumulators, populated from the test's CSV output below.
+ final_eid_data = []
+ mac_data = []
+ channel_data = []
+ signal_data = []
+ ssid_data = []
+ tx_rate_data = []
+ device_type_data = []
+ device_names = []
+ total_urls = []
+ time_to_target_urls = []
+ uc_min_data = []
+ uc_max_data = []
+ uc_avg_data = []
+ total_err_data = []
+
+ # CSVs live in the test's own report directory, except when driven by
+ # the web GUI, where results are collected under self.result_dir.
+ csv_paths = self.rb_obj_dict[ce][obj_name]["obj"].report_path_date_time if not self.dowebgui else self.result_dir
+ final_eid_data, mac_data, channel_data, signal_data, ssid_data, tx_rate_data, device_names, device_type_data = self.rb_obj_dict[ce][obj_name]["obj"].extract_device_data('{}/real_time_data.csv'.format(csv_paths))
+
+ test_setup_info = self.rb_obj_dict[ce][obj_name]["obj"].generate_test_setup_info()
+ self.overall_report.test_setup_table(
+ test_setup_data=test_setup_info, value='Test Parameters')
+ # NOTE(review): bare attribute access below has no effect — looks like a
+ # leftover debug statement; confirm before removing.
+ self.rb_obj_dict[ce][obj_name]["obj"].csv_file_names
+ # One graph pair (URLs, time-taken) per result CSV; the aggregate
+ # real_time_data.csv is skipped since it was already consumed above.
+ for i in range(0, len(self.rb_obj_dict[ce][obj_name]["obj"].csv_file_names)):
+ if self.rb_obj_dict[ce][obj_name]["obj"].csv_file_names[i].startswith("real_time_data.csv"):
+ continue
+
+ final_eid_data, mac_data, channel_data, signal_data, ssid_data, tx_rate_data, device_names, device_type_data = self.rb_obj_dict[ce][obj_name]["obj"].extract_device_data("{}/{}".format(csv_paths,self.rb_obj_dict[ce][obj_name]["obj"].csv_file_names[i]))
+ self.overall_report.set_graph_title("Successful URL's per Device")
+ self.overall_report.build_graph_title()
+
+ data = pd.read_csv("{}/{}".format(csv_paths,self.rb_obj_dict[ce][obj_name]["obj"].csv_file_names[i]))
+
+ # Successful-URL counts per device; a missing column is a hard error.
+ if 'total_urls' in data.columns:
+ total_urls = data['total_urls'].tolist()
+ else:
+ raise ValueError("The 'total_urls' column was not found in the CSV file.")
+
+ # Figure height scales with the number of devices so labels stay legible.
+ x_fig_size = 18
+ y_fig_size = len(device_type_data) * 1 + 4
+ # NOTE(review): debug print left in place.
+ print('DEVICE NAMES',device_names)
+ bar_graph_horizontal = lf_bar_graph_horizontal(
+ _data_set=[total_urls],
+ _xaxis_name="URL",
+ _yaxis_name="Devices",
+ _yaxis_label=device_names,
+ _yaxis_categories=device_names,
+ _yaxis_step=1,
+ _yticks_font=8,
+ _bar_height=.20,
+ _show_bar_value=True,
+ _figsize=(x_fig_size, y_fig_size),
+ _graph_title="URLs",
+ _graph_image_name=f"{self.rb_obj_dict[ce][obj_name]['obj'].csv_file_names[i]}_urls_per_device{obj_no}",
+ _label=["URLs"]
+ )
+ graph_image = bar_graph_horizontal.build_bar_graph_horizontal()
+ self.overall_report.set_graph_image(graph_image)
+ self.overall_report.move_graph_image()
+ self.overall_report.build_graph()
+
+ self.overall_report.set_graph_title(f"Time Taken Vs Device For Completing {self.rb_obj_dict[ce][obj_name]['obj'].count} RealTime URLs")
+ self.overall_report.build_graph_title()
+
+ # Seconds each device took to reach the URL target; mandatory column.
+ if 'time_to_target_urls' in data.columns:
+ time_to_target_urls = data['time_to_target_urls'].tolist()
+ else:
+ raise ValueError("The 'time_to_target_urls' column was not found in the CSV file.")
+
+ x_fig_size = 18
+ y_fig_size = len(device_type_data) * 1 + 4
+ bar_graph_horizontal = lf_bar_graph_horizontal(
+ _data_set=[time_to_target_urls],
+ _xaxis_name="Time (in Seconds)",
+ _yaxis_name="Devices",
+ _yaxis_label=device_names,
+ _yaxis_categories=device_names,
+ _yaxis_step=1,
+ _yticks_font=8,
+ _bar_height=.20,
+ _show_bar_value=True,
+ _figsize=(x_fig_size, y_fig_size),
+ _graph_title="Time Taken",
+ _graph_image_name=f"{self.rb_obj_dict[ce][obj_name]['obj'].csv_file_names[i]}_time_taken_for_urls{obj_no}",
+ _label=["Time (in sec)"]
+ )
+ graph_image = bar_graph_horizontal.build_bar_graph_horizontal()
+ self.overall_report.set_graph_image(graph_image)
+ self.overall_report.move_graph_image()
+ self.overall_report.build_graph()
+
+ # URL-connect timing stats and error counts — all mandatory columns.
+ if 'uc_min' in data.columns:
+ uc_min_data = data['uc_min'].tolist()
+ else:
+ raise ValueError("The 'uc_min' column was not found in the CSV file.")
+
+ if 'uc_max' in data.columns:
+ uc_max_data = data['uc_max'].tolist()
+ else:
+ raise ValueError("The 'uc_max' column was not found in the CSV file.")
+
+ if 'uc_avg' in data.columns:
+ uc_avg_data = data['uc_avg'].tolist()
+ else:
+ raise ValueError("The 'uc_avg' column was not found in the CSV file.")
+
+ if 'total_err' in data.columns:
+ total_err_data = data['total_err'].tolist()
+ else:
+ raise ValueError("The 'total_err' column was not found in the CSV file.")
+
+ self.overall_report.set_table_title("Final Test Results")
+ self.overall_report.build_table_title()
+ # Include pass/fail columns only when acceptance criteria were supplied.
+ if self.rb_obj_dict[ce][obj_name]["obj"].expected_passfail_value or self.rb_obj_dict[ce][obj_name]["obj"].device_csv_name:
+ pass_fail_list, test_input_list = self.rb_obj_dict[ce][obj_name]["obj"].generate_pass_fail_list(device_type_data, device_names, total_urls)
+
+ # NOTE(review): "Total Erros" is a typo, but it is a visible table
+ # header (runtime string) — left as-is here.
+ final_test_results = {
+
+ "Device Type": device_type_data,
+ "Hostname": device_names,
+ "SSID": ssid_data,
+ "MAC": mac_data,
+ "Channel": channel_data,
+ "UC-MIN (ms)": uc_min_data,
+ "UC-MAX (ms)": uc_max_data,
+ "UC-AVG (ms)": uc_avg_data,
+ "Total Successful URLs": total_urls,
+ "Expected URLS": test_input_list,
+ "Total Erros": total_err_data,
+ "RSSI": signal_data,
+ "Link Speed": tx_rate_data,
+ "Status ": pass_fail_list
+
+ }
+ else:
+ final_test_results = {
+
+ "Device Type": device_type_data,
+ "Hostname": device_names,
+ "SSID": ssid_data,
+ "MAC": mac_data,
+ "Channel": channel_data,
+ "UC-MIN (ms)": uc_min_data,
+ "UC-MAX (ms)": uc_max_data,
+ "UC-AVG (ms)": uc_avg_data,
+ "Total Successful URLs": total_urls,
+ "Total Erros": total_err_data,
+ "RSSI": signal_data,
+ "Link Speed": tx_rate_data,
+
+ }
+ logger.info(f"dataframe realbrowser {final_test_results}")
+ test_results_df = pd.DataFrame(final_test_results)
+ self.overall_report.set_table_dataframe(test_results_df)
+ self.overall_report.build_table()
+
+ # Web-GUI runs chdir into the report dir earlier; restore the cwd.
+ if self.rb_obj_dict[ce][obj_name]["obj"].dowebgui:
+
+ os.chdir(self.rb_obj_dict[ce][obj_name]["obj"].original_dir)
+
+ # Series mode: advance to the next numbered object; parallel: one pass.
+ if ce == "series":
+ obj_no += 1
+ obj_name = f"rb_test_{obj_no}"
+ else:
+ break
+
+ elif test_name == "yt_test":
+ # YouTube streaming test: one report section per yt_test object
+ # ("parallel" = single unnumbered object, "series" = yt_test_1, _2, ...).
+ obj_no=1
+ obj_name = "yt_test"
+ if ce == "series":
+ obj_name += "_1"
+ while obj_name in self.yt_obj_dict[ce]:
+ if ce == "parallel":
+ obj_no = ''
+ # Merge the latest per-device playback stats from the stats API into
+ # the object's accumulated result dict, with safe defaults.
+ result_data = self.yt_obj_dict[ce][obj_name]["obj"].stats_api_response
+ for device, stats in result_data.items():
+ self.yt_obj_dict[ce][obj_name]["obj"].mydatajson.setdefault(device, {}).update({
+ "Viewport": stats.get("Viewport", ""),
+ "DroppedFrames": stats.get("DroppedFrames", "0"),
+ "TotalFrames": stats.get("TotalFrames", "0"),
+ "CurrentRes": stats.get("CurrentRes", ""),
+ "OptimalRes": stats.get("OptimalRes", ""),
+ "BufferHealth": stats.get("BufferHealth", "0.0"),
+ "Timestamp": stats.get("Timestamp", ""),
+ })
+
+ # Build the setup table for whichever configuration mode was used:
+ # explicit device config, group->profile mapping, or default.
+ if self.yt_obj_dict[ce][obj_name]["obj"].config:
+
+ # Test setup info
+ test_setup_info = {
+ 'Test Name': 'YouTube Streaming Test',
+ 'Duration (in Minutes)': self.yt_obj_dict[ce][obj_name]["obj"].duration,
+ 'Resolution': self.yt_obj_dict[ce][obj_name]["obj"].resolution,
+ 'Configured Devices': self.yt_obj_dict[ce][obj_name]["obj"].hostname_os_combination,
+ 'No of Devices :': f' Total({len(self.yt_obj_dict[ce][obj_name]["obj"].real_sta_os_types)}) : W({self.yt_obj_dict[ce][obj_name]["obj"].windows}),L({self.yt_obj_dict[ce][obj_name]["obj"].linux}),M({self.yt_obj_dict[ce][obj_name]["obj"].mac})',
+ "Video URL": self.yt_obj_dict[ce][obj_name]["obj"].url,
+ "SSID": self.yt_obj_dict[ce][obj_name]["obj"].ssid,
+ "Security": self.yt_obj_dict[ce][obj_name]["obj"].security,
+
+ }
+
+ elif len(self.yt_obj_dict[ce][obj_name]["obj"].selected_groups) > 0 and len(self.yt_obj_dict[ce][obj_name]["obj"].selected_profiles) > 0:
+ # Pair each selected group with its profile for display.
+ gp_pairs = zip(self.yt_obj_dict[ce][obj_name]["obj"].selected_groups, self.yt_obj_dict[ce][obj_name]["obj"].selected_profiles)
+ gp_map = ", ".join(f"{group} -> {profile}" for group, profile in gp_pairs)
+
+ # Test setup info
+ test_setup_info = {
+ 'Test Name': 'YouTube Streaming Test',
+ 'Duration (in Minutes)': self.yt_obj_dict[ce][obj_name]["obj"].duration,
+ 'Resolution': self.yt_obj_dict[ce][obj_name]["obj"].resolution,
+ "Configuration": gp_map,
+ 'Configured Devices': self.yt_obj_dict[ce][obj_name]["obj"].hostname_os_combination,
+ 'No of Devices :': f' Total({len(self.yt_obj_dict[ce][obj_name]["obj"].real_sta_os_types)}) : W({self.yt_obj_dict[ce][obj_name]["obj"].windows}),L({self.yt_obj_dict[ce][obj_name]["obj"].linux}),M({self.yt_obj_dict[ce][obj_name]["obj"].mac})',
+ "Video URL": self.yt_obj_dict[ce][obj_name]["obj"].url,
+
+ }
+ else:
+ # Test setup info
+ test_setup_info = {
+ 'Test Name': 'YouTube Streaming Test',
+ 'Duration (in Minutes)': self.yt_obj_dict[ce][obj_name]["obj"].duration,
+ 'Resolution': self.yt_obj_dict[ce][obj_name]["obj"].resolution,
+ 'Configured Devices': self.yt_obj_dict[ce][obj_name]["obj"].hostname_os_combination,
+ 'No of Devices :': f' Total({len(self.yt_obj_dict[ce][obj_name]["obj"].real_sta_os_types)}) : W({self.yt_obj_dict[ce][obj_name]["obj"].windows}),L({self.yt_obj_dict[ce][obj_name]["obj"].linux}),M({self.yt_obj_dict[ce][obj_name]["obj"].mac})',
+ "Video URL": self.yt_obj_dict[ce][obj_name]["obj"].url,
+
+ }
+ self.overall_report.set_obj_html(_obj_title=f'Youtube Streaming Test {obj_no}', _obj="")
+ self.overall_report.build_objective()
+ self.overall_report.test_setup_table(
+ test_setup_data=test_setup_info, value='Test Parameters')
+
+ # Per-device playback metrics, aligned index-for-index with
+ # real_sta_hostname for the table below.
+ viewport_list = []
+ current_res_list = []
+ optimal_res_list = []
+
+ dropped_frames_list = []
+ total_frames_list = []
+ max_buffer_health_list = []
+ min_buffer_health_list = []
+
+ for hostname in self.yt_obj_dict[ce][obj_name]["obj"].real_sta_hostname:
+ if hostname in self.yt_obj_dict[ce][obj_name]["obj"].mydatajson:
+ stats = self.yt_obj_dict[ce][obj_name]["obj"].mydatajson[hostname]
+ viewport_list.append(stats.get("Viewport", ""))
+ current_res_list.append(stats.get("CurrentRes", ""))
+ optimal_res_list.append(stats.get("OptimalRes", ""))
+
+ dropped_frames = stats.get("DroppedFrames", "0")
+ total_frames = stats.get("TotalFrames", "0")
+ # NOTE(review): the "0,0" default looks like a typo for "0.0";
+ # harmless in practice because float("0,0") is caught below and
+ # falls back to 0.0, but worth fixing at source.
+ max_buffer_health = stats.get("maxbufferhealth", "0,0")
+ min_buffer_health = stats.get("minbufferhealth", "0.0")
+ # Values arrive as strings; unparsable ones degrade to 0 / 0.0.
+ try:
+ dropped_frames_list.append(int(dropped_frames))
+ except ValueError:
+ dropped_frames_list.append(0)
+
+ try:
+ total_frames_list.append(int(total_frames))
+ except ValueError:
+ total_frames_list.append(0)
+ try:
+ max_buffer_health_list.append(float(max_buffer_health))
+ except ValueError:
+ max_buffer_health_list.append(0.0)
+
+ try:
+ min_buffer_health_list.append(float(min_buffer_health))
+ except ValueError:
+ min_buffer_health_list.append(0.0)
+
+ else:
+ # Device reported no stats at all — fill neutral placeholders.
+ viewport_list.append("NA")
+ current_res_list.append("NA")
+ optimal_res_list.append("NA")
+ dropped_frames_list.append(0)
+ total_frames_list.append(0)
+ max_buffer_health_list.append(0.0)
+ min_buffer_health_list.append(0.0)
+
+ # graph of frames dropped
+ self.overall_report.set_graph_title("Total Frames vs Frames dropped")
+ self.overall_report.build_graph_title()
+ x_fig_size = 25
+ y_fig_size = len(self.yt_obj_dict[ce][obj_name]["obj"].device_names) * .5 + 4
+
+ graph = lf_bar_graph_horizontal(_data_set=[dropped_frames_list, total_frames_list],
+ _xaxis_name="No of Frames",
+ _yaxis_name="Devices",
+ _yaxis_categories=self.yt_obj_dict[ce][obj_name]["obj"].real_sta_hostname,
+ _graph_image_name=f"Dropped Frames vs Total Frames{obj_no}",
+ _label=["dropped Frames", "Total Frames"],
+ _color=None,
+ _color_edge='red',
+ _figsize=(x_fig_size, y_fig_size),
+ _show_bar_value=True,
+ _text_font=6,
+ _text_rotation=True,
+ _enable_csv=True,
+ _legend_loc="upper right",
+ _legend_box=(1.1, 1),
+ )
+ graph_image = graph.build_bar_graph_horizontal()
+ self.overall_report.set_graph_image(graph_image)
+ self.overall_report.move_graph_image()
+ self.overall_report.build_graph()
+
+ self.overall_report.set_table_title('Test Results')
+ self.overall_report.build_table_title()
+
+ # NOTE(review): "Video Resoultion" is a typo but a visible table
+ # header (runtime string) — left unchanged here.
+ test_results = {
+ "Hostname": self.yt_obj_dict[ce][obj_name]["obj"].real_sta_hostname,
+ "OS Type": self.yt_obj_dict[ce][obj_name]["obj"].real_sta_os_types,
+ "MAC": self.yt_obj_dict[ce][obj_name]["obj"].mac_list,
+ "RSSI": self.yt_obj_dict[ce][obj_name]["obj"].rssi_list,
+ "Link Rate": self.yt_obj_dict[ce][obj_name]["obj"].link_rate_list,
+ "ViewPort": viewport_list,
+ "SSID": self.yt_obj_dict[ce][obj_name]["obj"].ssid_list,
+ "Video Resoultion": current_res_list,
+ "Max Buffer Health (Seconds)": max_buffer_health_list,
+ "Min Buffer health (Seconds)": min_buffer_health_list,
+ "Total Frames": total_frames_list,
+ "Dropped Frames": dropped_frames_list,
+
+
+ }
+
+ test_results_df = pd.DataFrame(test_results)
+ self.overall_report.set_table_dataframe(test_results_df)
+ self.overall_report.build_table()
+
+ # Remember the cwd; per-device graph generation chdirs into the
+ # report directory and restores it afterwards.
+ original_dir = os.getcwd()
+
+ # NOTE(review): both branches below are identical — the webUI
+ # distinction appears vestigial; confirm before collapsing.
+ if self.yt_obj_dict[ce][obj_name]["obj"].do_webUI:
+ csv_files = [f for f in os.listdir(self.yt_obj_dict[ce][obj_name]["obj"].report_path_date_time) if f.endswith('.csv')]
+ os.chdir(self.yt_obj_dict[ce][obj_name]["obj"].report_path_date_time)
+ else:
+ csv_files = [f for f in os.listdir(self.yt_obj_dict[ce][obj_name]["obj"].report_path_date_time) if f.endswith('.csv')]
+ os.chdir(self.yt_obj_dict[ce][obj_name]["obj"].report_path_date_time)
+ # NOTE(review): debug prints left in place.
+ print("CSV FILES",csv_files)
+ print("Script Directory:", os.path.dirname(os.path.abspath(__file__)))
+ scp_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),self.report_path_date_time)
+ # One buffer-health timeline graph per device CSV.
+ for file_name in csv_files:
+ data = pd.read_csv(file_name)
+ print('dataaaaaaaaaaaaa',data)
+ self.overall_report.set_graph_title('Buffer Health vs Time Graph for {}'.format(file_name.split('_')[0]))
+ self.overall_report.build_graph_title()
+
+ # Skip files whose timestamps cannot be parsed rather than abort.
+ try:
+ data['TimeStamp'] = pd.to_datetime(data['TimeStamp'], format="%H:%M:%S").dt.time
+ except Exception as e:
+ logging.error(f"Error in timestamp conversion for {file_name}: {e}")
+ continue
+
+ data = data.drop_duplicates(subset='TimeStamp', keep='first')
+
+ data = data.sort_values(by='TimeStamp')
+
+ timestamps = data['TimeStamp'].apply(lambda t: t.strftime('%H:%M:%S'))
+ buffer_health = data['BufferHealth']
+
+ fig, ax = plt.subplots(figsize=(20, 10))
+ plt.plot(timestamps, buffer_health, color='blue', linewidth=2)
+
+ # Customize the plot
+ plt.xlabel('Time', fontweight='bold', fontsize=15)
+ plt.ylabel('Buffer Health', fontweight='bold', fontsize=15)
+ plt.title('Buffer Health vs Time Graph for {}'.format(file_name.split('_')[0]), fontsize=18)
+
+ # Cap the x-axis at ~30 tick labels to keep them readable.
+ if len(timestamps) > 30:
+ tick_interval = len(timestamps) // 30
+ selected_ticks = timestamps[::tick_interval]
+ ax.set_xticks(selected_ticks)
+ else:
+ ax.set_xticks(timestamps)
+
+ plt.xticks(rotation=45, ha='right')
+
+ output_file = os.path.join(scp_path,f"{file_name.split('_')[0]}buffer_health_vs_time{obj_no}.png")
+ plt.tight_layout()
+ plt.savefig(output_file, dpi=96)
+ plt.close()
+ abs_path = os.path.abspath(output_file)
+ logging.info(f"Graph saved PATH {file_name}: {abs_path}")
+
+ logging.info(f"Graph saved for {file_name}: {output_file}")
+
+ self.overall_report.set_graph_image(output_file)
+
+ self.overall_report.build_graph()
+
+ os.chdir(original_dir)
+ # Series mode: advance to the next numbered object; parallel: one pass.
+ if ce == "series":
+ obj_no += 1
+ obj_name = f"yt_test_{obj_no}"
+ else:
+ break
+
+ elif test_name == "zoom_test":
+ obj_no=1
+ obj_name = "zoom_test"
+ if ce == "series":
+ obj_name += "_1"
+ while obj_name in self.zoom_obj_dict[ce]:
+ if ce == "parallel":
+ obj_no = ''
+ self.overall_report.set_obj_html(_obj_title=f'ZOOM Test {obj_no}', _obj="")
+ self.overall_report.build_objective()
+ self.overall_report.set_table_title("Test Parameters:")
+ self.overall_report.build_table_title()
+ testtype = ""
+ if self.zoom_obj_dict[ce][obj_name]["obj"].audio and self.zoom_obj_dict[ce][obj_name]["obj"].video:
+ testtype = "AUDIO & VIDEO"
+ elif self.zoom_obj_dict[ce][obj_name]["obj"].audio:
+ testtype = "AUDIO"
+ elif self.zoom_obj_dict[ce][obj_name]["obj"].video:
+ testtype = "VIDEO"
+
+ if self.zoom_obj_dict[ce][obj_name]["obj"].config:
+ test_parameters = pd.DataFrame([{
+ "Configured Devices": self.zoom_obj_dict[ce][obj_name]["obj"].hostname_os_combination,
+ 'No of Clients': f'W({self.zoom_obj_dict[ce][obj_name]["obj"].windows}),L({self.zoom_obj_dict[ce][obj_name]["obj"].linux}),M({self.zoom_obj_dict[ce][obj_name]["obj"].mac})',
+ 'Test Duration(min)': self.zoom_obj_dict[ce][obj_name]["obj"].duration,
+ 'EMAIL ID': self.zoom_obj_dict[ce][obj_name]["obj"].signin_email,
+ "PASSWORD": self.zoom_obj_dict[ce][obj_name]["obj"].signin_passwd,
+ "HOST": self.zoom_obj_dict[ce][obj_name]["obj"].real_sta_list[0],
+ "TEST TYPE": testtype,
+ "SSID": self.zoom_obj_dict[ce][obj_name]["obj"].ssid,
+ "Security": self.zoom_obj_dict[ce][obj_name]["obj"].security
+
+ }])
+ elif len(self.zoom_obj_dict[ce][obj_name]["obj"].selected_groups) > 0 and len(self.zoom_obj_dict[ce][obj_name]["obj"].selected_profiles) > 0:
+ # Map each group with a profile
+ gp_pairs = zip(self.zoom_obj_dict[ce][obj_name]["obj"].selected_groups, self.zoom_obj_dict[ce][obj_name]["obj"].selected_profiles)
+
+ # Create a string by joining the mapped pairs
+ gp_map = ", ".join(f"{group} -> {profile}" for group, profile in gp_pairs)
+
+ test_parameters = pd.DataFrame([{
+ "Configuration": gp_map,
+ "Configured Devices": self.zoom_obj_dict[ce][obj_name]["obj"].hostname_os_combination,
+ 'No of Clients': f'W({self.zoom_obj_dict[ce][obj_name]["obj"].windows}),L({self.zoom_obj_dict[ce][obj_name]["obj"].linux}),M({self.zoom_obj_dict[ce][obj_name]["obj"].mac})',
+ 'Test Duration(min)': self.zoom_obj_dict[ce][obj_name]["obj"].duration,
+ 'EMAIL ID': self.zoom_obj_dict[ce][obj_name]["obj"].signin_email,
+ "PASSWORD": self.zoom_obj_dict[ce][obj_name]["obj"].signin_passwd,
+ "HOST": self.zoom_obj_dict[ce][obj_name]["obj"].real_sta_list[0],
+ "TEST TYPE": testtype,
+
+ }])
+ else:
+
+ test_parameters = pd.DataFrame([{
+ "Configured Devices": self.zoom_obj_dict[ce][obj_name]["obj"].hostname_os_combination,
+ 'No of Clients': f'W({self.zoom_obj_dict[ce][obj_name]["obj"].windows}),L({self.zoom_obj_dict[ce][obj_name]["obj"].linux}),M({self.zoom_obj_dict[ce][obj_name]["obj"].mac})',
+ 'Test Duration(min)': self.zoom_obj_dict[ce][obj_name]["obj"].duration,
+ 'EMAIL ID': self.zoom_obj_dict[ce][obj_name]["obj"].signin_email,
+ "PASSWORD": self.zoom_obj_dict[ce][obj_name]["obj"].signin_passwd,
+ "HOST": self.zoom_obj_dict[ce][obj_name]["obj"].real_sta_list[0],
+ "TEST TYPE": testtype,
+
+ }])
+
+ test_parameters = pd.DataFrame([{
+
+ 'No of Clients': f'W({self.zoom_obj_dict[ce][obj_name]["obj"].windows}),L({self.zoom_obj_dict[ce][obj_name]["obj"].linux}),M({self.zoom_obj_dict[ce][obj_name]["obj"].mac})',
+ 'Test Duration(min)': self.zoom_obj_dict[ce][obj_name]["obj"].duration,
+ 'EMAIL ID': self.zoom_obj_dict[ce][obj_name]["obj"].signin_email,
+ "PASSWORD": self.zoom_obj_dict[ce][obj_name]["obj"].signin_passwd,
+ "HOST": self.zoom_obj_dict[ce][obj_name]["obj"].real_sta_list[0],
+ "TEST TYPE": testtype
+
+ }])
+ self.overall_report.set_table_dataframe(test_parameters)
+ self.overall_report.build_table()
+
+ client_array = []
+ accepted_clients = []
+ no_csv_client = []
+ rejected_clients = []
+ final_dataset = []
+ accepted_ostypes = []
+ max_audio_jitter_s, min_audio_jitter_s = [], []
+ max_audio_jitter_r, min_audio_jitter_r = [], []
+ max_audio_latency_s, min_audio_latency_s = [], []
+ max_audio_latency_r, min_audio_latency_r = [], []
+ max_audio_pktloss_s, min_audio_pktloss_s = [], []
+ max_audio_pktloss_r, min_audio_pktloss_r = [], []
+
+ max_video_jitter_s, min_video_jitter_s = [], []
+ max_video_jitter_r, min_video_jitter_r = [], []
+ max_video_latency_s, min_video_latency_s = [], []
+ max_video_latency_r, min_video_latency_r = [], []
+ max_video_pktloss_s, min_video_pktloss_s = [], []
+ max_video_pktloss_r, min_video_pktloss_r = [], []
+ for i in range(0, len(self.zoom_obj_dict[ce][obj_name]["obj"].device_names)):
+ temp_max_audio_jitter_s, temp_min_audio_jitter_s = 0.0, 0.0
+ temp_max_audio_jitter_r, temp_min_audio_jitter_r = 0.0, 0.0
+ temp_max_audio_latency_s, temp_min_audio_latency_s = 0.0, 0.0
+ temp_max_audio_latency_r, temp_min_audio_latency_r = 0.0, 0.0
+ temp_max_audio_pktloss_s, temp_min_audio_pktloss_s = 0.0, 0.0
+ temp_max_audio_pktloss_r, temp_min_audio_pktloss_r = 0.0, 0.0
+
+ temp_max_video_jitter_s, temp_min_video_jitter_s = 0.0, 0.0
+ temp_max_video_jitter_r, temp_min_video_jitter_r = 0.0, 0.0
+ temp_max_video_latency_s, temp_min_video_latency_s = 0.0, 0.0
+ temp_max_video_latency_r, temp_min_video_latency_r = 0.0, 0.0
+ temp_max_video_pktloss_s, temp_min_video_pktloss_s = 0.0, 0.0
+ temp_max_video_pktloss_r, temp_min_video_pktloss_r = 0.0, 0.0
+ per_client_data = {
+ "audio_jitter_s": [],
+ "audio_jitter_r": [],
+ "audio_latency_s": [],
+ "audio_latency_r": [],
+ "audio_pktloss_s": [],
+ "audio_pktloss_r": [],
+ "video_jitter_s": [],
+ "video_jitter_r": [],
+ "video_latency_s": [],
+ "video_latency_r": [],
+ "video_pktloss_s": [],
+ "video_pktloss_r": [],
+ }
+ try:
+ file_path = os.path.join(self.zoom_obj_dict[ce][obj_name]["obj"].report_path_date_time, f'{self.zoom_obj_dict[ce][obj_name]["obj"].device_names[i]}.csv')
+ with open(file_path, mode='r', encoding='utf-8', errors='ignore') as file:
+ csv_reader = csv.DictReader(file)
+ for row in csv_reader:
+
+ per_client_data["audio_jitter_s"].append(float(row["Sent Audio Jitter (ms)"]))
+ per_client_data["audio_jitter_r"].append(float(row["Receive Audio Jitter (ms)"]))
+ per_client_data["audio_latency_s"].append(float(row["Sent Audio Latency (ms)"]))
+ per_client_data["audio_latency_r"].append(float(row["Receive Audio Latency (ms)"]))
+ per_client_data["audio_pktloss_s"].append(float((row["Sent Audio Packet loss (%)"]).split(" ")[0].replace("%", "")))
+ per_client_data["audio_pktloss_r"].append(float((row["Receive Audio Packet loss (%)"]).split(" ")[0].replace("%", "")))
+ per_client_data["video_jitter_s"].append(float(row["Sent Video Jitter (ms)"]))
+ per_client_data["video_jitter_r"].append(float(row["Receive Video Jitter (ms)"]))
+ per_client_data["video_latency_s"].append(float(row["Sent Video Latency (ms)"]))
+ per_client_data["video_latency_r"].append(float(row["Receive Video Latency (ms)"]))
+ per_client_data["video_pktloss_s"].append(float((row["Sent Video Packet loss (%)"]).split(" ")[0].replace("%", "")))
+ per_client_data["video_pktloss_r"].append(float((row["Receive Video Packet loss (%)"]).split(" ")[0].replace("%", "")))
+
+ temp_max_audio_jitter_s = max(temp_max_audio_jitter_s, float(row["Sent Audio Jitter (ms)"]))
+ temp_max_audio_jitter_r = max(temp_max_audio_jitter_r, float(row["Receive Audio Jitter (ms)"]))
+ temp_max_audio_latency_s = max(temp_max_audio_latency_s, float(row["Sent Audio Latency (ms)"]))
+ temp_max_audio_latency_r = max(temp_max_audio_latency_r, float(row["Receive Audio Latency (ms)"]))
+ temp_max_audio_pktloss_s = max(temp_max_audio_pktloss_s, float((row["Sent Audio Packet loss (%)"]).split(" ")[0].replace("%", "")))
+ temp_max_audio_pktloss_r = max(temp_max_audio_pktloss_r, float((row["Receive Audio Packet loss (%)"]).split(" ")[0].replace("%", "")))
+
+ temp_max_video_jitter_s = max(temp_max_video_jitter_s, float(row["Sent Video Jitter (ms)"]))
+ temp_max_video_jitter_r = max(temp_max_video_jitter_r, float(row["Receive Video Jitter (ms)"]))
+ temp_max_video_latency_s = max(temp_max_video_latency_s, float(row["Sent Video Latency (ms)"]))
+ temp_max_video_latency_r = max(temp_max_video_latency_r, float(row["Receive Video Latency (ms)"]))
+ temp_max_video_pktloss_s = max(temp_max_video_pktloss_s, float((row["Sent Video Packet loss (%)"]).split(" ")[0].replace("%", "")))
+ temp_max_video_pktloss_r = max(temp_max_video_pktloss_r, float((row["Receive Video Packet loss (%)"]).split(" ")[0].replace("%", "")))
+
+ temp_min_audio_jitter_s = min(
+ temp_min_audio_jitter_s,
+ float(
+ row["Sent Audio Jitter (ms)"])) if temp_min_audio_jitter_s > 0 and float(
+ row["Sent Audio Jitter (ms)"]) > 0 else (
+ float(
+ row["Sent Audio Jitter (ms)"]) if float(
+ row["Sent Audio Jitter (ms)"]) > 0 else temp_min_audio_jitter_s)
+ temp_min_audio_jitter_r = min(
+ temp_min_audio_jitter_r, float(
+ row["Receive Audio Jitter (ms)"])) if temp_min_audio_jitter_r > 0 and float(
+ row["Receive Audio Jitter (ms)"]) > 0 else (
+ float(
+ row["Receive Audio Jitter (ms)"]) if float(
+ row["Receive Audio Jitter (ms)"]) > 0 else temp_min_audio_jitter_r)
+ temp_min_audio_latency_s = min(
+ temp_min_audio_latency_s, float(
+ row["Sent Audio Latency (ms)"])) if temp_min_audio_latency_s > 0 and float(
+ row["Sent Audio Latency (ms)"]) > 0 else (
+ float(
+ row["Sent Audio Latency (ms)"]) if float(
+ row["Sent Audio Latency (ms)"]) > 0 else temp_min_audio_jitter_s)
+ temp_min_audio_latency_r = min(
+ temp_min_audio_latency_r, float(
+ row["Receive Audio Latency (ms)"])) if temp_min_audio_latency_r > 0 and float(
+ row["Receive Audio Latency (ms)"]) > 0 else (
+ float(
+ row["Receive Audio Latency (ms)"]) if float(
+ row["Receive Audio Latency (ms)"]) > 0 else temp_min_audio_jitter_r)
+
+ temp_min_audio_pktloss_s = min(
+ temp_min_audio_pktloss_s, float(
+ (row["Sent Audio Packet loss (%)"]).split(" ")[0].replace(
+ "%", ""))) if temp_min_audio_pktloss_s > 0 and float(
+ (row["Sent Audio Packet loss (%)"]).split(" ")[0].replace(
+ "%", "")) > 0 else (
+ float(
+ (row["Sent Audio Packet loss (%)"]).split(" ")[0].replace(
+ "%", "")) if float(
+ (row["Sent Audio Packet loss (%)"]).split(" ")[0].replace(
+ "%", "")) > 0 else temp_min_audio_pktloss_s)
+ temp_min_audio_pktloss_r = min(
+ temp_min_audio_pktloss_r, float(
+ (row["Sent Audio Packet loss (%)"]).split(" ")[0].replace(
+ "%", ""))) if temp_min_audio_pktloss_r > 0 and float(
+ (row["Sent Audio Packet loss (%)"]).split(" ")[0].replace(
+ "%", "")) > 0 else (
+ float(
+ (row["Sent Audio Packet loss (%)"]).split(" ")[0].replace(
+ "%", "")) if float(
+ (row["Sent Audio Packet loss (%)"]).split(" ")[0].replace(
+ "%", "")) > 0 else temp_min_audio_pktloss_r)
+
+ temp_min_video_jitter_s = min(
+ temp_min_video_jitter_s,
+ float(
+ row["Sent Video Jitter (ms)"])) if temp_min_video_jitter_s > 0 and float(
+ row["Sent Video Jitter (ms)"]) > 0 else (
+ float(
+ row["Sent Video Jitter (ms)"]) if float(
+ row["Sent Video Jitter (ms)"]) > 0 else temp_min_video_jitter_s)
+ temp_min_video_jitter_r = min(
+ temp_min_video_jitter_r, float(
+ row["Receive Video Jitter (ms)"])) if temp_min_video_jitter_r > 0 and float(
+ row["Receive Video Jitter (ms)"]) > 0 else (
+ float(
+ row["Receive Video Jitter (ms)"]) if float(
+ row["Receive Video Jitter (ms)"]) > 0 else temp_min_video_jitter_r)
+ temp_min_video_latency_s = min(
+ temp_min_video_latency_s, float(
+ row["Sent Video Latency (ms)"])) if temp_min_video_latency_s > 0 and float(
+ row["Sent Video Latency (ms)"]) > 0 else (
+ float(
+ row["Sent Video Latency (ms)"]) if float(
+ row["Sent Video Latency (ms)"]) > 0 else temp_min_video_latency_s)
+ temp_min_video_latency_r = min(
+ temp_min_video_latency_r, float(
+ row["Receive Video Latency (ms)"])) if temp_min_video_latency_r > 0 and float(
+ row["Receive Video Latency (ms)"]) > 0 else (
+ float(
+ row["Receive Video Latency (ms)"]) if float(
+ row["Receive Video Latency (ms)"]) > 0 else temp_min_video_latency_r)
+
+ temp_min_video_pktloss_s = min(
+ temp_min_video_pktloss_s, float(
+ (row["Sent Video Packet loss (%)"]).split(" ")[0].replace(
+ "%", ""))) if temp_min_video_pktloss_s > 0 and float(
+ (row["Sent Video Packet loss (%)"]).split(" ")[0].replace(
+ "%", "")) > 0 else (
+ float(
+ (row["Sent Video Packet loss (%)"]).split(" ")[0].replace(
+ "%", "")) if float(
+ (row["Sent Video Packet loss (%)"]).split(" ")[0].replace(
+ "%", "")) > 0 else temp_min_video_pktloss_s)
+ temp_min_video_pktloss_r = min(
+ temp_min_video_pktloss_r, float(
+ (row["Sent Video Packet loss (%)"]).split(" ")[0].replace(
+ "%", ""))) if temp_min_video_pktloss_r > 0 and float(
+ (row["Sent Video Packet loss (%)"]).split(" ")[0].replace(
+ "%", "")) > 0 else (
+ float(
+ (row["Sent Video Packet loss (%)"]).split(" ")[0].replace(
+ "%", "")) if float(
+ (row["Sent Video Packet loss (%)"]).split(" ")[0].replace(
+ "%", "")) > 0 else temp_min_video_pktloss_r)
+
+ except Exception as e:
+ logging.error(f"Error in reading data in client {self.zoom_obj_dict[ce][obj_name]['obj'].device_names[i]}", e)
+ no_csv_client.append(self.zoom_obj_dict[ce][obj_name]["obj"].device_names[i])
+ rejected_clients.append(self.zoom_obj_dict[ce][obj_name]["obj"].device_names[i])
+ if self.zoom_obj_dict[ce][obj_name]["obj"].device_names[i] not in no_csv_client:
+ client_array.append(self.zoom_obj_dict[ce][obj_name]["obj"].device_names[i])
+ accepted_clients.append(self.zoom_obj_dict[ce][obj_name]["obj"].device_names[i])
+ accepted_ostypes.append(self.zoom_obj_dict[ce][obj_name]["obj"].real_sta_os_type[i])
+ max_audio_jitter_s.append(temp_max_audio_jitter_s)
+ min_audio_jitter_s.append(temp_min_audio_jitter_s)
+ max_audio_jitter_r.append(temp_max_audio_jitter_r)
+ min_audio_jitter_r.append(temp_min_audio_jitter_r)
+ max_audio_latency_s.append(temp_max_audio_latency_s)
+ min_audio_latency_s.append(temp_min_audio_latency_s)
+ max_audio_latency_r.append(temp_max_audio_latency_r)
+ min_audio_latency_r.append(temp_min_audio_latency_r)
+ max_video_jitter_s.append(temp_max_video_jitter_s)
+ min_video_jitter_s.append(temp_min_video_jitter_s)
+ max_video_jitter_r.append(temp_max_video_jitter_r)
+ min_video_jitter_r.append(temp_min_video_jitter_r)
+ max_video_latency_s.append(temp_max_video_latency_s)
+ min_video_latency_s.append(temp_min_video_latency_s)
+ max_video_latency_r.append(temp_max_video_latency_r)
+ min_video_latency_r.append(temp_min_video_latency_r)
+
+ max_audio_pktloss_s.append(temp_max_audio_pktloss_s)
+ min_audio_pktloss_s.append(temp_min_audio_pktloss_s)
+ max_audio_pktloss_r.append(temp_max_audio_pktloss_r)
+ min_audio_pktloss_r.append(temp_min_audio_pktloss_r)
+ max_video_pktloss_s.append(temp_max_video_pktloss_s)
+ min_video_pktloss_s.append(temp_min_video_pktloss_s)
+ max_video_pktloss_r.append(temp_max_video_pktloss_r)
+ min_video_pktloss_r.append(temp_min_video_pktloss_r)
+
+ final_dataset.append(per_client_data.copy())
+
+ # Summary table of the devices that participated in the Zoom test.
+ self.overall_report.set_table_title("Test Devices:")
+ self.overall_report.build_table_title()
+
+ device_details = pd.DataFrame({
+ 'Hostname': self.zoom_obj_dict[ce][obj_name]["obj"].real_sta_hostname,
+ 'OS Type': self.zoom_obj_dict[ce][obj_name]["obj"].real_sta_os_type,
+ "MAC": self.zoom_obj_dict[ce][obj_name]["obj"].mac_list,
+ "RSSI": self.zoom_obj_dict[ce][obj_name]["obj"].rssi_list,
+ "Link Rate": self.zoom_obj_dict[ce][obj_name]["obj"].link_rate_list,
+ "SSID": self.zoom_obj_dict[ce][obj_name]["obj"].ssid_list,
+
+ })
+ self.overall_report.set_table_dataframe(device_details)
+ self.overall_report.build_table()
+
+ if self.zoom_obj_dict[ce][obj_name]["obj"].audio:
+ self.overall_report.set_graph_title("Audio Latency (Sent/Received)")
+ self.overall_report.build_graph_title()
+ x_data_set = [max_audio_latency_s.copy(), min_audio_latency_s.copy(), max_audio_latency_r.copy(), min_audio_latency_r.copy()]
+ y_data_set = client_array
+
+ x_fig_size = 18
+ y_fig_size = len(client_array) * 1 + 4
+ bar_graph_horizontal = lf_bar_graph_horizontal(
+ _data_set=x_data_set,
+ _xaxis_name="Latency (ms)",
+ _yaxis_name="Devices",
+ _yaxis_label=y_data_set,
+ _yaxis_categories=y_data_set,
+ _yaxis_step=1,
+ _yticks_font=8,
+ _bar_height=.20,
+ _color_name=["yellow", "blue", "orange", "grey"],
+ _show_bar_value=True,
+ _figsize=(x_fig_size, y_fig_size),
+ _graph_title="Audio Latency(sent/received)",
+ _graph_image_name=f"Audio Latency(sent and received){obj_no}",
+ _label=["Max Sent", "Min Sent", "Max Recv", "Min Recv"]
+ )
+ graph_image = bar_graph_horizontal.build_bar_graph_horizontal()
+ self.overall_report.set_graph_image(graph_image)
+ self.overall_report.move_graph_image()
+ self.overall_report.build_graph()
+
+ self.overall_report.set_graph_title("Audio Jitter (Sent/Received)")
+ self.overall_report.build_graph_title()
+ x_data_set = [max_audio_jitter_s.copy(), min_audio_jitter_s.copy(), max_audio_jitter_r.copy(), min_audio_jitter_r.copy()]
+ y_data_set = client_array
+
+ x_fig_size = 18
+ y_fig_size = len(client_array) * 1 + 4
+ bar_graph_horizontal = lf_bar_graph_horizontal(
+ _data_set=x_data_set,
+ _xaxis_name="Jitter (ms)",
+ _yaxis_name="Devices",
+ _yaxis_label=y_data_set,
+ _yaxis_categories=y_data_set,
+ _yaxis_step=1,
+ _yticks_font=8,
+ _bar_height=.20,
+ _color_name=["yellow", "blue", "orange", "grey"],
+ _show_bar_value=True,
+ _figsize=(x_fig_size, y_fig_size),
+ _graph_title="Audio Jitter(sent/received)",
+ _graph_image_name=f"Audio Jitter(sent and received) {obj_no}",
+ _label=["Max Sent", "Min Sent", "Max Recv", "Min Recv"]
+ )
+ graph_image = bar_graph_horizontal.build_bar_graph_horizontal()
+ self.overall_report.set_graph_image(graph_image)
+ self.overall_report.move_graph_image()
+ self.overall_report.build_graph()
+
+ self.overall_report.set_graph_title("Audio Packet Loss (Sent/Received)")
+ self.overall_report.build_graph_title()
+ x_data_set = [max_audio_pktloss_s.copy(), min_audio_pktloss_s.copy(), max_audio_pktloss_r.copy(), min_audio_pktloss_r.copy()]
+ y_data_set = client_array
+
+ x_fig_size = 18
+ y_fig_size = len(client_array) * 1 + 4
+ bar_graph_horizontal = lf_bar_graph_horizontal(
+ _data_set=x_data_set,
+ _xaxis_name="Packet Loss (%)",
+ _yaxis_name="Devices",
+ _yaxis_label=y_data_set,
+ _yaxis_categories=y_data_set,
+ _yaxis_step=1,
+ _yticks_font=8,
+ _bar_height=.20,
+ _color_name=["yellow", "blue", "orange", "grey"],
+ _show_bar_value=True,
+ _figsize=(x_fig_size, y_fig_size),
+ _graph_title="Audio Packet Loss(sent/received)",
+ _graph_image_name=f"Audio Packet Loss(sent and received){obj_no}",
+ _label=["Max Sent", "Min Sent", "Max Recv", "Min Recv"]
+ )
+ graph_image = bar_graph_horizontal.build_bar_graph_horizontal()
+ self.overall_report.set_graph_image(graph_image)
+ self.overall_report.move_graph_image()
+ self.overall_report.build_graph()
+
+ self.overall_report.set_table_title("Test Audio Results Table:")
+ self.overall_report.build_table_title()
+ audio_test_details = pd.DataFrame({
+ 'Device Name': [client for client in accepted_clients],
+ 'Avg Latency Sent (ms)': [round(sum(data["audio_latency_s"]) / len(data["audio_latency_s"]), 2) if len(data["audio_latency_s"]) != 0 else 0 for data in final_dataset],
+ 'Avg Latency Recv (ms)': [round(sum(data["audio_latency_r"]) / len(data["audio_latency_r"]), 2) if len(data["audio_latency_r"]) != 0 else 0 for data in final_dataset],
+ 'Avg Jitter Sent (ms)': [round(sum(data["audio_jitter_s"]) / len(data["audio_jitter_s"]), 2) if len(data["audio_jitter_s"]) != 0 else 0 for data in final_dataset],
+ 'Avg Jitter Recv (ms)': [round(sum(data["audio_jitter_r"]) / len(data["audio_jitter_r"]), 2) if len(data["audio_jitter_r"]) != 0 else 0 for data in final_dataset],
+ 'Avg Pkt Loss Sent': [round(sum(data["audio_pktloss_s"]) / len(data["audio_pktloss_s"]), 2) if len(data["audio_pktloss_s"]) != 0 else 0 for data in final_dataset],
+ 'Avg Pkt Loss Recv': [round(sum(data["audio_pktloss_r"]) / len(data["audio_pktloss_r"]), 2) if len(data["audio_pktloss_r"]) != 0 else 0 for data in final_dataset],
+ 'CSV link': ['csv data'.format(client) for client in accepted_clients]
+
+ })
+ self.overall_report.set_table_dataframe(audio_test_details)
+ self.overall_report.dataframe_html = self.overall_report.dataframe.to_html(index=False,
+ justify='center', render_links=True, escape=False) # have the index be able to be passed in.
+ self.overall_report.html += self.overall_report.dataframe_html
+ if self.zoom_obj_dict[ce][obj_name]["obj"].video:
+ self.overall_report.set_graph_title("Video Latency (Sent/Received)")
+ self.overall_report.build_graph_title()
+ x_data_set = [max_video_latency_s.copy(), min_video_latency_s.copy(), max_video_latency_r.copy(), min_video_latency_r.copy()]
+ y_data_set = client_array
+ x_fig_size = 18
+ y_fig_size = len(client_array) * 1 + 4
+ bar_graph_horizontal = lf_bar_graph_horizontal(
+ _data_set=x_data_set,
+ _xaxis_name="Latency (ms)",
+ _yaxis_name="Devices",
+ _yaxis_label=y_data_set,
+ _yaxis_categories=y_data_set,
+ _yaxis_step=1,
+ _yticks_font=8,
+ _bar_height=.20,
+ _color_name=["yellow", "blue", "orange", "grey"],
+ _show_bar_value=True,
+ _figsize=(x_fig_size, y_fig_size),
+ _graph_title="Video Latency(sent/received)",
+ _graph_image_name=f"Video Latency(sent and received){obj_no}",
+ _label=["Max Sent", "Min Sent", "Max Recv", "Min Recv"]
+ )
+ graph_image = bar_graph_horizontal.build_bar_graph_horizontal()
+ self.overall_report.set_graph_image(graph_image)
+ self.overall_report.move_graph_image()
+ self.overall_report.build_graph()
+
+ self.overall_report.set_graph_title("Video Jitter (Sent/Received)")
+ self.overall_report.build_graph_title()
+ x_data_set = [max_video_jitter_s.copy(), min_video_jitter_s.copy(), max_video_jitter_r.copy(), min_video_jitter_r.copy()]
+ y_data_set = client_array
+ x_fig_size = 18
+ y_fig_size = len(client_array) * 1 + 4
+ bar_graph_horizontal = lf_bar_graph_horizontal(
+ _data_set=x_data_set,
+ _xaxis_name="Jitter (ms)",
+ _yaxis_name="Devices",
+ _yaxis_label=y_data_set,
+ _yaxis_categories=y_data_set,
+ _yaxis_step=1,
+ _yticks_font=8,
+ _bar_height=.20,
+ _color_name=["yellow", "blue", "orange", "grey"],
+ _show_bar_value=True,
+ _figsize=(x_fig_size, y_fig_size),
+ _graph_title="Video Jitter(sent/received)",
+ _graph_image_name=f"Video Jitter(sent and received){obj_no}",
+ _label=["Max Sent", "Min Sent", "Max Recv", "Min Recv"]
+ )
+ graph_image = bar_graph_horizontal.build_bar_graph_horizontal()
+ self.overall_report.set_graph_image(graph_image)
+ self.overall_report.move_graph_image()
+ self.overall_report.build_graph()
+
+ self.overall_report.set_graph_title("Video Packet Loss (Sent/Received)")
+ self.overall_report.build_graph_title()
+ x_data_set = [max_video_pktloss_s.copy(), min_video_pktloss_s.copy(), max_video_pktloss_r.copy(), min_video_pktloss_r.copy()]
+ y_data_set = client_array
+ x_fig_size = 18
+ y_fig_size = len(client_array) * 1 + 4
+ bar_graph_horizontal = lf_bar_graph_horizontal(
+ _data_set=x_data_set,
+ _xaxis_name="Packet Loss (%)",
+ _yaxis_name="Devices",
+ _yaxis_label=y_data_set,
+ _yaxis_categories=y_data_set,
+ _yaxis_step=1,
+ _yticks_font=8,
+ _bar_height=.20,
+ _color_name=["yellow", "blue", "orange", "grey"],
+ _show_bar_value=True,
+ _figsize=(x_fig_size, y_fig_size),
+ _graph_title="Video Packet Loss(sent/received)",
+ _graph_image_name=f"Video Packet Loss(sent and received){obj_no}",
+ _label=["Max Sent", "Min Sent", "Max Recv", "Min Recv"]
+ )
+ graph_image = bar_graph_horizontal.build_bar_graph_horizontal()
+ self.overall_report.set_graph_image(graph_image)
+ self.overall_report.move_graph_image()
+ self.overall_report.build_graph()
+
+ self.overall_report.set_table_title("Test Video Results Table:")
+ self.overall_report.build_table_title()
+ video_test_details = pd.DataFrame({
+ 'Device Name': [client for client in accepted_clients],
+ 'Avg Latency Sent (ms)': [round(sum(data["video_latency_s"]) / len(data["video_latency_s"]), 2) if len(data["video_latency_s"]) != 0 else 0 for data in final_dataset],
+ 'Avg Latency Recv (ms)': [round(sum(data["video_latency_r"]) / len(data["video_latency_r"]), 2) if len(data["video_latency_r"]) != 0 else 0 for data in final_dataset],
+ 'Avg Jitter Sent (ms)': [round(sum(data["video_jitter_s"]) / len(data["video_jitter_s"]), 2) if len(data["video_jitter_s"]) != 0 else 0 for data in final_dataset],
+ 'Avg Jitter Recv (ms)': [round(sum(data["video_jitter_r"]) / len(data["video_jitter_r"]), 2) if len(data["video_jitter_r"]) != 0 else 0 for data in final_dataset],
+ 'Avg Pkt Loss Sent': [round(sum(data["video_pktloss_s"]) / len(data["video_pktloss_s"]), 2) if len(data["video_pktloss_s"]) != 0 else 0 for data in final_dataset],
+ 'Avg Pkt Loss Recv': [round(sum(data["video_pktloss_r"]) / len(data["video_pktloss_r"]), 2) if len(data["video_pktloss_r"]) != 0 else 0 for data in final_dataset],
+ 'CSV link': ['csv data'.format(client) for client in accepted_clients]
+ })
+ self.overall_report.set_table_dataframe(video_test_details)
+
+ self.overall_report.dataframe_html = self.overall_report.dataframe.to_html(index=False,
+ justify='center', render_links=True, escape=False) # have the index be able to be passed in.
+ self.overall_report.html += self.overall_report.dataframe_html
+ self.overall_report.set_custom_html("
")
+ self.overall_report.build_custom()
+
+ if ce == "series":
+ obj_no += 1
+ obj_name = f"zoom_test_{obj_no}"
+ else:
+ break
+
+ except Exception as e:
+ logger.info(f"failed to generate report for {test_name} {e}")
+
+
+ def generate_test_exc_df(self,test_results_df,args_dict):
+ series_df = {}
+ parallel_df = {}
+ if self.order_priority == "series":
+ if len(self.series_tests) != 0:
+ series_df = test_results_df[:len(self.series_tests)].copy()
+ series_df["s/no"] = range(1, len(series_df) + 1)
+ series_df = series_df[["s/no", "test_name", "Duration", "status"]]
+ if len(self.parallel_tests) != 0:
+ parallel_df = test_results_df[len(self.series_tests):].copy()
+ parallel_df["s/no"] = range(1, len(parallel_df) + 1)
+ parallel_df = parallel_df[["s/no", "test_name", "Duration", "status"]]
+ else:
+ if len(self.parallel_tests) != 0:
+ parallel_df = test_results_df[:len(self.parallel_tests)].copy()
+ parallel_df["s/no"] = range(1, len(parallel_df) + 1)
+ parallel_df = parallel_df[["s/no", "test_name", "Duration", "status"]]
+
+ if len(self.series_tests) != 0:
+ series_df = test_results_df[len(self.parallel_tests):].copy()
+ series_df["s/no"] = range(1, len(series_df) + 1)
+ series_df = series_df[["s/no", "test_name", "Duration", "status"]]
+ return series_df,parallel_df
+
+ def generate_overall_report(self,test_results_df='',args_dict={}):
+ self.overall_report = lf_report.lf_report(_results_dir_name="Base_Class_Test_Overall_report", _output_html="base_class_overall.html",
+ _output_pdf="base_class_overall.pdf", _path=self.result_path if not self.dowebgui else self.result_dir)
+ self.report_path_date_time = self.overall_report.get_path_date_time()
+ self.overall_report.set_title("Candela Base Class")
+ self.overall_report.set_date(datetime.datetime.now())
+ self.overall_report.build_banner()
+ # self.overall_report.set_custom_html(test_results_df.to_html(index=False, justify='center'))
+ # self.overall_report.build_custom()
+ try:
+ series_df,parallel_df = self.generate_test_exc_df(test_results_df,args_dict)
+ except Exception:
+ # traceback.print_exc()
+ print('exception failed dataframe')
+ if self.order_priority == "series":
+ if len(self.series_tests) != 0:
+ self.overall_report.set_custom_html('Series Tests
')
+ self.overall_report.build_custom()
+ self.overall_report.set_table_title("Traffic Details")
+ self.overall_report.build_table_title()
+ self.overall_report.set_custom_html(series_df.to_html(index=False, justify='center'))
+ self.overall_report.build_custom()
+ self.render_each_test(ce="series")
+ if len(self.parallel_tests) != 0:
+ self.overall_report.set_custom_html('Parallel Tests
')
+ self.overall_report.build_custom()
+ self.overall_report.set_table_title("Traffic Details")
+ self.overall_report.build_table_title()
+ self.overall_report.set_custom_html(parallel_df.to_html(index=False, justify='center'))
+ self.overall_report.build_custom()
+ self.render_each_test(ce="parallel")
+ else:
+ if len(self.parallel_tests) != 0:
+ self.overall_report.set_custom_html('Parallel Tests
')
+ self.overall_report.build_custom()
+ self.overall_report.set_table_title("Traffic Details")
+ self.overall_report.build_table_title()
+ self.overall_report.set_custom_html(parallel_df.to_html(index=False, justify='center'))
+ self.overall_report.build_custom()
+ self.render_each_test(ce="parallel")
+ if len(self.series_tests) != 0:
+ self.overall_report.set_custom_html('Series Tests
')
+ self.overall_report.build_custom()
+ self.overall_report.set_table_title("Traffic Details")
+ self.overall_report.build_table_title()
+ self.overall_report.set_custom_html(series_df.to_html(index=False, justify='center'))
+ self.overall_report.build_custom()
+ self.render_each_test(ce="series")
+ # self.overall_report.insert_table_at_marker(test_results_df,"for_table")
+ self.overall_report.build_footer()
+ html_file = self.overall_report.write_html()
+ print("returned file {}".format(html_file))
+ print(html_file)
+ self.overall_report.write_pdf()
+
def validate_individual_args(args, test_name):
    """Check the test-specific required arguments for ``test_name``.

    Parameters:
        args: dict of parsed CLI arguments (e.g. ``vars(parser.parse_args())``).
        test_name: one of the supported ``*_test`` identifiers.

    Returns:
        True when every required argument for the given test is present,
        False when one is missing. Most tests carry no extra required
        arguments; only zoom_test and yt_test do.
    """
    # Tests with no test-specific mandatory options.
    no_extra_args = {
        'ping_test', 'http_test', 'ftp_test', 'thput_test',
        'qos_test', 'vs_test', 'rb_test',
    }
    if test_name in no_extra_args:
        return True
    if test_name == "zoom_test":
        # Sign-in credentials and participant count are all mandatory.
        required = ("zoom_signin_email", "zoom_signin_passwd", "zoom_participants")
        return all(args[key] is not None for key in required)
    if test_name == "yt_test":
        # Bug fix: the original returned False for a missing URL but fell
        # through (implicitly returning None, i.e. falsy) when the URL WAS
        # supplied, so a valid --yt_url still read as a validation failure.
        return args["yt_url"] is not None
    # Bug fix: unknown/unhandled test names (e.g. mcast_test, which the
    # caller iterates) previously returned None; treat them as having no
    # extra required arguments.
    return True
+
+
+
+
+
+
+
def validate_time(n: str) -> str:
    """Normalize a duration into a human-readable "N secs/mins/hours" string.

    Parameters:
        n: an int (seconds), a digit string (seconds), or a string with an
           's'/'m'/'h' suffix (e.g. "30s", "5m", "2h").

    Returns:
        "<n> secs" for < 60s, "<n> mins" for < 1h, "<n> hours" otherwise
        (integer-floored), or the literal "wrong type" for anything that
        cannot be parsed.
    """
    try:
        # Bools are ints in Python; exclude them so True/False don't parse
        # as 1/0 seconds.
        if isinstance(n, int) and not isinstance(n, bool):
            seconds = n
        elif isinstance(n, str):
            if n.isdigit():  # bare number: default unit is seconds
                seconds = int(n)
            elif n.endswith("s"):
                seconds = int(n[:-1])
            elif n.endswith("m"):
                seconds = int(n[:-1]) * 60
            elif n.endswith("h"):
                seconds = int(n[:-1]) * 3600
            else:
                return "wrong type"
        else:
            # Bug fix: non-int/non-str inputs previously raised an uncaught
            # AttributeError on .endswith; report them as invalid instead.
            return "wrong type"

        # Normalize to the largest whole unit.
        if seconds < 60:
            return f"{seconds} secs"
        elif seconds < 3600:
            return f"{seconds // 60} mins"
        else:
            return f"{seconds // 3600} hours"
    except ValueError:
        # int() failed on the numeric portion (e.g. "1.5m", "m").
        return "wrong type"
+
+def validate_args(args):
+ # pass/fail , config , groups-profiles arg validation
+ tests = ["http_test","ping_test","ftp_test","thput_test","qos_test","vs_test","mcast_test","yt_test","rb_test","zoom_test"]
+ if args[series_tests]:
+ series_tests = args[series_tests].split(',')
+ if args[parallel_tests]:
+ parallel_tests = args[parallel_tests].split(',')
+ for test in tests:
+ flag_test = True
+ if test in series_tests or test in parallel_tests:
+ logger.info(f"validating args for {test}...")
+ flag_test = validate_individual_args(args,test)
+ test = test.split('_')[0]
+ if args[f'{test}_expected_passfail_value'] and args[f'{test}_device_csv_name']:
+ logger.error(f"Specify either --{test}_expected_passfail_value or --{test}_device_csv_name")
+ flag_test = False
+ if args[f'{test}_group_name']:
+ selected_groups = args[f'{test}_group_name'].split(',')
+ else:
+ selected_groups = []
+ if args[f'{test}_profile_name']:
+ selected_profiles = args['profile_name'].split(',')
+ else:
+ selected_profiles = []
+
+ if len(selected_groups) != len(selected_profiles):
+ logger.error(f"Number of groups should match number of profiles")
+ flag_test = False
+ elif args[f'{test}_group_name'] and args[f'{test}_profile_name'] and args[f'{test}_file_name'] and args[f'{test}_device_list'] != []:
+ logger.error(f"Either --{test}_group_name or --{test}_device_list should be entered not both")
+ flag_test = False
+ elif args[f'{test}_ssid'] and args[f'{test}_profile_name']:
+ logger.error(f"Either --{test}_ssid or --{test}_profile_name should be given")
+ flag_test = False
+
+ elif args[f'{test}_file_name'] and (args.get(f'{test}_group_name') is None or args.get(f'{test}_profile_name') is None):
+ logger.error(f"Please enter the correct set of arguments for configuration")
+ flag_test = False
+
+ if args[f'{test}_config'] and args.get(f'{test}_group_name') is None:
+ if args.get(f'{test}_ssid') and args.get(f'{test}_security') and args[f'{test}_security'].lower() == 'open' and (args.get(f'{test}_passwd') is None or args[f'{test}_passwd'] == ''):
+ args[f'{test}_passwd'] = '[BLANK]'
+
+ if args.get(f'{test}_ssid') is None or args.get(f'{test}_passwd') is None or args[f'{test}_passwd'] == '':
+ logger.error(f'For configuration need to Specify --{test}_ssid , --{test}_passwd (Optional for "open" type security) , --{test}_security')
+ flag_test = False
+
+ elif args.get(f'{test}_ssid') and args[f'{test}_passwd'] == '[BLANK]' and args.get(f'{test}_security') and args[f'{test}_security'].lower() != 'open':
+ logger.error(f'Please provide valid --{test}_passwd and --{test}_security configuration')
+ flag_test = False
+
+ elif args.get(f'{test}_ssid') and args.get(f'{test}_passwd'):
+ if args.get(f'{test}_security') is None:
+ logger.error(f'Security must be provided when --{test}_ssid and --{test}_password specified')
+ flag_test = False
+ elif args[f'{test}_passwd'] == '[BLANK]' and args[f'{test}_security'].lower() != 'open':
+ logger.error(f'Please provide valid passwd and security configuration')
+ flag_test = False
+ elif args[f'{test}_security'].lower() == 'open' and args[f'{test}_passwd'] != '[BLANK]':
+ logger.error(f"For an open type security, the password should be left blank (i.e., set to '' or [BLANK]).")
+ flag_test = False
+ if flag_test:
+ logger.info(f"Arg validation check done for {test}")
+
+
+def main():
+
+ parser = argparse.ArgumentParser(
+ prog="lf_interop_throughput.py",
+ formatter_class=argparse.RawTextHelpFormatter,
+ )
+ parser = argparse.ArgumentParser(description="Run Candela API Tests")
+ #Always Common
+ parser.add_argument('--mgr', '--lfmgr', default='localhost', help='hostname for where LANforge GUI is running')
+ parser.add_argument('--mgr_port', '--port', default=8080, help='port LANforge GUI HTTP service is running on')
+ parser.add_argument('--upstream_port', '-u', default='eth1', help='non-station port that generates traffic: ., e.g: 1.eth1')
+ #Common
+ parser.add_argument('--device_list', help="Enter the devices on which the test should be run", default=[])
+ parser.add_argument('--duration', help='Please enter the duration in s,m,h (seconds or minutes or hours).Eg: 30s,5m,48h')
+ parser.add_argument('--parallel',
+ action="store_true",
+ help='to run in parallel')
+ parser.add_argument("--tests",type=str,help="Comma-separated ordered list of tests to run (e.g., ping_test,http_test,ping_test)")
+ parser.add_argument('--series_tests', help='Comma-separated list of tests to run in series')
+ parser.add_argument('--parallel_tests', help='Comma-separated list of tests to run in parallel')
+ parser.add_argument('--order_priority', choices=['series', 'parallel'], default='series',
+ help='Which tests to run first: series or parallel')
+ parser.add_argument('--test_name', help='Name of the Test')
+ parser.add_argument('--dowebgui', help="If true will execute script for webgui", default=False, type=bool)
+ parser.add_argument('--result_dir', help="Specify the result dir to store the runtime logs ", default='')
+
+ #NOt common
+ #ping
+ #without config
+ parser.add_argument('--ping_test',
+ action="store_true",
+ help='ping_test consists')
+ parser.add_argument('--ping_target',
+ type=str,
+ help='Target URL or port for ping test',
+ default='1.1.eth1')
+ parser.add_argument('--ping_interval',
+ type=str,
+ help='Interval (in seconds) between the echo requests',
+ default='1')
+
+ parser.add_argument('--ping_duration',
+ type=float,
+ help='Duration (in minutes) to run the ping test',
+ default=1)
+ parser.add_argument('--ping_use_default_config',
+ action='store_true',
+ help='specify this flag if wanted to proceed with existing Wi-Fi configuration of the devices')
+ parser.add_argument('--ping_device_list', help="Enter the devices on which the ping test should be run", default=[])
+ #ping pass fail value
+ parser.add_argument("--ping_expected_passfail_value", help="Specify the expected number of urls", default=None)
+ parser.add_argument("--ping_device_csv_name", type=str, help='Specify the csv name to store expected url values', default=None)
+ #ping with groups and profile configuration
+ parser.add_argument('--ping_file_name', type=str, help='Specify the file name containing group details. Example:file1')
+ parser.add_argument('--ping_group_name', type=str, help='Specify the groups name that contains a list of devices. Example: group1,group2')
+ parser.add_argument('--ping_profile_name', type=str, help='Specify the profile name to apply configurations to the devices.')
+
+ #ping configuration with --config
+ parser.add_argument("--ping_config", action="store_true", help="Specify for configuring the devices")
+ parser.add_argument('--ping_ssid', help='WiFi SSID for script objects to associate to')
+ parser.add_argument('--ping_passwd', '--ping_password', '--ping_key', default="[BLANK]", help='WiFi passphrase/password/key')
+ parser.add_argument('--ping_security', help='WiFi Security protocol: < open | wep | wpa | wpa2 | wpa3 >', default="open")
+ parser.add_argument("--ping_eap_method", type=str, default='DEFAULT', help="Specify the EAP method for authentication.")
+ parser.add_argument("--ping_eap_identity", type=str, default='', help="Specify the EAP identity for authentication.")
+ parser.add_argument("--ping_ieee8021x", action="store_true", help='Enables 802.1X enterprise authentication for test stations.')
+ parser.add_argument("--ping_ieee80211u", action="store_true", help='Enables IEEE 802.11u (Hotspot 2.0) support.')
+ parser.add_argument("--ping_ieee80211w", type=int, default=1, help='Enables IEEE 802.11w (Management Frame Protection) support.')
+ parser.add_argument("--ping_enable_pkc", action="store_true", help='Enables pkc support.')
+ parser.add_argument("--ping_bss_transition", action="store_true", help='Enables BSS transition support.')
+ parser.add_argument("--ping_power_save", action="store_true", help='Enables power-saving features.')
+ parser.add_argument("--ping_disable_ofdma", action="store_true", help='Disables OFDMA support.')
+ parser.add_argument("--ping_roam_ft_ds", action="store_true", help='Enables fast BSS transition (FT) support')
+ parser.add_argument("--ping_key_management", type=str, default='DEFAULT', help='Specify the key management method (e.g., WPA-PSK, WPA-EAP')
+ parser.add_argument("--ping_pairwise", type=str, default='NA')
+ parser.add_argument("--ping_private_key", type=str, default='NA', help='Specify EAP private key certificate file.')
+ parser.add_argument("--ping_ca_cert", type=str, default='NA', help='Specifiy the CA certificate file name')
+ parser.add_argument("--ping_client_cert", type=str, default='NA', help='Specify the client certificate file name')
+ parser.add_argument("--ping_pk_passwd", type=str, default='NA', help='Specify the password for the private key')
+ parser.add_argument("--ping_pac_file", type=str, default='NA', help='Specify the pac file name')
+ # parser.add_argument('--ping_file_name', type=str, help='Specify the file name containing group details. Example:file1')
+ # parser.add_argument('--ping_group_name', type=str, help='Specify the groups name that contains a list of devices. Example: group1,group2')
+ # parser.add_argument('--ping_profile_name', type=str, help='Specify the profile name to apply configurations to the devices.')
+ parser.add_argument("--ping_wait_time", type=int, help='Specify the maximum time to wait for Configuration', default=60)
+ #http
+ parser.add_argument('--http_test',
+ action="store_true",
+ help='http consists')
+ parser.add_argument('--http_bands', nargs="+", help='specify which band testing you want to run eg 5G, 2.4G, 6G',
+ default=["5G", "2.4G", "6G"])
+ parser.add_argument('--http_duration', help='Please enter the duration in s,m,h (seconds or minutes or hours).Eg: 30s,5m,48h')
+ parser.add_argument('--http_file_size', type=str, help='specify the size of file you want to download', default='5MB')
+ parser.add_argument('--http_device_list', help="Enter the devices on which the ping test should be run", default=[])
+ #http pass fail value
+ parser.add_argument("--http_expected_passfail_value", help="Specify the expected number of urls", default=None)
+ parser.add_argument("--http_device_csv_name", type=str, help='Specify the csv name to store expected url values', default=None)
+ #http with groups and profile configuration
+ parser.add_argument('--http_file_name', type=str, help='Specify the file name containing group details. Example:file1')
+ parser.add_argument('--http_group_name', type=str, help='Specify the groups name that contains a list of devices. Example: group1,group2')
+ parser.add_argument('--http_profile_name', type=str, help='Specify the profile name to apply configurations to the devices.')
+
+ #http configuration with --config
+ parser.add_argument("--http_config", action="store_true", help="Specify for configuring the devices")
+ parser.add_argument('--http_ssid', help='WiFi SSID for script objects to associate to')
+ parser.add_argument('--http_passwd', '--http_password', '--http_key', default="[BLANK]", help='WiFi passphrase/password/key')
+ parser.add_argument('--http_security', help='WiFi Security protocol: < open | wep | wpa | wpa2 | wpa3 >', default="open")
+ parser.add_argument("--http_eap_method", type=str, default='DEFAULT', help="Specify the EAP method for authentication.")
+ parser.add_argument("--http_eap_identity", type=str, default='', help="Specify the EAP identity for authentication.")
+ parser.add_argument("--http_ieee8021x", action="store_true", help='Enables 802.1X enterprise authentication for test stations.')
+ parser.add_argument("--http_ieee80211u", action="store_true", help='Enables IEEE 802.11u (Hotspot 2.0) support.')
+ parser.add_argument("--http_ieee80211w", type=int, default=1, help='Enables IEEE 802.11w (Management Frame Protection) support.')
+ parser.add_argument("--http_enable_pkc", action="store_true", help='Enables pkc support.')
+ parser.add_argument("--http_bss_transition", action="store_true", help='Enables BSS transition support.')
+ parser.add_argument("--http_power_save", action="store_true", help='Enables power-saving features.')
+ parser.add_argument("--http_disable_ofdma", action="store_true", help='Disables OFDMA support.')
+ parser.add_argument("--http_roam_ft_ds", action="store_true", help='Enables fast BSS transition (FT) support')
+ parser.add_argument("--http_key_management", type=str, default='DEFAULT', help='Specify the key management method (e.g., WPA-PSK, WPA-EAP')
+ parser.add_argument("--http_pairwise", type=str, default='NA')
+ parser.add_argument("--http_private_key", type=str, default='NA', help='Specify EAP private key certificate file.')
+ parser.add_argument("--http_ca_cert", type=str, default='NA', help='Specifiy the CA certificate file name')
+ parser.add_argument("--http_client_cert", type=str, default='NA', help='Specify the client certificate file name')
+ parser.add_argument("--http_pk_passwd", type=str, default='NA', help='Specify the password for the private key')
+ parser.add_argument("--http_pac_file", type=str, default='NA', help='Specify the pac file name')
+ # parser.add_argument('--http_file_name', type=str, help='Specify the file name containing group details. Example:file1')
+ # parser.add_argument('--http_group_name', type=str, help='Specify the groups name that contains a list of devices. Example: group1,group2')
+ # parser.add_argument('--http_profile_name', type=str, help='Specify the profile name to apply configurations to the devices.')
+ parser.add_argument("--http_wait_time", type=int, help='Specify the maximum time to wait for Configuration', default=60)
+
+ #ftp
+ parser.add_argument('--ftp_test',
+ action="store_true",
+ help='ftp_test consists')
+ parser.add_argument('--ftp_bands', nargs="+", help='specify which band testing you want to run eg 5G, 2.4G, 6G',
+ default=["5G", "2.4G", "6G"])
+ parser.add_argument('--ftp_duration', help='Please enter the duration in s,m,h (seconds or minutes or hours).Eg: 30s,5m,48h')
+ parser.add_argument('--ftp_file_size', type=str, help='specify the size of file you want to download', default='5MB')
+ parser.add_argument('--ftp_device_list', help="Enter the devices on which the ping test should be run", default=[])
+ #ftp pass fail value
+ parser.add_argument("--ftp_expected_passfail_value", help="Specify the expected number of urls", default=None)
+ parser.add_argument("--ftp_device_csv_name", type=str, help='Specify the csv name to store expected url values', default=None)
+ #ftp with groups and profile configuration
+ parser.add_argument('--ftp_file_name', type=str, help='Specify the file name containing group details. Example:file1')
+ parser.add_argument('--ftp_group_name', type=str, help='Specify the groups name that contains a list of devices. Example: group1,group2')
+ parser.add_argument('--ftp_profile_name', type=str, help='Specify the profile name to apply configurations to the devices.')
+
+ #ftp configuration with --config
+ parser.add_argument("--ftp_config", action="store_true", help="Specify for configuring the devices")
+ parser.add_argument('--ftp_ssid', help='WiFi SSID for script objects to associate to')
+ parser.add_argument('--ftp_passwd', '--ftp_password', '--ftp_key', default="[BLANK]", help='WiFi passphrase/password/key')
+ parser.add_argument('--ftp_security', help='WiFi Security protocol: < open | wep | wpa | wpa2 | wpa3 >', default="open")
+ parser.add_argument("--ftp_eap_method", type=str, default='DEFAULT', help="Specify the EAP method for authentication.")
+ parser.add_argument("--ftp_eap_identity", type=str, default='', help="Specify the EAP identity for authentication.")
+ parser.add_argument("--ftp_ieee8021x", action="store_true", help='Enables 802.1X enterprise authentication for test stations.')
+ parser.add_argument("--ftp_ieee80211u", action="store_true", help='Enables IEEE 802.11u (Hotspot 2.0) support.')
+ parser.add_argument("--ftp_ieee80211w", type=int, default=1, help='Enables IEEE 802.11w (Management Frame Protection) support.')
+ parser.add_argument("--ftp_enable_pkc", action="store_true", help='Enables pkc support.')
+ parser.add_argument("--ftp_bss_transition", action="store_true", help='Enables BSS transition support.')
+ parser.add_argument("--ftp_power_save", action="store_true", help='Enables power-saving features.')
+ parser.add_argument("--ftp_disable_ofdma", action="store_true", help='Disables OFDMA support.')
+ parser.add_argument("--ftp_roam_ft_ds", action="store_true", help='Enables fast BSS transition (FT) support')
+ parser.add_argument("--ftp_key_management", type=str, default='DEFAULT', help='Specify the key management method (e.g., WPA-PSK, WPA-EAP')
+ parser.add_argument("--ftp_pairwise", type=str, default='NA')
+ parser.add_argument("--ftp_private_key", type=str, default='NA', help='Specify EAP private key certificate file.')
+ parser.add_argument("--ftp_ca_cert", type=str, default='NA', help='Specifiy the CA certificate file name')
+ parser.add_argument("--ftp_client_cert", type=str, default='NA', help='Specify the client certificate file name')
+ parser.add_argument("--ftp_pk_passwd", type=str, default='NA', help='Specify the password for the private key')
+ parser.add_argument("--ftp_pac_file", type=str, default='NA', help='Specify the pac file name')
+ # parser.add_argument('--ftp_file_name', type=str, help='Specify the file name containing group details. Example:file1')
+ # parser.add_argument('--ftp_group_name', type=str, help='Specify the groups name that contains a list of devices. Example: group1,group2')
+ # parser.add_argument('--ftp_profile_name', type=str, help='Specify the profile name to apply configurations to the devices.')
+ parser.add_argument("--ftp_wait_time", type=int, help='Specify the maximum time to wait for Configuration', default=60)
+
+ #qos
+ parser.add_argument('--qos_test',
+ action="store_true",
+ help='qos_test consists')
+ parser.add_argument('--qos_duration', help='--qos_duration sets the duration of the test', default="2m")
+ parser.add_argument('--qos_upload', help='--upload traffic load per connection (upload rate)')
+ parser.add_argument('--qos_download', help='--download traffic load per connection (download rate)')
+ parser.add_argument('--qos_traffic_type', help='Select the Traffic Type [lf_udp, lf_tcp]', required=False)
+ parser.add_argument('--qos_tos', help='Enter the tos. Example1 : "BK,BE,VI,VO" , Example2 : "BK,VO", Example3 : "VI" ')
+ parser.add_argument('--qos_device_list', help="Enter the devices on which the ping test should be run", default=[])
+ #qos pass fail value
+ parser.add_argument("--qos_expected_passfail_value", help="Specify the expected number of urls", default=None)
+ parser.add_argument("--qos_device_csv_name", type=str, help='Specify the csv name to store expected url values', default=None)
+ #qos with groups and profile configuration
+ parser.add_argument('--qos_file_name', type=str, help='Specify the file name containing group details. Example:file1')
+ parser.add_argument('--qos_group_name', type=str, help='Specify the groups name that contains a list of devices. Example: group1,group2')
+ parser.add_argument('--qos_profile_name', type=str, help='Specify the profile name to apply configurations to the devices.')
+
+ #qos configuration with --config
+ parser.add_argument("--qos_config", action="store_true", help="Specify for configuring the devices")
+ parser.add_argument('--qos_ssid', help='WiFi SSID for script objects to associate to')
+ parser.add_argument('--qos_passwd', '--qos_password', '--qos_key', default="[BLANK]", help='WiFi passphrase/password/key')
+ parser.add_argument('--qos_security', help='WiFi Security protocol: < open | wep | wpa | wpa2 | wpa3 >', default="open")
+ #Optional qos config args
+ parser.add_argument("--qos_eap_method", type=str, default='DEFAULT', help="Specify the EAP method for authentication.")
+ parser.add_argument("--qos_eap_identity", type=str, default='', help="Specify the EAP identity for authentication.")
+ parser.add_argument("--qos_ieee8021x", action="store_true", help='Enables 802.1X enterprise authentication for test stations.')
+ parser.add_argument("--qos_ieee80211u", action="store_true", help='Enables IEEE 802.11u (Hotspot 2.0) support.')
+ parser.add_argument("--qos_ieee80211w", type=int, default=1, help='Enables IEEE 802.11w (Management Frame Protection) support.')
+ parser.add_argument("--qos_enable_pkc", action="store_true", help='Enables pkc support.')
+ parser.add_argument("--qos_bss_transition", action="store_true", help='Enables BSS transition support.')
+ parser.add_argument("--qos_power_save", action="store_true", help='Enables power-saving features.')
+ parser.add_argument("--qos_disable_ofdma", action="store_true", help='Disables OFDMA support.')
+ parser.add_argument("--qos_roam_ft_ds", action="store_true", help='Enables fast BSS transition (FT) support')
+ parser.add_argument("--qos_key_management", type=str, default='DEFAULT', help='Specify the key management method (e.g., WPA-PSK, WPA-EAP')
+ parser.add_argument("--qos_pairwise", type=str, default='NA')
+ parser.add_argument("--qos_private_key", type=str, default='NA', help='Specify EAP private key certificate file.')
+ parser.add_argument("--qos_ca_cert", type=str, default='NA', help='Specify the CA certificate file name')
+ parser.add_argument("--qos_client_cert", type=str, default='NA', help='Specify the client certificate file name')
+ parser.add_argument("--qos_pk_passwd", type=str, default='NA', help='Specify the password for the private key')
+ parser.add_argument("--qos_pac_file", type=str, default='NA', help='Specify the pac file name')
+ # parser.add_argument('--qos_file_name', type=str, help='Specify the file name containing group details. Example:file1')
+ # parser.add_argument('--qos_group_name', type=str, help='Specify the groups name that contains a list of devices. Example: group1,group2')
+ # parser.add_argument('--qos_profile_name', type=str, help='Specify the profile name to apply configurations to the devices.')
+ parser.add_argument("--qos_wait_time", type=int, help='Specify the maximum time to wait for Configuration', default=60)
+
+
+ #vs
+ parser.add_argument('--vs_test',
+ action="store_true",
+ help='vs_test consists')
+ parser.add_argument("--vs_url", default="www.google.com", help='specify the url you want to test on')
+ parser.add_argument("--vs_media_source", type=str, default='1')
+ parser.add_argument("--vs_media_quality", type=str, default='0')
+ parser.add_argument('--vs_duration', type=str, help='time to run traffic')
+ parser.add_argument('--vs_device_list', help="Enter the devices on which the ping test should be run", default=[])
+ #vs pass fail value
+ parser.add_argument("--vs_expected_passfail_value", help="Specify the expected number of urls", default=None)
+ parser.add_argument("--vs_device_csv_name", type=str, help='Specify the csv name to store expected url values', default=None)
+ #vs with groups and profile configuration
+ parser.add_argument('--vs_file_name', type=str, help='Specify the file name containing group details. Example:file1')
+ parser.add_argument('--vs_group_name', type=str, help='Specify the groups name that contains a list of devices. Example: group1,group2')
+ parser.add_argument('--vs_profile_name', type=str, help='Specify the profile name to apply configurations to the devices.')
+
+ #vs configuration with --config
+ parser.add_argument("--vs_config", action="store_true", help="Specify for configuring the devices")
+ parser.add_argument('--vs_ssid', help='WiFi SSID for script objects to associate to')
+ parser.add_argument('--vs_passwd', '--vs_password', '--vs_key', default="[BLANK]", help='WiFi passphrase/password/key')
+ parser.add_argument('--vs_security', help='WiFi Security protocol: < open | wep | wpa | wpa2 | wpa3 >', default="open")
+ #Optional vs config args
+ parser.add_argument("--vs_eap_method", type=str, default='DEFAULT', help="Specify the EAP method for authentication.")
+ parser.add_argument("--vs_eap_identity", type=str, default='', help="Specify the EAP identity for authentication.")
+ parser.add_argument("--vs_ieee8021x", action="store_true", help='Enables 802.1X enterprise authentication for test stations.')
+ parser.add_argument("--vs_ieee80211u", action="store_true", help='Enables IEEE 802.11u (Hotspot 2.0) support.')
+ parser.add_argument("--vs_ieee80211w", type=int, default=1, help='Enables IEEE 802.11w (Management Frame Protection) support.')
+ parser.add_argument("--vs_enable_pkc", action="store_true", help='Enables pkc support.')
+ parser.add_argument("--vs_bss_transition", action="store_true", help='Enables BSS transition support.')
+ parser.add_argument("--vs_power_save", action="store_true", help='Enables power-saving features.')
+ parser.add_argument("--vs_disable_ofdma", action="store_true", help='Disables OFDMA support.')
+ parser.add_argument("--vs_roam_ft_ds", action="store_true", help='Enables fast BSS transition (FT) support')
+ parser.add_argument("--vs_key_management", type=str, default='DEFAULT', help='Specify the key management method (e.g., WPA-PSK, WPA-EAP')
+ parser.add_argument("--vs_pairwise", type=str, default='NA')
+ parser.add_argument("--vs_private_key", type=str, default='NA', help='Specify EAP private key certificate file.')
+ parser.add_argument("--vs_ca_cert", type=str, default='NA', help='Specify the CA certificate file name')
+ parser.add_argument("--vs_client_cert", type=str, default='NA', help='Specify the client certificate file name')
+ parser.add_argument("--vs_pk_passwd", type=str, default='NA', help='Specify the password for the private key')
+ parser.add_argument("--vs_pac_file", type=str, default='NA', help='Specify the pac file name')
+ # parser.add_argument('--vs_file_name', type=str, help='Specify the file name containing group details. Example:file1')
+ # parser.add_argument('--vs_group_name', type=str, help='Specify the groups name that contains a list of devices. Example: group1,group2')
+ # parser.add_argument('--vs_profile_name', type=str, help='Specify the profile name to apply configurations to the devices.')
+ parser.add_argument("--vs_wait_time", type=int, help='Specify the maximum time to wait for Configuration', default=60)
+
+ #thput
+ parser.add_argument('--thput_test',
+ action="store_true",
+ help='thput_test consists')
+ parser.add_argument('--thput_test_duration', help='--thput_test_duration sets the duration of the test', default="")
+ parser.add_argument('--thput_download', help='--thput_download traffic load per connection (download rate)', default='2560')
+ parser.add_argument('--thput_traffic_type', help='Select the Traffic Type [lf_udp, lf_tcp]', required=False)
+ parser.add_argument('--thput_upload', help='--thput_upload traffic load per connection (upload rate)', default='2560')
+ parser.add_argument('--thput_device_list', help="Enter the devices on which the test should be run", default=[])
+ parser.add_argument('--thput_do_interopability', action='store_true', help='Ensures test on devices run sequentially, capturing each device’s data individually for plotting in the final report.')
+ parser.add_argument("--thput_default_config", action="store_true", help="To stop configuring the devices in interoperability")
+ #thput pass fail value
+ parser.add_argument("--thput_expected_passfail_value", help="Specify the expected number of urls", default=None)
+ parser.add_argument("--thput_device_csv_name", type=str, help='Specify the csv name to store expected url values', default=None)
+ #thput with groups and profile configuration
+ parser.add_argument('--thput_file_name', type=str, help='Specify the file name containing group details. Example:file1')
+ parser.add_argument('--thput_group_name', type=str, help='Specify the groups name that contains a list of devices. Example: group1,group2')
+ parser.add_argument('--thput_profile_name', type=str, help='Specify the profile name to apply configurations to the devices.')
+ parser.add_argument('--thput_load_type', help="Determine the type of load: < wc_intended_load | wc_per_client_load >", default="wc_per_client_load")
+ parser.add_argument('--thput_packet_size', help='Determine the size of the packet in which Packet Size Should be Greater than 16B or less than 64KB(65507)', default="-1")
+
+ #thput configuration with --config
+ parser.add_argument("--thput_config", action="store_true", help="Specify for configuring the devices")
+ parser.add_argument('--thput_ssid', help='WiFi SSID for script objects to associate to')
+ parser.add_argument('--thput_passwd', '--thput_password', '--thput_key', default="[BLANK]", help='WiFi passphrase/password/key')
+ parser.add_argument('--thput_security', help='WiFi Security protocol: < open | wep | wpa | wpa2 | wpa3 >', default="open")
+ #Optional thput config args
+ parser.add_argument("--thput_eap_method", type=str, default='DEFAULT', help="Specify the EAP method for authentication.")
+ parser.add_argument("--thput_eap_identity", type=str, default='', help="Specify the EAP identity for authentication.")
+ parser.add_argument("--thput_ieee8021x", action="store_true", help='Enables 802.1X enterprise authentication for test stations.')
+ parser.add_argument("--thput_ieee80211u", action="store_true", help='Enables IEEE 802.11u (Hotspot 2.0) support.')
+ parser.add_argument("--thput_ieee80211w", type=int, default=1, help='Enables IEEE 802.11w (Management Frame Protection) support.')
+ parser.add_argument("--thput_enable_pkc", action="store_true", help='Enables pkc support.')
+ parser.add_argument("--thput_bss_transition", action="store_true", help='Enables BSS transition support.')
+ parser.add_argument("--thput_power_save", action="store_true", help='Enables power-saving features.')
+ parser.add_argument("--thput_disable_ofdma", action="store_true", help='Disables OFDMA support.')
+ parser.add_argument("--thput_roam_ft_ds", action="store_true", help='Enables fast BSS transition (FT) support')
+ parser.add_argument("--thput_key_management", type=str, default='DEFAULT', help='Specify the key management method (e.g., WPA-PSK, WPA-EAP')
+ parser.add_argument("--thput_pairwise", type=str, default='NA')
+ parser.add_argument("--thput_private_key", type=str, default='NA', help='Specify EAP private key certificate file.')
+ parser.add_argument("--thput_ca_cert", type=str, default='NA', help='Specify the CA certificate file name')
+ parser.add_argument("--thput_client_cert", type=str, default='NA', help='Specify the client certificate file name')
+ parser.add_argument("--thput_pk_passwd", type=str, default='NA', help='Specify the password for the private key')
+ parser.add_argument("--thput_pac_file", type=str, default='NA', help='Specify the pac file name')
+ # parser.add_argument('--thput_file_name', type=str, help='Specify the file name containing group details. Example:file1')
+ # parser.add_argument('--thput_group_name', type=str, help='Specify the groups name that contains a list of devices. Example: group1,group2')
+ # parser.add_argument('--thput_profile_name', type=str, help='Specify the profile name to apply configurations to the devices.')
+ parser.add_argument("--thput_wait_time", type=int, help='Specify the maximum time to wait for Configuration', default=60)
+ #mcast
+ parser.add_argument('--mcast_test',
+ action="store_true",
+ help='mcast_test consists')
+ parser.add_argument(
+ '--mcast_test_duration',
+ help='--test_duration example --time 5d (5 days) default: 3m options: number followed by d, h, m or s',
+ default='3m')
+ parser.add_argument(
+ '--mcast_endp_type',
+ help=(
+ '--endp_type example --endp_type \"lf_udp lf_tcp mc_udp\" '
+ ' Default: lf_udp , options: lf_udp, lf_udp6, lf_tcp, lf_tcp6, mc_udp, mc_udp6'),
+ default='lf_udp',
+ type=valid_endp_types)
+ parser.add_argument(
+ '--mcast_upstream_port',
+ help='--mcast_upstream_port example: --mcast_upstream_port eth1',
+ default='eth1')
+ parser.add_argument(
+ '--mcast_side_b_min_bps',
+ help='''--side_b_min_bps or --download_min_bps, requested upstream min tx rate, comma separated list for multiple iterations. Default 256000
+ When running with tcp/udp and mcast will use this value''',
+ default="256000")
+ parser.add_argument(
+ '--mcast_tos',
+ help='--tos: Support different ToS settings: BK,BE,VI,VO,numeric',
+ default="BE")
+ parser.add_argument(
+ '--mcast_device_list',
+ action='append',
+ help='Specify the Resource IDs for real clients. Accepts a comma-separated list (e.g., 1.11,1.95,1.360).'
+ )
+ #mcast pass fail value
+ parser.add_argument("--mcast_expected_passfail_value", help="Specify the expected number of urls", default=None)
+ parser.add_argument("--mcast_device_csv_name", type=str, help='Specify the csv name to store expected url values', default=None)
+ #mcast with groups and profile configuration
+ parser.add_argument('--mcast_file_name', type=str, help='Specify the file name containing group details. Example:file1')
+ parser.add_argument('--mcast_group_name', type=str, help='Specify the groups name that contains a list of devices. Example: group1,group2')
+ parser.add_argument('--mcast_profile_name', type=str, help='Specify the profile name to apply configurations to the devices.')
+
+ #mcast configuration with --config
+ parser.add_argument("--mcast_config", action="store_true", help="Specify for configuring the devices")
+ parser.add_argument('--mcast_ssid', help='WiFi SSID for script objects to associate to')
+ parser.add_argument('--mcast_passwd', '--mcast_password', '--mcast_key', default="[BLANK]", help='WiFi passphrase/password/key')
+ parser.add_argument('--mcast_security', help='WiFi Security protocol: < open | wep | wpa | wpa2 | wpa3 >', default="open")
+ #Optional mcast config args
+ parser.add_argument("--mcast_eap_method", type=str, default='DEFAULT', help="Specify the EAP method for authentication.")
+ parser.add_argument("--mcast_eap_identity", type=str, default='', help="Specify the EAP identity for authentication.")
+ parser.add_argument("--mcast_ieee8021x", action="store_true", help='Enables 802.1X enterprise authentication for test stations.')
+ parser.add_argument("--mcast_ieee80211u", action="store_true", help='Enables IEEE 802.11u (Hotspot 2.0) support.')
+ parser.add_argument("--mcast_ieee80211w", type=int, default=1, help='Enables IEEE 802.11w (Management Frame Protection) support.')
+ parser.add_argument("--mcast_enable_pkc", action="store_true", help='Enables pkc support.')
+ parser.add_argument("--mcast_bss_transition", action="store_true", help='Enables BSS transition support.')
+ parser.add_argument("--mcast_power_save", action="store_true", help='Enables power-saving features.')
+ parser.add_argument("--mcast_disable_ofdma", action="store_true", help='Disables OFDMA support.')
+ parser.add_argument("--mcast_roam_ft_ds", action="store_true", help='Enables fast BSS transition (FT) support')
+ parser.add_argument("--mcast_key_management", type=str, default='DEFAULT', help='Specify the key management method (e.g., WPA-PSK, WPA-EAP')
+ parser.add_argument("--mcast_pairwise", type=str, default='NA')
+ parser.add_argument("--mcast_private_key", type=str, default='NA', help='Specify EAP private key certificate file.')
+ parser.add_argument("--mcast_ca_cert", type=str, default='NA', help='Specify the CA certificate file name')
+ parser.add_argument("--mcast_client_cert", type=str, default='NA', help='Specify the client certificate file name')
+ parser.add_argument("--mcast_pk_passwd", type=str, default='NA', help='Specify the password for the private key')
+ parser.add_argument("--mcast_pac_file", type=str, default='NA', help='Specify the pac file name')
+ # parser.add_argument('--mcast_file_name', type=str, help='Specify the file name containing group details. Example:file1')
+ # parser.add_argument('--mcast_group_name', type=str, help='Specify the groups name that contains a list of devices. Example: group1,group2')
+ # parser.add_argument('--mcast_profile_name', type=str, help='Specify the profile name to apply configurations to the devices.')
+ parser.add_argument("--mcast_wait_time", type=int, help='Specify the maximum time to wait for Configuration', default=60)
+ #YOUTUBE
+ parser.add_argument('--yt_test',
+ action="store_true",
+ help='yt_test consists')
+ parser.add_argument('--yt_url', type=str, help='youtube url')
+ parser.add_argument('--yt_duration', help='duration to run the test in sec')
+ parser.add_argument('--yt_res', default='Auto', help="to set resolution to 144p,240p,720p")
+ # parser.add_argument('--yt_upstream_port', type=str, help='Specify The Upstream Port name or IP address', required=True)
+ parser.add_argument('--yt_device_list', help='Specify the real device ports separated by comma')
+ #yt pass fail value
+ parser.add_argument("--yt_expected_passfail_value", help="Specify the expected number of urls", default=None)
+ parser.add_argument("--yt_device_csv_name", type=str, help='Specify the csv name to store expected url values', default=None)
+ #yt with groups and profile configuration
+ parser.add_argument('--yt_file_name', type=str, help='Specify the file name containing group details. Example:file1')
+ parser.add_argument('--yt_group_name', type=str, help='Specify the groups name that contains a list of devices. Example: group1,group2')
+ parser.add_argument('--yt_profile_name', type=str, help='Specify the profile name to apply configurations to the devices.')
+
+ #yt configuration with --config
+ parser.add_argument("--yt_config", action="store_true", help="Specify for configuring the devices")
+ parser.add_argument('--yt_ssid', help='WiFi SSID for script objects to associate to')
+ parser.add_argument('--yt_passwd', '--yt_password', '--yt_key', default="[BLANK]", help='WiFi passphrase/password/key')
+ parser.add_argument('--yt_security', help='WiFi Security protocol: < open | wep | wpa | wpa2 | wpa3 >', default="open")
+ #Optional yt config args
+ parser.add_argument("--yt_eap_method", type=str, default='DEFAULT', help="Specify the EAP method for authentication.")
+ parser.add_argument("--yt_eap_identity", type=str, default='', help="Specify the EAP identity for authentication.")
+ parser.add_argument("--yt_ieee8021x", action="store_true", help='Enables 802.1X enterprise authentication for test stations.')
+ parser.add_argument("--yt_ieee80211u", action="store_true", help='Enables IEEE 802.11u (Hotspot 2.0) support.')
+ parser.add_argument("--yt_ieee80211w", type=int, default=1, help='Enables IEEE 802.11w (Management Frame Protection) support.')
+ parser.add_argument("--yt_enable_pkc", action="store_true", help='Enables pkc support.')
+ parser.add_argument("--yt_bss_transition", action="store_true", help='Enables BSS transition support.')
+ parser.add_argument("--yt_power_save", action="store_true", help='Enables power-saving features.')
+ parser.add_argument("--yt_disable_ofdma", action="store_true", help='Disables OFDMA support.')
+ parser.add_argument("--yt_roam_ft_ds", action="store_true", help='Enables fast BSS transition (FT) support')
+ parser.add_argument("--yt_key_management", type=str, default='DEFAULT', help='Specify the key management method (e.g., WPA-PSK, WPA-EAP')
+ parser.add_argument("--yt_pairwise", type=str, default='NA')
+ parser.add_argument("--yt_private_key", type=str, default='NA', help='Specify EAP private key certificate file.')
+ parser.add_argument("--yt_ca_cert", type=str, default='NA', help='Specify the CA certificate file name')
+ parser.add_argument("--yt_client_cert", type=str, default='NA', help='Specify the client certificate file name')
+ parser.add_argument("--yt_pk_passwd", type=str, default='NA', help='Specify the password for the private key')
+ parser.add_argument("--yt_pac_file", type=str, default='NA', help='Specify the pac file name')
+ # parser.add_argument('--yt_file_name', type=str, help='Specify the file name containing group details. Example:file1')
+ # parser.add_argument('--yt_group_name', type=str, help='Specify the groups name that contains a list of devices. Example: group1,group2')
+ # parser.add_argument('--yt_profile_name', type=str, help='Specify the profile name to apply configurations to the devices.')
+ parser.add_argument("--yt_wait_time", type=int, help='Specify the maximum time to wait for Configuration', default=60)
+ #real browser
+ parser.add_argument('--rb_test',
+ action="store_true",
+ help='rb_test consists')
+ parser.add_argument("--rb_url", default="https://google.com", help='specify the url you want to test on')
+ parser.add_argument('--rb_duration', type=str, help='time to run traffic')
+ parser.add_argument('--rb_device_list', type=str, help='provide resource_ids of android devices. for instance: "10,12,14"')
+ parser.add_argument('--rb_webgui_incremental', '--rb_incremental_capacity', help="Specify the incremental values <1,2,3..>", dest='rb_webgui_incremental', type=str)
+ parser.add_argument('--rb_incremental', help="to add incremental capacity to run the test", action='store_true')
+ parser.add_argument("--rb_count", type=int, default=100, help='specify the number of url you want to test on '
+ 'per minute')
+ #rb pass fail value
+ parser.add_argument("--rb_expected_passfail_value", help="Specify the expected number of urls", default=None)
+ parser.add_argument("--rb_device_csv_name", type=str, help='Specify the csv name to store expected url values', default=None)
+ #rb with groups and profile configuration
+ parser.add_argument('--rb_file_name', type=str, help='Specify the file name containing group details. Example:file1')
+ parser.add_argument('--rb_group_name', type=str, help='Specify the groups name that contains a list of devices. Example: group1,group2')
+ parser.add_argument('--rb_profile_name', type=str, help='Specify the profile name to apply configurations to the devices.')
+
+ #rb configuration with --config
+ parser.add_argument("--rb_config", action="store_true", help="Specify for configuring the devices")
+ parser.add_argument('--rb_ssid', help='WiFi SSID for script objects to associate to')
+ parser.add_argument('--rb_passwd', '--rb_password', '--rb_key', default="[BLANK]", help='WiFi passphrase/password/key')
+ parser.add_argument('--rb_security', help='WiFi Security protocol: < open | wep | wpa | wpa2 | wpa3 >', default="open")
+ #Optional rb config args
+ parser.add_argument("--rb_eap_method", type=str, default='DEFAULT', help="Specify the EAP method for authentication.")
+ parser.add_argument("--rb_eap_identity", type=str, default='', help="Specify the EAP identity for authentication.")
+ parser.add_argument("--rb_ieee8021x", action="store_true", help='Enables 802.1X enterprise authentication for test stations.')
+ parser.add_argument("--rb_ieee80211u", action="store_true", help='Enables IEEE 802.11u (Hotspot 2.0) support.')
+ parser.add_argument("--rb_ieee80211w", type=int, default=1, help='Enables IEEE 802.11w (Management Frame Protection) support.')
+ parser.add_argument("--rb_enable_pkc", action="store_true", help='Enables pkc support.')
+ parser.add_argument("--rb_bss_transition", action="store_true", help='Enables BSS transition support.')
+ parser.add_argument("--rb_power_save", action="store_true", help='Enables power-saving features.')
+ parser.add_argument("--rb_disable_ofdma", action="store_true", help='Disables OFDMA support.')
+ parser.add_argument("--rb_roam_ft_ds", action="store_true", help='Enables fast BSS transition (FT) support')
+ parser.add_argument("--rb_key_management", type=str, default='DEFAULT', help='Specify the key management method (e.g., WPA-PSK, WPA-EAP')
+ parser.add_argument("--rb_pairwise", type=str, default='NA')
+ parser.add_argument("--rb_private_key", type=str, default='NA', help='Specify EAP private key certificate file.')
+ parser.add_argument("--rb_ca_cert", type=str, default='NA', help='Specify the CA certificate file name')
+ parser.add_argument("--rb_client_cert", type=str, default='NA', help='Specify the client certificate file name')
+ parser.add_argument("--rb_pk_passwd", type=str, default='NA', help='Specify the password for the private key')
+ parser.add_argument("--rb_pac_file", type=str, default='NA', help='Specify the pac file name')
+ # parser.add_argument('--rb_file_name', type=str, help='Specify the file name containing group details. Example:file1')
+ # parser.add_argument('--rb_group_name', type=str, help='Specify the groups name that contains a list of devices. Example: group1,group2')
+ # parser.add_argument('--rb_profile_name', type=str, help='Specify the profile name to apply configurations to the devices.')
+ parser.add_argument("--rb_wait_time", type=int, help='Specify the maximum time to wait for Configuration', default=60)
+ #zoom
+ parser.add_argument('--zoom_test',
+ action="store_true",
+ help='zoom_test consists')
+ parser.add_argument('--zoom_duration', type=int, help="Duration of the Zoom meeting in minutes")
+ parser.add_argument('--zoom_signin_email', type=str, help="Sign-in email")
+ parser.add_argument('--zoom_signin_passwd', type=str, help="Sign-in password")
+ parser.add_argument('--zoom_participants', type=int, help="Number of participants")
+ parser.add_argument('--zoom_audio', action='store_true')
+ parser.add_argument('--zoom_video', action='store_true')
+ parser.add_argument('--zoom_device_list', help="resources participated in the test")
+ parser.add_argument('--zoom_host', help="Host of the test")
+ #zoom pass fail value
+ parser.add_argument("--zoom_expected_passfail_value", help="Specify the expected number of urls", default=None)
+ parser.add_argument("--zoom_device_csv_name", type=str, help='Specify the csv name to store expected url values', default=None)
+ #zoom with groups and profile configuration
+ parser.add_argument('--zoom_file_name', type=str, help='Specify the file name containing group details. Example:file1')
+ parser.add_argument('--zoom_group_name', type=str, help='Specify the groups name that contains a list of devices. Example: group1,group2')
+ parser.add_argument('--zoom_profile_name', type=str, help='Specify the profile name to apply configurations to the devices.')
+
+ #zoom configuration with --config
+ parser.add_argument("--zoom_config", action="store_true", help="Specify for configuring the devices")
+ parser.add_argument('--zoom_ssid', help='WiFi SSID for script objects to associate to')
+ parser.add_argument('--zoom_passwd', '--zoom_password', '--zoom_key', default="[BLANK]", help='WiFi passphrase/password/key')
+ parser.add_argument('--zoom_security', help='WiFi Security protocol: < open | wep | wpa | wpa2 | wpa3 >', default="open")
+ #Optional zoom config args
+ parser.add_argument("--zoom_eap_method", type=str, default='DEFAULT', help="Specify the EAP method for authentication.")
+ parser.add_argument("--zoom_eap_identity", type=str, default='', help="Specify the EAP identity for authentication.")
+ parser.add_argument("--zoom_ieee8021x", action="store_true", help='Enables 802.1X enterprise authentication for test stations.')
+ parser.add_argument("--zoom_ieee80211u", action="store_true", help='Enables IEEE 802.11u (Hotspot 2.0) support.')
+ parser.add_argument("--zoom_ieee80211w", type=int, default=1, help='Enables IEEE 802.11w (Management Frame Protection) support.')
+ parser.add_argument("--zoom_enable_pkc", action="store_true", help='Enables pkc support.')
+ parser.add_argument("--zoom_bss_transition", action="store_true", help='Enables BSS transition support.')
+ parser.add_argument("--zoom_power_save", action="store_true", help='Enables power-saving features.')
+ parser.add_argument("--zoom_disable_ofdma", action="store_true", help='Disables OFDMA support.')
+ parser.add_argument("--zoom_roam_ft_ds", action="store_true", help='Enables fast BSS transition (FT) support')
+ parser.add_argument("--zoom_key_management", type=str, default='DEFAULT', help='Specify the key management method (e.g., WPA-PSK, WPA-EAP')
+ parser.add_argument("--zoom_pairwise", type=str, default='NA')
+ parser.add_argument("--zoom_private_key", type=str, default='NA', help='Specify EAP private key certificate file.')
+ parser.add_argument("--zoom_ca_cert", type=str, default='NA', help='Specify the CA certificate file name')
+ parser.add_argument("--zoom_client_cert", type=str, default='NA', help='Specify the client certificate file name')
+ parser.add_argument("--zoom_pk_passwd", type=str, default='NA', help='Specify the password for the private key')
+ parser.add_argument("--zoom_pac_file", type=str, default='NA', help='Specify the pac file name')
+ # parser.add_argument('--zoom_file_name', type=str, help='Specify the file name containing group details. Example:file1')
+ # parser.add_argument('--zoom_group_name', type=str, help='Specify the groups name that contains a list of devices. Example: group1,group2')
+ # parser.add_argument('--zoom_profile_name', type=str, help='Specify the profile name to apply configurations to the devices.')
+ parser.add_argument("--zoom_wait_time", type=int, help='Specify the maximum time to wait for Configuration', default=60)
+
+
+ args = parser.parse_args()
+ args_dict = vars(args)
+ duration_dict = {}
+
+
+ print('argsss',args_dict)
+ # exit(0)
+ # validate_args(args_dict)
+ candela_apis = Candela(ip=args.mgr, port=args.mgr_port,order_priority=args.order_priority,test_name=args.test_name,result_dir=args.result_dir,dowebgui=args.dowebgui)
+ print(args)
+ test_map = {
+ "ping_test": (run_ping_test, "PING TEST"),
+ "http_test": (run_http_test, "HTTP TEST"),
+ "ftp_test": (run_ftp_test, "FTP TEST"),
+ "qos_test": (run_qos_test, "QoS TEST"),
+ "vs_test": (run_vs_test, "VIDEO STREAMING TEST"),
+ "thput_test": (run_thput_test, "THROUGHPUT TEST"),
+ "mcast_test": (run_mcast_test, "MULTICAST TEST"),
+ "yt_test": (run_yt_test, "YOUTUBE TEST"),
+ "rb_test": (run_rb_test, "REAL BROWSER TEST"),
+ "zoom_test": (run_zoom_test, "ZOOM TEST"),
+ }
+
+
+ if not args.series_tests and not args.parallel_tests:
+ logger.error("Please provide tests cases --parallel_tests or --series_tests")
+ logger.info(f"available tests are {test_map.keys()}")
+ exit(0)
+
+ flag=1
+ tests_to_run_series = []
+ tests_to_run_parallel = []
+ if args.series_tests:
+ tests_to_run_series = args.series_tests.split(',')
+ for test in tests_to_run_series:
+ if test not in test_map:
+ logger.error(f"{test} is not availble in test suite")
+ flag = 0
+ if args.parallel_tests:
+ tests_to_run_parallel = args.parallel_tests.split(',')
+ for test in tests_to_run_parallel:
+ if test not in test_map:
+ logger.error(f"{test} is not availble in test suite")
+ flag = 0
+
+
+ if not flag:
+ logger.info(f"availble tests are {test_map.keys()}")
+ exit(0)
+ if args.parallel_tests and (len(tests_to_run_parallel) != len(set(tests_to_run_parallel))):
+ logger.error("in -parallel dont specify duplicate tests")
+ exit(0)
+ duration_flag = False
+ if args.series_tests:
+ for test in args.series_tests.split(','):
+ if test == "thput_test":
+ duration_dict[test] = validate_time(args_dict[f"{test}_duration"])
+ elif test == "mcast_test":
+ duration_dict[test] = validate_time(args_dict[f"{test.split('_')[0]}_test_duration"])
+ elif test == "ping_test" or test == "zoom_test":
+ duration_dict[test] = "{} mins".format(args_dict["{}_duration".format(test.split('_')[0])])
+ else:
+ duration_dict[test] = validate_time(args_dict[f"{test.split('_')[0]}_duration"])
+ if args.parallel_tests:
+ for test in args.parallel_tests.split(','):
+ if test == "thput_test":
+ duration_dict[test] = validate_time(args_dict[f"{test}_duration"])
+ elif test == "mcast_test":
+ duration_dict[test] = validate_time(args_dict[f"{test.split('_')[0]}_test_duration"])
+ elif test == "ping_test" or test == "zoom_test":
+ duration_dict[test] = "{} mins".format(args_dict["{}_duration".format(test.split('_')[0])])
+ else:
+ duration_dict[test] = validate_time(args_dict[f"{test.split('_')[0]}_duration"])
+ for test_name,duration in duration_dict.items():
+ if duration == "wrong type":
+ duration_flag = True
+ print(f"wrong duration type for {test_name}")
+ if duration_flag:
+ exit(1)
+ candela_apis.duration_dict = duration_dict.copy()
+ # args.current = "series"
+ iszoom = 'zoom_test' in tests_to_run_parallel or 'zoom_test' in tests_to_run_series
+ isrb = 'rb_test' in tests_to_run_parallel or 'rb_test' in tests_to_run_series
+ isyt = 'yt_test' in tests_to_run_parallel or 'yt_test' in tests_to_run_series
+ candela_apis.series_tests = tests_to_run_series
+ candela_apis.parallel_tests = tests_to_run_parallel
+ candela_apis.misc_clean_up(layer3=True,layer4=True,generic=True,port_5000=iszoom,port_5002=isyt,port_5003=isrb)
+ if args.series_tests or args.parallel_tests:
+ series_threads = []
+ parallel_threads = []
+ parallel_connect = []
+ series_connect = []
+ rb_test = 'rb_test' in tests_to_run_parallel
+ yt_test = 'yt_test' in tests_to_run_parallel
+ zoom_test = 'zoom_test' in tests_to_run_parallel
+ # Process series tests
+ if args.series_tests:
+ ordered_series_tests = args.series_tests.split(',')
+ # ordered_parallel_tests = args.parallel_tests.split(',')
+ # phase 1
+ if args.dowebgui:
+ gen_order = ["ping_test","qos_test","ftp_test","http_test","mcast_test","vs_test","thput_test","rb_test","yt_test","zoom_test"]
+ temp_ord_list = []
+ for test_name in gen_order:
+ if test_name in ordered_series_tests:
+ temp_ord_list.append(test_name)
+ ordered_series_tests = temp_ord_list.copy()
+ for idx, test_name in enumerate(ordered_series_tests):
+ test_name = test_name.strip().lower()
+ if test_name in test_map:
+ func, label = test_map[test_name]
+ args.current = "series"
+ if test_name in ['rb_test','zoom_test','yt_test']:
+ if test_name == "rb_test":
+ obj_no = 1
+ while f"rb_test_{obj_no}" in candela_apis.rb_obj_dict["series"]:
+ obj_no+=1
+ obj_name = f"rb_test_{obj_no}"
+ candela_apis.rb_obj_dict["series"][obj_name] = manager.dict({"obj":None,"data":None})
+ print('hiii data',candela_apis.rb_obj_dict)
+ elif test_name == "yt_test":
+ obj_no = 1
+ while f"yt_test_{obj_no}" in candela_apis.yt_obj_dict["series"]:
+ obj_no+=1
+ obj_name = f"yt_test_{obj_no}"
+ candela_apis.yt_obj_dict["series"][obj_name] = manager.dict({"obj":None,"data":None})
+ elif test_name == "zoom_test":
+ obj_no = 1
+ while f"zoom_test_{obj_no}" in candela_apis.zoom_obj_dict["series"]:
+ obj_no+=1
+ obj_name = f"zoom_test_{obj_no}"
+ candela_apis.zoom_obj_dict["series"][obj_name] = manager.dict({"obj":None,"data":None})
+ print('hiii data',candela_apis.zoom_obj_dict)
+ series_threads.append(multiprocessing.Process(target=run_test_safe(func, f"{label} [Series {idx+1}]", args, candela_apis,duration_dict[test_name])))
+ else:
+ series_threads.append(threading.Thread(
+ target=run_test_safe(func, f"{label} [Series {idx+1}]", args, candela_apis,duration_dict[test_name])
+ ))
+ else:
+ print(f"Warning: Unknown test '{test_name}' in --series_tests")
+
+ # Process parallel tests
+ if args.parallel_tests:
+ ordered_parallel_tests = args.parallel_tests.split(',')
+ #phase 1
+ if args.dowebgui:
+ gen_order = ["ping_test","qos_test","ftp_test","http_test","mcast_test","vs_test","thput_test","rb_test","yt_test","zoom_test"]
+ temp_ord_list = []
+ for test_name in gen_order:
+ if test_name in ordered_parallel_tests:
+ temp_ord_list.append(test_name)
+ ordered_parallel_tests = temp_ord_list.copy()
+ for idx, test_name in enumerate(ordered_parallel_tests):
+ test_name = test_name.strip().lower()
+ if test_name in test_map:
+ func, label = test_map[test_name]
+ args.current = "parallel"
+ if test_name in ['rb_test','zoom_test','yt_test']:
+ # if test_name == "rb_test":
+ # candela_apis.rb_pipe_dict["parallel"][len(candela_apis.rb_pipe_dict["parallel"])] = {}
+ # candela_apis.rb_pipe_dict["parallel"][len(candela_apis.rb_pipe_dict["parallel"])]["parent"],candela_apis.rb_pipe_dict["parallel"][len(candela_apis.rb_pipe_dict["parallel"])]["child"] = multiprocessing.Pipe()
+ # parent_conn, child_conn = multiprocessing.Pipe()
+ # candela_apis.parallel_connect[idx] = [test_name,parent_conn,child_conn]
+ if test_name == "rb_test":
+ candela_apis.rb_obj_dict["parallel"]["rb_test"] = manager.dict({"obj": None, "data": None})
+ print('hiii data',candela_apis.rb_obj_dict)
+ elif test_name == "yt_test":
+ candela_apis.yt_obj_dict["parallel"]["yt_test"] = manager.dict({"obj": None, "data": None})
+ print('hiii data',candela_apis.yt_obj_dict)
+ elif test_name == "zoom_test":
+ candela_apis.zoom_obj_dict["parallel"]["zoom_test"] = manager.dict({"obj": None, "data": None})
+ print('hiii data',candela_apis.zoom_obj_dict)
+ parallel_threads.append(multiprocessing.Process(target=run_test_safe(func, f"{label} [Parallel {idx+1}]", args, candela_apis,duration_dict[test_name])))
+ else:
+ parallel_threads.append(threading.Thread(
+ target=run_test_safe(func, f"{label} [Parallel {idx+1}]", args, candela_apis,duration_dict[test_name])
+ ))
+ else:
+ print(f"Warning: Unknown test '{test_name}' in --parallel_tests")
+ logging.info(f"Series Threads: {series_threads}")
+ logging.info(f"Parallel Threads: {parallel_threads}")
+ logging.info(f"connections parallel {candela_apis.parallel_connect}")
+ logging.info(f"connections series{candela_apis.series_connect}")
+ # time.sleep(20)
+ if args.dowebgui:
+ # overall_path = os.path.join(args.result_dir, directory)
+ candela_apis.overall_status = {"ping": "notstarted", "qos": "notstarted", "ftp": "notstarted", "http": "notstarted",
+ "mc": "notstarted", "vs": "notstarted", "thput": "notstarted","rb": "notstarted","vs": "notstarted","zoom": "notstarted","yt": "notstarted", "time": datetime.datetime.now().strftime("%Y %d %H:%M:%S"), "status": "running", "current_mode":"tbd" , "current_test_name": "tbd"}
+ candela_apis.overall_csv.append(candela_apis.overall_status.copy())
+ df1 = pd.DataFrame(candela_apis.overall_csv)
+ df1.to_csv('{}/overall_status.csv'.format(args.result_dir), index=False)
+
+ if args.order_priority == 'series':
+ candela_apis.current_exec="series"
+ for t in series_threads:
+ t.start()
+ t.join()
+ candela_apis.series_index += 1
+ # Then run parallel tests
+ if len(parallel_threads) != 0:
+ # candela_apis.misc_clean_up(layer3=False,layer4=False,generic=True)
+ candela_apis.misc_clean_up(layer3=True,layer4=True,generic=True,port_5000=iszoom,port_5002=isyt,port_5003=isrb)
+ print('starting parallel tests.......')
+ time.sleep(10)
+ candela_apis.current_exec = "parallel"
+ for t in parallel_threads:
+ t.start()
+
+ candela_apis.parallel_index = 0
+ for t in parallel_threads:
+ t.join()
+ candela_apis.parallel_index += 1
+
+ else:
+ candela_apis.current_exec="parallel"
+ for t in parallel_threads:
+ t.start()
+ # for p in parallel_processes:
+ # p.start()
+
+ for t in parallel_threads:
+ t.join()
+
+ if len(series_threads) != 0:
+ rb_test = 'rb_test' in tests_to_run_parallel
+ yt_test = 'yt_test' in tests_to_run_parallel
+ candela_apis.misc_clean_up(layer3=True,layer4=True,generic=True,port_5000=iszoom,port_5002=isyt,port_5003=isrb)
+ print('starting Series tests.......')
+ time.sleep(5)
+ candela_apis.current_exec="series"
+ for t in series_threads:
+ t.start()
+ t.join()
+ # for p in series_processes:
+ # p.start()
+ # p.join()
+ # candela_apis.misc_clean_up(layer3=True,layer4=True,generic=True)
+ else:
+ logger.error("provide either --paralell_tests or --series_tests")
+ exit(1)
+ rb_test = 'rb_test' in tests_to_run_parallel
+ yt_test = 'yt_test' in tests_to_run_parallel
+ # candela_apis.browser_cleanup(rb_test=rb_test,yt_test=yt_test)
+ # candela_apis.misc_clean_up(layer3=False,layer4=False,generic=True)
+ candela_apis.misc_clean_up(layer3=True,layer4=True,generic=True,port_5000=iszoom,port_5002=isyt,port_5003=isrb)
+ log_file = save_logs()
+ print(f"Logs saved to: {log_file}")
+ test_results_df = pd.DataFrame(list(test_results_list))
+ # You can also access the test results dataframe:
+ candela_apis.generate_overall_report(test_results_df=test_results_df,args_dict=args_dict)
+ if candela_apis.dowebgui:
+ try:
+ candela_apis.overall_status["status"] = "completed"
+ candela_apis.overall_status["time"] = datetime.datetime.now().strftime("%Y %d %H:%M:%S")
+ candela_apis.overall_csv.append(candela_apis.overall_status.copy())
+ df1 = pd.DataFrame(candela_apis.overall_csv)
+ df1.to_csv('{}/overall_status.csv'.format(candela_apis.result_dir), index=False)
+ except Exception as e:
+ logging.info("Error while wrinting status file for webui", e)
+
+ print("\nTest Results Summary:")
+ print(test_results_df)
+ # candela_apis.overall_report.insert_table_at_marker(test_results_df,"for_table")
+ # candela_apis.overall_report.build_footer()
+ # html_file = candela_apis.overall_report.write_html()
+ # print("returned file {}".format(html_file))
+ # print(html_file)
+ # candela_apis.overall_report.write_pdf()
+
def run_test_safe(test_func, test_name, args, candela_apis, duration):
    """Build a thread/process target that runs *test_func* and records its outcome.

    The returned ``wrapper`` isolates the suite from individual test failures:
    it catches ``SystemExit`` (tests that call ``exit()``) and any other
    exception, logs the failure, accumulates tracebacks in the module-level
    ``error_logs`` string, and appends one result row (name, duration, status)
    to the shared ``test_results_list``.

    :param test_func: callable taking ``(args, candela_apis)`` and returning a
        truthy value on successful execution.
    :param test_name: label used in logs and the results table.
    :param duration: human-readable duration string stored with the result.
    :return: a zero-argument callable suitable as a Thread/Process target.
    """
    global error_logs

    def _record(status):
        # Single place that appends the per-test result row.
        test_results_list.append({"test_name": test_name, "Duration": duration, "status": status})

    def wrapper():
        global error_logs
        try:
            result = test_func(args, candela_apis)
            if not result:
                status = "NOT EXECUTED"
                logger.error(f"{test_name} NOT EXECUTED")
            else:
                status = "EXECUTED"
                logger.info(f"{test_name} EXECUTED")
            _record(status)

        except SystemExit as e:
            # Tests may call exit(); exit code 0 still counts as executed.
            # NOTE: exit() with no argument gives e.code == None -> NOT EXECUTED.
            status = "NOT EXECUTED" if e.code != 0 else "EXECUTED"
            tb = traceback.format_exc()
            print(tb)
            error_msg = f"{test_name} exited with code {e.code}\n"
            logger.error(error_msg)
            error_logs += error_msg
            _record(status)

        except Exception:
            # Unexpected crash: keep the suite running, capture the traceback.
            status = "NOT EXECUTED"
            error_msg = f"{test_name} crashed unexpectedly\n"
            logger.exception(error_msg)
            error_logs += error_msg + traceback.format_exc() + "\n"
            _record(status)

    return wrapper
+
def save_logs():
    """Save accumulated error logs to a timestamped file in base_class_logs directory.

    Writes the module-level ``error_logs`` string to
    ``base_class_logs/test_logs_<YYYYmmdd_HHMMSS>.txt``.

    :return: the path of the log file that was written.
    """
    global error_logs

    # Create directory if it doesn't exist
    log_dir = "base_class_logs"
    os.makedirs(log_dir, exist_ok=True)

    # Timestamped name so repeated runs never overwrite each other.
    timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
    log_filename = os.path.join(log_dir, f"test_logs_{timestamp}.txt")

    # Explicit encoding: tracebacks in error_logs may contain non-ASCII text
    # and the platform default encoding is not guaranteed to handle it.
    with open(log_filename, 'w', encoding='utf-8') as f:
        f.write(error_logs)

    logger.info(f"Test logs saved to {log_filename}")
    return log_filename
+
def run_ping_test(args, candela_apis):
    """Run the interop ping test, forwarding the ``--ping_*`` CLI options.

    :return: result of ``candela_apis.run_ping_test`` (truthy when executed).
    """
    return candela_apis.run_ping_test(
        real=True,
        target=args.ping_target,
        ping_interval=args.ping_interval,
        ping_duration=args.ping_duration,
        # Use the default device config only when no explicit --ping_config
        # was supplied.
        use_default_config=not args.ping_config,
        dev_list=args.ping_device_list,
        expected_passfail_value=args.ping_expected_passfail_value,
        device_csv_name=args.ping_device_csv_name,
        file_name=args.ping_file_name,
        group_name=args.ping_group_name,
        profile_name=args.ping_profile_name,
        ssid=args.ping_ssid,
        passwd=args.ping_passwd,
        security=args.ping_security,
        eap_method=args.ping_eap_method,
        eap_identity=args.ping_eap_identity,
        ieee8021x=args.ping_ieee8021x,
        ieee80211u=args.ping_ieee80211u,
        ieee80211w=args.ping_ieee80211w,
        enable_pkc=args.ping_enable_pkc,
        bss_transition=args.ping_bss_transition,
        power_save=args.ping_power_save,
        disable_ofdma=args.ping_disable_ofdma,
        roam_ft_ds=args.ping_roam_ft_ds,
        key_management=args.ping_key_management,
        pairwise=args.ping_pairwise,
        private_key=args.ping_private_key,
        ca_cert=args.ping_ca_cert,
        client_cert=args.ping_client_cert,
        pk_passwd=args.ping_pk_passwd,
        pac_file=args.ping_pac_file,
        wait_time=args.ping_wait_time,
        # WebGUI runs report into the GUI result dir; CLI runs use the
        # Candela object's own result path.
        local_lf_report_dir=args.result_dir if args.dowebgui else candela_apis.result_path
    )
+
def run_http_test(args, candela_apis):
    """Run the HTTP (webpage) test, forwarding the ``--http_*`` CLI options.

    :return: result of ``candela_apis.run_http_test`` (truthy when executed).
    """
    params = {
        "upstream_port": args.upstream_port,
        "bands": args.http_bands,
        "duration": args.http_duration,
        "file_size": args.http_file_size,
        "device_list": args.http_device_list,
        "expected_passfail_value": args.http_expected_passfail_value,
        "device_csv_name": args.http_device_csv_name,
        "file_name": args.http_file_name,
        "group_name": args.http_group_name,
        "profile_name": args.http_profile_name,
        "config": args.http_config,
        "ssid": args.http_ssid,
        "passwd": args.http_passwd,
        "security": args.http_security,
        "eap_method": args.http_eap_method,
        "eap_identity": args.http_eap_identity,
        "ieee8021x": args.http_ieee8021x,
        "ieee80211u": args.http_ieee80211u,
        "ieee80211w": args.http_ieee80211w,
        "enable_pkc": args.http_enable_pkc,
        "bss_transition": args.http_bss_transition,
        "power_save": args.http_power_save,
        "disable_ofdma": args.http_disable_ofdma,
        "roam_ft_ds": args.http_roam_ft_ds,
        "key_management": args.http_key_management,
        "pairwise": args.http_pairwise,
        "private_key": args.http_private_key,
        "ca_cert": args.http_ca_cert,
        "client_cert": args.http_client_cert,
        "pk_passwd": args.http_pk_passwd,
        "pac_file": args.http_pac_file,
        "wait_time": args.http_wait_time,
        "dowebgui": args.dowebgui,
        "test_name": args.test_name,
        "result_dir": args.result_dir,
    }
    return candela_apis.run_http_test(**params)
+
def run_ftp_test(args, candela_apis):
    """Run the FTP test, forwarding the ``--ftp_*`` CLI options.

    :return: result of ``candela_apis.run_ftp_test`` (truthy when executed).
    """
    params = {
        "device_list": args.ftp_device_list,
        # The FTP test API expects a list of sizes; the CLI takes one.
        "file_sizes": [args.ftp_file_size],
        "traffic_duration": args.ftp_duration,
        "bands": args.ftp_bands,
        "expected_passfail_value": args.ftp_expected_passfail_value,
        "device_csv_name": args.ftp_device_csv_name,
        "file_name": args.ftp_file_name,
        "group_name": args.ftp_group_name,
        "profile_name": args.ftp_profile_name,
        "config": args.ftp_config,
        "ssid": args.ftp_ssid,
        "passwd": args.ftp_passwd,
        "security": args.ftp_security,
        "eap_method": args.ftp_eap_method,
        "eap_identity": args.ftp_eap_identity,
        "ieee8021x": args.ftp_ieee8021x,
        "ieee80211u": args.ftp_ieee80211u,
        "ieee80211w": args.ftp_ieee80211w,
        "enable_pkc": args.ftp_enable_pkc,
        "bss_transition": args.ftp_bss_transition,
        "power_save": args.ftp_power_save,
        "disable_ofdma": args.ftp_disable_ofdma,
        "roam_ft_ds": args.ftp_roam_ft_ds,
        "key_management": args.ftp_key_management,
        "pairwise": args.ftp_pairwise,
        "private_key": args.ftp_private_key,
        "ca_cert": args.ftp_ca_cert,
        "client_cert": args.ftp_client_cert,
        "pk_passwd": args.ftp_pk_passwd,
        "pac_file": args.ftp_pac_file,
        "wait_time": args.ftp_wait_time,
        # Downstream API expects the string "True" (not a bool) when webgui.
        "dowebgui": "True" if args.dowebgui else False,
        "test_name": args.test_name,
        "result_dir": args.result_dir,
        "upstream_port": args.upstream_port,
    }
    return candela_apis.run_ftp_test(**params)
+
+def run_qos_test(args, candela_apis):
+ print("QOS_LIST",args.qos_device_list)
+ return candela_apis.run_qos_test(
+ upstream_port=args.upstream_port,
+ test_duration=args.qos_duration,
+ download=args.qos_download,
+ upload=args.qos_upload,
+ traffic_type=args.qos_traffic_type,
+ tos=args.qos_tos,
+ device_list=args.qos_device_list,
+ expected_passfail_value=args.qos_expected_passfail_value,
+ device_csv_name=args.qos_device_csv_name,
+ file_name=args.qos_file_name,
+ group_name=args.qos_group_name,
+ profile_name=args.qos_profile_name,
+ config=args.qos_config,
+ ssid=args.qos_ssid,
+ passwd=args.qos_passwd,
+ security=args.qos_security,
+ eap_method=args.qos_eap_method,
+ eap_identity=args.qos_eap_identity,
+ ieee8021x=args.qos_ieee8021x,
+ ieee80211u=args.qos_ieee80211u,
+ ieee80211w=args.qos_ieee80211w,
+ enable_pkc=args.qos_enable_pkc,
+ bss_transition=args.qos_bss_transition,
+ power_save=args.qos_power_save,
+ disable_ofdma=args.qos_disable_ofdma,
+ roam_ft_ds=args.qos_roam_ft_ds,
+ key_management=args.qos_key_management,
+ pairwise=args.qos_pairwise,
+ private_key=args.qos_private_key,
+ ca_cert=args.qos_ca_cert,
+ client_cert=args.qos_client_cert,
+ pk_passwd=args.qos_pk_passwd,
+ pac_file=args.qos_pac_file,
+ wait_time=args.qos_wait_time,
+ dowebgui="True" if args.dowebgui else False,
+ test_name=args.test_name,
+ result_dir=args.result_dir
+ )
+
def run_vs_test(args, candela_apis):
    """Run the video-streaming test, forwarding the ``--vs_*`` CLI options.

    :return: result of ``candela_apis.run_vs_test1`` (truthy when executed).
    """
    params = {
        "url": args.vs_url,
        "media_source": args.vs_media_source,
        "media_quality": args.vs_media_quality,
        "duration": args.vs_duration,
        "device_list": args.vs_device_list,
        "expected_passfail_value": args.vs_expected_passfail_value,
        "device_csv_name": args.vs_device_csv_name,
        "file_name": args.vs_file_name,
        "group_name": args.vs_group_name,
        "profile_name": args.vs_profile_name,
        "config": args.vs_config,
        "ssid": args.vs_ssid,
        "passwd": args.vs_passwd,
        # NOTE: the video-streaming API names this parameter "encryp".
        "encryp": args.vs_security,
        "eap_method": args.vs_eap_method,
        "eap_identity": args.vs_eap_identity,
        "ieee8021x": args.vs_ieee8021x,
        "ieee80211u": args.vs_ieee80211u,
        "ieee80211w": args.vs_ieee80211w,
        "enable_pkc": args.vs_enable_pkc,
        "bss_transition": args.vs_bss_transition,
        "power_save": args.vs_power_save,
        "disable_ofdma": args.vs_disable_ofdma,
        "roam_ft_ds": args.vs_roam_ft_ds,
        "key_management": args.vs_key_management,
        "pairwise": args.vs_pairwise,
        "private_key": args.vs_private_key,
        "ca_cert": args.vs_ca_cert,
        "client_cert": args.vs_client_cert,
        "pk_passwd": args.vs_pk_passwd,
        "pac_file": args.vs_pac_file,
        "wait_time": args.vs_wait_time,
        "upstream_port": args.upstream_port,
        "dowebgui": args.dowebgui,
        "test_name": args.test_name,
        "result_dir": args.result_dir,
    }
    return candela_apis.run_vs_test1(**params)
+
def run_thput_test(args, candela_apis):
    """Run the throughput test, forwarding the ``--thput_*`` CLI options.

    :return: result of ``candela_apis.run_throughput_test`` (truthy when
        executed).
    """
    default_config = args.thput_default_config
    config = args.thput_config
    # Interoperability mode drives its own per-device configuration, so both
    # generic config paths are disabled for that run.  Compute the values
    # locally instead of mutating the shared ``args`` namespace, which other
    # (possibly concurrently running) test wrappers also read.
    if args.thput_do_interopability and config:
        default_config = False
        config = False
    return candela_apis.run_throughput_test(
        upstream_port=args.upstream_port,
        test_duration=args.thput_test_duration,
        download=args.thput_download,
        upload=args.thput_upload,
        traffic_type=args.thput_traffic_type,
        device_list=args.thput_device_list,
        do_interopability=args.thput_do_interopability,
        default_config=default_config,
        expected_passfail_value=args.thput_expected_passfail_value,
        device_csv_name=args.thput_device_csv_name,
        file_name=args.thput_file_name,
        group_name=args.thput_group_name,
        profile_name=args.thput_profile_name,
        config=config,
        ssid=args.thput_ssid,
        passwd=args.thput_passwd,
        security=args.thput_security,
        eap_method=args.thput_eap_method,
        eap_identity=args.thput_eap_identity,
        ieee8021x=args.thput_ieee8021x,
        ieee80211u=args.thput_ieee80211u,
        ieee80211w=args.thput_ieee80211w,
        enable_pkc=args.thput_enable_pkc,
        bss_transition=args.thput_bss_transition,
        power_save=args.thput_power_save,
        disable_ofdma=args.thput_disable_ofdma,
        roam_ft_ds=args.thput_roam_ft_ds,
        key_management=args.thput_key_management,
        pairwise=args.thput_pairwise,
        private_key=args.thput_private_key,
        ca_cert=args.thput_ca_cert,
        client_cert=args.thput_client_cert,
        pk_passwd=args.thput_pk_passwd,
        pac_file=args.thput_pac_file,
        wait_time=args.thput_wait_time,
        load_type=args.thput_load_type,
        packet_size=args.thput_packet_size,
        dowebgui=args.dowebgui,
        test_name=args.test_name,
        result_dir=args.result_dir
    )
+
def run_mcast_test(args, candela_apis):
    """Run the multicast test, forwarding the ``--mcast_*`` CLI options.

    :return: result of ``candela_apis.run_mc_test1`` (truthy when executed).
    """
    return candela_apis.run_mc_test1(
        test_duration=args.mcast_test_duration,
        upstream_port=args.upstream_port,
        endp_type=args.mcast_endp_type,
        side_b_min_bps=args.mcast_side_b_min_bps,
        tos=args.mcast_tos,
        device_list=args.mcast_device_list,
        expected_passfail_value=args.mcast_expected_passfail_value,
        device_csv_name=args.mcast_device_csv_name,
        file_name=args.mcast_file_name,
        group_name=args.mcast_group_name,
        profile_name=args.mcast_profile_name,
        config=args.mcast_config,
        ssid=args.mcast_ssid,
        passwd=args.mcast_passwd,
        security=args.mcast_security,
        eap_method=args.mcast_eap_method,
        eap_identity=args.mcast_eap_identity,
        ieee8021x=args.mcast_ieee8021x,
        ieee80211u=args.mcast_ieee80211u,
        ieee80211w=args.mcast_ieee80211w,
        enable_pkc=args.mcast_enable_pkc,
        bss_transition=args.mcast_bss_transition,
        # Fixed copy-paste bug: power_save previously forwarded
        # args.mcast_ieee8021x instead of the power-save option.
        power_save=args.mcast_power_save,
        disable_ofdma=args.mcast_disable_ofdma,
        roam_ft_ds=args.mcast_roam_ft_ds,
        key_management=args.mcast_key_management,
        pairwise=args.mcast_pairwise,
        private_key=args.mcast_private_key,
        ca_cert=args.mcast_ca_cert,
        client_cert=args.mcast_client_cert,
        pk_passwd=args.mcast_pk_passwd,
        pac_file=args.mcast_pac_file,
        wait_time=args.mcast_wait_time,
        # Downstream API expects the string "True" (not a bool) when webgui.
        dowebgui="True" if args.dowebgui else False,
        test_name=args.test_name,
        result_dir=args.result_dir
    )
+
def run_yt_test(args, candela_apis):
    """Run the YouTube streaming test, forwarding the ``--yt_*`` CLI options.

    :return: result of ``candela_apis.run_yt_test`` (truthy when executed).
    """
    return candela_apis.run_yt_test(
        url=args.yt_url,
        duration=args.yt_duration,
        res=args.yt_res,
        upstream_port=args.upstream_port,
        resource_list=args.yt_device_list,
        expected_passfail_value=args.yt_expected_passfail_value,
        device_csv_name=args.yt_device_csv_name,
        file_name=args.yt_file_name,
        group_name=args.yt_group_name,
        profile_name=args.yt_profile_name,
        config=args.yt_config,
        ssid=args.yt_ssid,
        passwd=args.yt_passwd,
        # NOTE: the YT test API names this parameter "encryp".
        encryp=args.yt_security,
        eap_method=args.yt_eap_method,
        eap_identity=args.yt_eap_identity,
        ieee8021x=args.yt_ieee8021x,
        ieee80211u=args.yt_ieee80211u,
        ieee80211w=args.yt_ieee80211w,
        enable_pkc=args.yt_enable_pkc,
        bss_transition=args.yt_bss_transition,
        # Fixed copy-paste bug: power_save previously forwarded
        # args.yt_ieee8021x instead of the power-save option.
        power_save=args.yt_power_save,
        disable_ofdma=args.yt_disable_ofdma,
        roam_ft_ds=args.yt_roam_ft_ds,
        key_management=args.yt_key_management,
        pairwise=args.yt_pairwise,
        private_key=args.yt_private_key,
        ca_cert=args.yt_ca_cert,
        client_cert=args.yt_client_cert,
        pk_passwd=args.yt_pk_passwd,
        pac_file=args.yt_pac_file,
        # "series" or "parallel" -- set by the scheduler before launch.
        exec_type=args.current,
        do_webUI="True" if args.dowebgui else False,
        ui_report_dir=args.result_dir,
        test_name=args.test_name
    )
+
def run_rb_test(args, candela_apis):
    """Run the real-browser test, forwarding the ``--rb_*`` CLI options.

    :return: result of ``candela_apis.run_rb_test`` (truthy when executed).
    """
    params = {
        "url": args.rb_url,
        "upstream_port": args.upstream_port,
        "device_list": args.rb_device_list,
        "expected_passfail_value": args.rb_expected_passfail_value,
        "device_csv_name": args.rb_device_csv_name,
        "file_name": args.rb_file_name,
        "group_name": args.rb_group_name,
        "profile_name": args.rb_profile_name,
        "config": args.rb_config,
        "ssid": args.rb_ssid,
        "passwd": args.rb_passwd,
        # NOTE: the real-browser API names this parameter "encryp".
        "encryp": args.rb_security,
        "eap_method": args.rb_eap_method,
        "eap_identity": args.rb_eap_identity,
        "ieee80211": args.rb_ieee80211,
        "ieee80211u": args.rb_ieee80211u,
        "ieee80211w": args.rb_ieee80211w,
        "enable_pkc": args.rb_enable_pkc,
        "bss_transition": args.rb_bss_transition,
        "power_save": args.rb_power_save,
        "disable_ofdma": args.rb_disable_ofdma,
        "roam_ft_ds": args.rb_roam_ft_ds,
        "key_management": args.rb_key_management,
        "pairwise": args.rb_pairwise,
        "private_key": args.rb_private_key,
        "ca_cert": args.rb_ca_cert,
        "client_cert": args.rb_client_cert,
        "pk_passwd": args.rb_pk_passwd,
        "pac_file": args.rb_pac_file,
        "wait_time": args.rb_wait_time,
        "duration": args.rb_duration,
        # "series" or "parallel" -- set by the scheduler before launch.
        "exec_type": args.current,
        "count": args.rb_count,
        "dowebgui": args.dowebgui,
        "result_dir": args.result_dir,
        "test_name": args.test_name,
        "webgui_incremental": args.rb_webgui_incremental,
    }
    return candela_apis.run_rb_test(**params)
+
def run_zoom_test(args, candela_apis):
    """Run the Zoom meeting test, forwarding the ``--zoom_*`` CLI options.

    :return: result of ``candela_apis.run_zoom_test`` (truthy when executed).
    """
    params = {
        "duration": args.zoom_duration,
        "signin_email": args.zoom_signin_email,
        "signin_passwd": args.zoom_signin_passwd,
        "participants": args.zoom_participants,
        "audio": args.zoom_audio,
        "video": args.zoom_video,
        "upstream_port": args.upstream_port,
        "resource_list": args.zoom_device_list,
        "zoom_host": args.zoom_host,
        "expected_passfail_value": args.zoom_expected_passfail_value,
        "device_csv_name": args.zoom_device_csv_name,
        "file_name": args.zoom_file_name,
        "group_name": args.zoom_group_name,
        "profile_name": args.zoom_profile_name,
        "config": args.zoom_config,
        "ssid": args.zoom_ssid,
        "passwd": args.zoom_passwd,
        # NOTE: the Zoom test API names this parameter "encryp".
        "encryp": args.zoom_security,
        "eap_method": args.zoom_eap_method,
        "eap_identity": args.zoom_eap_identity,
        "ieee8021x": args.zoom_ieee8021x,
        "ieee80211u": args.zoom_ieee80211u,
        "ieee80211w": args.zoom_ieee80211w,
        "enable_pkc": args.zoom_enable_pkc,
        "bss_transition": args.zoom_bss_transition,
        "power_save": args.zoom_power_save,
        "disable_ofdma": args.zoom_disable_ofdma,
        "roam_ft_ds": args.zoom_roam_ft_ds,
        "key_management": args.zoom_key_management,
        "pairwise": args.zoom_pairwise,
        "private_key": args.zoom_private_key,
        "ca_cert": args.zoom_ca_cert,
        "client_cert": args.zoom_client_cert,
        "pk_passwd": args.zoom_pk_passwd,
        "pac_file": args.zoom_pac_file,
        "wait_time": args.zoom_wait_time,
        # "series" or "parallel" -- set by the scheduler before launch.
        "exec_type": args.current,
        "do_webUI": args.dowebgui,
        "report_dir": args.result_dir,
        "testname": args.test_name,
    }
    return candela_apis.run_zoom_test(**params)
if __name__ == "__main__":
    # Guard the entry point: this module launches multiprocessing.Process
    # workers, which re-import the module in child interpreters (spawn start
    # method); an unguarded main() call would re-run the whole suite there.
    main()
diff --git a/py-scripts/candela_base_class.py b/py-scripts/candela_base_class.py
new file mode 100644
index 000000000..7edbd16f4
--- /dev/null
+++ b/py-scripts/candela_base_class.py
@@ -0,0 +1,9894 @@
+import asyncio
+import importlib
+import datetime
+from datetime import datetime, timedelta
+import time
+import requests
+# echo Performing POST cleanup of browser processes... & taskkill /F /IM chrome.exe /T >nul 2>&1 & taskkill /F /IM chromedriver.exe /T >nul 2>&1 & echo Browser processes terminated.
+# cmd /c "echo Performing POST cleanup of browser processes... && taskkill /F /IM chrome.exe /T >nul 2>&1 && taskkill /F /IM chromedriver.exe /T >nul 2>&1 && echo Browser processes terminated."
+import paramiko
+import threading
+import logging
+from lf_graph import lf_bar_graph_horizontal,lf_bar_graph,lf_line_graph
+import pandas as pd
+from lf_base_interop_profile import RealDevice
+from lf_ftp import FtpTest
+import lf_webpage as http_test
+import multiprocessing
+import lf_interop_qos as qos_test
+import lf_interop_ping as ping_test
+from lf_interop_throughput import Throughput
+from lf_interop_video_streaming import VideoStreamingTest
+# from lf_interop_real_browser_test import RealBrowserTest
+from test_l3 import L3VariableTime,change_port_to_ip,configure_reporting,query_real_clients,valid_endp_types
+from lf_kpi_csv import lf_kpi_csv
+import lf_cleanup
+import os
+import sys
+lf_kpi_csv = importlib.import_module("py-scripts.lf_kpi_csv")
+import argparse
+import json
+import traceback
+from types import SimpleNamespace
+import matplotlib
+import csv
+import matplotlib.pyplot as plt
+from pathlib import Path
# --- Module-level wiring ----------------------------------------------------
# Dynamically load the realm module so the Candela class below can inherit Realm.
realm = importlib.import_module("py-json.realm")
Realm = realm.Realm
# Accumulated error text across test runs (module-global state).
error_logs = ""
# objj = "obj"
# Overall pass/fail table built up as individual tests complete.
test_results_df = pd.DataFrame(columns=['test_name', 'status'])
matplotlib.use('Agg') # Before importing pyplot
# NOTE(review): pyplot is imported above this line, so the 'Agg' backend switch
# actually happens after import; recent matplotlib tolerates this, but the
# inline comment is stale — confirm the headless backend really takes effect.
base_path = os.getcwd()
print('base path',base_path)
# Make the sibling py-json / LANforge / py-scripts trees importable.
# NOTE(review): assumes the process is started from the repository root,
# since base_path is os.getcwd() — verify for service/cron invocations.
sys.path.insert(0, os.path.join(base_path, 'py-json')) # for interop_connectivity, LANforge
sys.path.insert(0, os.path.join(base_path, 'py-json', 'LANforge')) # for LFUtils
sys.path.insert(0, os.path.join(base_path, 'py-scripts')) # for lf_logger_config
# Individual test drivers, loaded dynamically by dotted path.
througput_test=importlib.import_module("py-scripts.lf_interop_throughput")
video_streaming_test=importlib.import_module("py-scripts.lf_interop_video_streaming")
web_browser_test=importlib.import_module("py-scripts.real_application_tests.real_browser.lf_interop_real_browser_test")
zoom_test=importlib.import_module("py-scripts.real_application_tests.zoom_automation.lf_interop_zoom")
yt_test=importlib.import_module("py-scripts.real_application_tests.youtube.lf_interop_youtube")
lf_report_pdf = importlib.import_module("py-scripts.lf_report")
lf_logger_config = importlib.import_module("py-scripts.lf_logger_config")
logger = logging.getLogger(__name__)
# Bind the test classes out of the dynamically loaded modules.
RealBrowserTest = getattr(web_browser_test, "RealBrowserTest")
Youtube = getattr(yt_test, "Youtube")
ZoomAutomation = getattr(zoom_test, "ZoomAutomation")
DeviceConfig=importlib.import_module("py-scripts.DeviceConfig")
# from py_scripts import lf_logger_config, interop_connectivity
from lf_interop_ping import Ping
# from LANforge.LFUtils import LFUtils
import sys
import os
# Shared multiprocessing manager so tests launched in child processes can
# publish results back to this module (see test_results_list and the
# manager.dict() structures in Candela.__init__).
from multiprocessing import Manager
manager = Manager()
test_results_list = manager.list()
# BASE PATH: /home/sidartha/project/lanforge-scripts
# base_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))

# # Add py-json and LANforge to sys.path
# sys.path.insert(0, os.path.join(base_path, 'py-json')) # for interop_connectivity
# sys.path.insert(0, os.path.join(base_path, 'py-json', 'LANforge')) # for LFUtils
# sys.path.insert(0, os.path.join(base_path, 'py-scripts')) # for lf_logger_config

# import LFUtils
# import lf_logger_config
# import interop_connectivity
# Fallback path entries for installations where cwd is not the repo root.
if 'py-json' not in sys.path:
    sys.path.append(os.path.join(os.path.abspath('..'), 'py-json'))

if 'py-scripts' not in sys.path:
    sys.path.append('/home/lanforge/lanforge-scripts/py-scripts')
lf_report = importlib.import_module("py-scripts.lf_report")
from station_profile import StationProfile
import interop_connectivity
from LANforge import LFUtils
+class Candela(Realm):
+ """
+ Candela Class file to invoke different scripts from py-scripts.
+ """
+
+ def __init__(self, ip='localhost', port=8080,order_priority="series"):
+ """
+ Constructor to initialize the LANforge IP and port
+ Args:
+ ip (str, optional): LANforge IP. Defaults to 'localhost'.
+ port (int, optional): LANforge port. Defaults to 8080.
+ """
+ super().__init__(lfclient_host=ip,
+ lfclient_port=port)
+ self.lanforge_ip = ip
+ self.port = port
+ self.api_url = 'http://{}:{}'.format(self.lanforge_ip, self.port)
+ self.cleanup = lf_cleanup.lf_clean(host=self.lanforge_ip, port=self.port, resource='all')
+ self.ftp_test = None
+ self.http_test = None
+ self.generic_endps_profile = self.new_generic_endp_profile()
+ self.iterations_before_test_stopped_by_user=None
+ self.incremental_capacity_list=None
+ self.all_dataframes=None
+ self.to_run_cxs_len=None
+ self.date=None
+ self.test_setup_info=None
+ self.individual_df=None
+ self.cx_order_list=None
+ self.dataset2=None
+ self.dataset = None
+ self.lis = None
+ self.bands = None
+ self.total_urls = None
+ self.uc_min_value = None
+ self.cx_order_list = None
+ self.gave_incremental=None
+ self.result_path = os.getcwd()
+ self.test_count_dict = {}
+ self.current_exec = "series"
+ self.order_priority = order_priority
+ self.obj_dict = {}
+ self.http_obj_dict = {"parallel":{},"series":{}}
+ self.ftp_obj_dict = {"parallel":{},"series":{}}
+ self.thput_obj_dict = {"parallel":{},"series":{}}
+ self.qos_obj_dict = {"parallel":{},"series":{}}
+ self.ping_obj_dict = {"parallel":{},"series":{}}
+ self.mcast_obj_dict = {"parallel":{},"series":{}}
+ self.rb_obj_dict = {"parallel":{},"series":{}}
+ self.yt_obj_dict = {"parallel":{},"series":{}}
+ self.zoom_obj_dict = {"parallel":{},"series":{}}
+ self.vs_obj_dict = {"parallel":{},"series":{}}
+ self.rb_obj_dict = manager.dict({
+ "parallel": manager.dict(),
+ "series": manager.dict()
+ })
+ # self.rb_obj_dict = manager.dict({
+ # "parallel": manager.dict(),
+ # "series": manager.dict()
+ # })
+ # self.rb_pipe_dict = {"parallel":{},"series":{}}
+ # self.yt_obj_dict = manager.dict({"parallel": {}, "series": {}})
+ # self.zoom_obj_dict = manager.dict({"parallel": {}, "series": {}})
+ self.parallel_connect = {}
+ self.series_connect = {}
+ self.parallel_index = 0
+ self.series_index = 0
+
+ def api_get(self, endp: str):
+ """
+ Sends a GET request to fetch data
+
+ Args:
+ endp (str): API endpoint
+
+ Returns:
+ response: response code for the request
+ data: data returned in the response
+ """
+ if endp[0] != '/':
+ endp = '/' + endp
+ response = requests.get(url=self.api_url + endp)
+ data = response.json()
+ return response, data
+
+ def api_post(self, endp: str, payload: dict):
+ """
+ Sends POST request
+
+ Args:
+ endp (str): API endpoint
+ payload (dict): Endpoint data in JSON format
+
+ Returns:
+ response: response code for the request
+ None if endpoint is invalid
+ """
+ if endp == '' or endp is None:
+ logger.info('Invalid endpoint specified.')
+ return False
+ if endp[0] != '/':
+ endp = '/' + endp
+ response = requests.post(url=self.api_url + endp, json=payload)
+ return response
+
+ def port_clean_up(self,port_no):
+ print('port cleanup......')
+ time.sleep(5)
+ hostname = self.lanforge_ip
+ username = "root"
+ password = "lanforge"
+ ports = []
+ ports.append(port_no)
+ # ssh = paramiko.SSHClient()
+ # ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+ # ssh.connect(hostname, username=username, password=password)
+
+ # for cmd in commands:
+ # print(f"--- Running: {cmd} ---")
+ # stdin, stdout, stderr = ssh.exec_command(cmd)
+ # print("Output:\n", stdout.read().decode())
+ # print("Errors:\n", stderr.read().decode())
+ # ssh.close()
+
+ ssh = paramiko.SSHClient()
+ ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+ ssh.connect(hostname, username=username, password=password)
+
+ # for port in ports:
+ # print(f"\n--- Checking port {port} ---")
+
+ # try:
+ # check_cmd = f"lsof -i :{port}"
+ # stdin, stdout, stderr = ssh.exec_command(check_cmd, timeout=10) # ⬅ timeout added
+ # output = stdout.read().decode().strip()
+ # error = stderr.read().decode().strip()
+
+ # if output:
+ # print(f"Processes using port {port}:\n{output}")
+
+ # # kill_cmd = f"fuser -kv {port}/tcp"
+ # # kill_cmd = f"fuser -k {port}/tcp"
+ # kill_cmd = f"fuser -k {port}/tcp || true"
+ # stdin, stdout, stderr = ssh.exec_command(kill_cmd, timeout=10)
+ # print("Kill Output:\n", stdout.read().decode())
+ # print("Kill Errors:\n", stderr.read().decode())
+ # else:
+ # print(f"No process found on port {port}")
+
+ # except Exception as e:
+ # print(f"Error checking port {port}: {e}")
+
+ for port in ports:
+ print(f"\n--- Checking port {port} ---")
+
+ try:
+ # Get only the PIDs of processes using this port
+ check_cmd = f"lsof -t -i:{port}"
+ stdin, stdout, stderr = ssh.exec_command(check_cmd, timeout=10)
+ pids = stdout.read().decode().strip().splitlines()
+
+ if pids:
+ print(f"Processes using port {port}: {', '.join(pids)}")
+
+ # Kill each PID safely
+ for pid in pids:
+ kill_cmd = f"kill -9 {pid}"
+ ssh.exec_command(kill_cmd, timeout=10)
+ print(f"Killed PID {pid} on port {port}")
+ else:
+ print(f"No process found on port {port}")
+
+ except Exception as e:
+ print(f"Error checking port {port}: {e}")
+
+ ssh.close()
+
+
+ def misc_clean_up(self,layer3=False,layer4=False,generic=False,port_5000=False,port_5002=False,port_5003=False):
+ """
+ Use for the cleanup of cross connections
+ arguments:
+ layer3: (Boolean : optional) Default : False To Delete all layer3 connections
+ layer4: (Boolean : optional) Default : False To Delete all layer4 connections
+ """
+ if layer3:
+ self.cleanup.cxs_clean()
+ self.cleanup.layer3_endp_clean()
+ if layer4:
+ self.cleanup.layer4_endp_clean()
+ if generic:
+ resp = self.json_get('/generic?fields=name')
+ if 'endpoints' in resp:
+ for i in resp['endpoints']:
+ if list(i.values())[0]['name']:
+ self.generic_endps_profile.created_cx.append('CX_' + list(i.values())[0]['name'])
+ self.generic_endps_profile.created_endp.append(list(i.values())[0]['name'])
+ self.generic_endps_profile.cleanup()
+ # if port_5000 or port_5002 or port_5003:
+ # print('port cleanup......')
+ # time.sleep(5)
+ # hostname = self.lanforge_ip
+ # username = "root"
+ # password = "lanforge"
+ # ports = []
+ # if port_5003:
+ # ports.append(5003)
+ # if port_5000:
+ # ports.append(5000)
+ # if port_5002:
+ # ports.append(5002)
+ # # ssh = paramiko.SSHClient()
+ # # ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+ # # ssh.connect(hostname, username=username, password=password)
+
+ # # for cmd in commands:
+ # # print(f"--- Running: {cmd} ---")
+ # # stdin, stdout, stderr = ssh.exec_command(cmd)
+ # # print("Output:\n", stdout.read().decode())
+ # # print("Errors:\n", stderr.read().decode())
+ # # ssh.close()
+
+ # ssh = paramiko.SSHClient()
+ # ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+ # ssh.connect(hostname, username=username, password=password)
+
+ # # for port in ports:
+ # # print(f"\n--- Checking port {port} ---")
+
+ # # try:
+ # # check_cmd = f"lsof -i :{port}"
+ # # stdin, stdout, stderr = ssh.exec_command(check_cmd, timeout=10) # ⬅ timeout added
+ # # output = stdout.read().decode().strip()
+ # # error = stderr.read().decode().strip()
+
+ # # if output:
+ # # print(f"Processes using port {port}:\n{output}")
+
+ # # # kill_cmd = f"fuser -kv {port}/tcp"
+ # # # kill_cmd = f"fuser -k {port}/tcp"
+ # # kill_cmd = f"fuser -k {port}/tcp || true"
+ # # stdin, stdout, stderr = ssh.exec_command(kill_cmd, timeout=10)
+ # # print("Kill Output:\n", stdout.read().decode())
+ # # print("Kill Errors:\n", stderr.read().decode())
+ # # else:
+ # # print(f"No process found on port {port}")
+
+ # # except Exception as e:
+ # # print(f"Error checking port {port}: {e}")
+
+ # for port in ports:
+ # print(f"\n--- Checking port {port} ---")
+
+ # try:
+ # # Get only the PIDs of processes using this port
+ # check_cmd = f"lsof -t -i:{port}"
+ # stdin, stdout, stderr = ssh.exec_command(check_cmd, timeout=10)
+ # pids = stdout.read().decode().strip().splitlines()
+
+ # if pids:
+ # print(f"Processes using port {port}: {', '.join(pids)}")
+
+ # # Kill each PID safely
+ # for pid in pids:
+ # kill_cmd = f"kill -9 {pid}"
+ # ssh.exec_command(kill_cmd, timeout=10)
+ # print(f"Killed PID {pid} on port {port}")
+ # else:
+ # print(f"No process found on port {port}")
+
+ # except Exception as e:
+ # print(f"Error checking port {port}: {e}")
+
+ # ssh.close()
+
+ def get_device_info(self):
+ """
+ Fetches all the real devices clustered to the LANforge
+
+ Returns:
+ interop_tab_response: if invalid response code. Response code other than 200.
+ all_devices (dict): returns both the port data and resource mgr data with shelf.resource as the key
+ """
+ androids, linux, macbooks, windows, iOS = [], [], [], [], []
+ all_devices = {}
+
+ # querying resource manager tab for fetching laptops data
+ resource_manager_tab_response, resource_manager_data = self.api_get(
+ endp='/resource/all')
+ if resource_manager_tab_response.status_code != 200:
+ logger.info('Error fetching the data with the {}. Returned {}'.format(
+ '/resources/all', resource_manager_tab_response))
+ return resource_manager_tab_response
+ resources_list = [resource_manager_data['resource']
+ if 'resource' in resource_manager_data else resource_manager_data['resources']][0]
+ for resource in resources_list:
+ resource_port, resource_data = list(resource.keys())[
+ 0], list(resource.values())[0]
+ if resource_data['phantom']:
+ continue
+ if resource_data['ct-kernel'] is False:
+ if resource_data['app-id'] == '0':
+ if 'Win' in resource_data['hw version']:
+ windows.append(resource_data)
+ elif 'Apple' in resource_data['hw version']:
+ macbooks.append(resource_data)
+ elif 'Linux' in resource_data['hw version']:
+ linux.append(resource_data)
+ else:
+ if 'Apple' in resource_data['hw version']:
+ iOS.append(resource_data)
+ else:
+ androids.append(resource_data)
+ all_devices[resource_port] = resource_data
+ shelf, resource = resource_port.split('.')
+ _, port_data = self.api_get(endp='/port/{}/{}'.format(shelf, resource))
+ if 'interface' in port_data.keys():
+ port_data['interfaces'] = [port_data['interface']]
+ for port_id in port_data['interfaces']:
+ port_id_values = list(port_id.values())[0]
+ _, all_columns = self.api_get(endp=port_id_values['_links'])
+ all_columns = all_columns['interface']
+ if all_columns['parent dev'] == 'wiphy0':
+ all_devices[resource_port].update(all_columns)
+ return all_devices
+
+ def get_client_connection_details(self, device_list: list):
+ """
+ Method to return SSID, BSSID and Signal Strength details of the ports mentioned in the device list argument.
+
+ Args:
+ device_list (list): List of all the ports. E.g., ['1.10.wlan0', '1.11.wlan0']
+
+ Returns:
+ connection_details (dict): Dictionary containing port number as the key and SSID, BSSID, Signal as the values for each device in the device_list.
+ """
+ connection_details = {}
+ for device in device_list:
+ shelf, resource, port_name = device.split('.')
+ _, device_data = self.api_get('/port/{}/{}/{}?fields=phantom,down,ssid,ap,signal,mac'.format(shelf, resource, port_name))
+ device_data = device_data['interface']
+ if device_data['phantom'] or device_data['down']:
+ print('{} is in phantom state or down state, data may not be accurate.'.format(device))
+ connection_details[device] = device_data
+ return connection_details
+
+ def filter_iOS_devices(self, device_list):
+ modified_device_list = device_list
+ if type(device_list) is str:
+ modified_device_list = device_list.split(',')
+ filtered_list = []
+ for device in modified_device_list:
+ if device.count('.') == 1:
+ shelf, resource = device.split('.')
+ elif device.count('.') == 2:
+ shelf, resource, port = device.split('.')
+ elif device.count('.') == 0:
+ shelf, resource = 1, device
+ response_code, device_data = self.api_get('/resource/{}/{}'.format(shelf, resource))
+ if 'status' in device_data and device_data['status'] == 'NOT_FOUND':
+ print('Device {} is not found.'.format(device))
+ continue
+ device_data = device_data['resource']
+ # print(device_data)
+ if 'Apple' in device_data['hw version'] and (device_data['app-id'] != '') and (device_data['app-id'] != '0' or device_data['kernel'] == ''):
+ print('{} is an iOS device. Currently we do not support iOS devices.'.format(device))
+ else:
+ filtered_list.append(device)
+ if type(device_list) is str:
+ filtered_list = ','.join(filtered_list)
+ return filtered_list
+
+ def render_overall_report(self,test_name=""):
+ if test_name == "http_test":
+ if test_name not in self.test_count_dict:
+ self.test_count_dict[test_name] = 1
+
+
+
+ def run_ping_test(
+ self,
+ target: str = '1.1.eth1',
+ ping_interval: str = '1',
+ ping_duration: float = 1,
+ ssid: str = None,
+ mgr_passwd: str = 'lanforge',
+ server_ip: str = None,
+ security: str = 'open',
+ passwd: str = '[BLANK]',
+ virtual: bool = False,
+ num_sta: int = 1,
+ radio: str = None,
+ real: bool = True,
+ use_default_config: bool = True,
+ debug: bool = False,
+ local_lf_report_dir: str = "",
+ log_level: str = None,
+ lf_logger_config_json: str = None,
+ help_summary: bool = False,
+ group_name: str = None,
+ profile_name: str = None,
+ file_name: str = None,
+ eap_method: str = 'DEFAULT',
+ eap_identity: str = '',
+ ieee8021x: bool = False,
+ ieee80211u: bool = False,
+ ieee80211w: int = 1,
+ enable_pkc: bool = False,
+ bss_transition: bool = False,
+ power_save: bool = False,
+ disable_ofdma: bool = False,
+ roam_ft_ds: bool = False,
+ key_management: str = 'DEFAULT',
+ pairwise: str = '[BLANK]',
+ private_key: str = '[BLANK]',
+ ca_cert: str = '[BLANK]',
+ client_cert: str = '[BLANK]',
+ pk_passwd: str = '[BLANK]',
+ pac_file: str = '[BLANK]',
+ expected_passfail_value: str = None,
+ device_csv_name: str = None,
+ wait_time: int = 60,
+ dev_list: str = None
+ ):
+
+ # set the logger level to debug
+ logger_config = lf_logger_config.lf_logger_config()
+
+ if log_level:
+ logger_config.set_level(level=log_level)
+
+ if lf_logger_config_json:
+ # logger_config.lf_logger_config_json = "lf_logger_config.json"
+ logger_config.lf_logger_config_json = lf_logger_config_json
+ logger_config.load_lf_logger_config()
+ # validate_args(args)
+
+ mgr_ip = self.lanforge_ip
+ mgr_password = mgr_passwd
+ mgr_port = self.port
+ server_ip = server_ip
+ ssid = ssid
+ security = security
+ password = passwd
+ num_sta = num_sta
+ radio = radio
+ target = target
+ interval = ping_interval
+ duration = ping_duration
+ configure = not use_default_config
+ debug = debug
+ group_name = group_name
+ file_name = file_name
+ profile_name = profile_name
+ eap_method = eap_method
+ eap_identity = eap_identity
+ ieee80211 = ieee8021x
+ ieee80211u = ieee80211u
+ ieee80211w = ieee80211w
+ enable_pkc = enable_pkc
+ bss_transition = bss_transition
+ power_save = power_save
+ disable_ofdma = disable_ofdma
+ roam_ft_ds = roam_ft_ds
+ key_management = key_management
+ pairwise = pairwise
+ private_key = private_key
+ ca_cert = ca_cert
+ client_cert = client_cert
+ pk_passwd = pk_passwd
+ pac_file = pac_file
+
+ if (debug):
+ print('''Specified configuration:
+ ip: {}
+ port: {}
+ ssid: {}
+ security: {}
+ password: {}
+ target: {}
+ Ping interval: {}
+ Packet Duration (in min): {}
+ virtual: {}
+ num of virtual stations: {}
+ radio: {}
+ real: {}
+ debug: {}
+ '''.format(mgr_ip, mgr_port, ssid, security, password, target, interval, duration, virtual, num_sta, radio, real, debug))
+
+ ce = self.current_exec #seires
+ if ce == "parallel":
+ obj_name = "ping_test"
+ else:
+ obj_no = 1
+ while f"ping_test_{obj_no}" in self.ping_obj_dict[ce]:
+ obj_no+=1
+ obj_name = f"ping_test_{obj_no}"
+ self.ping_obj_dict[ce][obj_name] = {"obj":None,"data":None}
+ # ping object creation
+ self.ping_obj_dict[ce][obj_name]["obj"] = Ping(host=mgr_ip, port=mgr_port, ssid=ssid, security=security, password=password, radio=radio,
+ lanforge_password=mgr_password, target=target, interval=interval, sta_list=[], virtual=virtual, real=real, duration=duration, debug=debug, csv_name=device_csv_name,
+ expected_passfail_val=expected_passfail_value, wait_time=wait_time, group_name=group_name)
+
+ # changing the target from port to IP
+ self.ping_obj_dict[ce][obj_name]["obj"].change_target_to_ip()
+
+ # creating virtual stations if --virtual flag is specified
+ if (virtual):
+
+ logging.info('Proceeding to create {} virtual stations on {}'.format(num_sta, radio))
+ station_list = LFUtils.portNameSeries(
+ prefix_='sta', start_id_=0, end_id_=num_sta - 1, padding_number_=100000, radio=radio)
+ self.ping_obj_dict[ce][obj_name]["obj"].sta_list = station_list
+ if (debug):
+ logging.info('Virtual Stations: {}'.format(station_list).replace(
+ '[', '').replace(']', '').replace('\'', ''))
+
+ # selecting real clients if --real flag is specified
+ if (real):
+ Devices = RealDevice(manager_ip=mgr_ip, selected_bands=[])
+ Devices.get_devices()
+ self.ping_obj_dict[ce][obj_name]["obj"].Devices = Devices
+ # self.ping_obj_dict[ce][obj_name]["obj"].select_real_devices(real_devices=Devices)
+ # If config is True, attempt to bring up all devices in the list and perform tests on those that become active
+ if (configure):
+ config_devices = {}
+ obj = DeviceConfig.DeviceConfig(lanforge_ip=mgr_ip, file_name=file_name, wait_time=wait_time)
+ # Case 1: Group name, file name, and profile name are provided
+ if group_name and file_name and profile_name:
+ selected_groups = group_name.split(',')
+ selected_profiles = profile_name.split(',')
+ for i in range(len(selected_groups)):
+ config_devices[selected_groups[i]] = selected_profiles[i]
+ obj.initiate_group()
+ group_device_map = obj.get_groups_devices(data=selected_groups, groupdevmap=True)
+ # Configure devices in the selected group with the selected profile
+ eid_list = asyncio.run(obj.connectivity(config=config_devices, upstream=server_ip))
+ Devices.get_devices()
+ self.ping_obj_dict[ce][obj_name]["obj"].select_real_devices(real_devices=Devices, device_list=eid_list)
+ # Case 2: Device list is empty but config flag is True — prompt the user to input device details for configuration
+ else:
+ all_devices = obj.get_all_devices()
+ device_list = []
+ config_dict = {
+ 'ssid': ssid,
+ 'passwd': password,
+ 'enc': security,
+ 'eap_method': eap_method,
+ 'eap_identity': eap_identity,
+ 'ieee80211': ieee80211,
+ 'ieee80211u': ieee80211u,
+ 'ieee80211w': ieee80211w,
+ 'enable_pkc': enable_pkc,
+ 'bss_transition': bss_transition,
+ 'power_save': power_save,
+ 'disable_ofdma': disable_ofdma,
+ 'roam_ft_ds': roam_ft_ds,
+ 'key_management': key_management,
+ 'pairwise': pairwise,
+ 'private_key': private_key,
+ 'ca_cert': ca_cert,
+ 'client_cert': client_cert,
+ 'pk_passwd': pk_passwd,
+ 'pac_file': pac_file,
+ 'server_ip': server_ip,
+ }
+ for device in all_devices:
+ if device["type"] == 'laptop':
+ device_list.append(device["shelf"] + '.' + device["resource"] + " " + device["hostname"])
+ else:
+ device_list.append(device["eid"] + " " + device["serial"])
+ logger.info(f"Available devices: {device_list}")
+ if dev_list is None:
+ dev_list = input("Enter the desired resources to run the test:")
+ dev_list = dev_list.split(',')
+ dev_list = asyncio.run(obj.connectivity(device_list=dev_list, wifi_config=config_dict))
+ Devices.get_devices()
+ self.ping_obj_dict[ce][obj_name]["obj"].select_real_devices(real_devices=Devices, device_list=dev_list)
+ # Case 3: Config is False, no device list is provided, and no group is selected
+ # Prompt the user to manually input devices for running the test
+ else:
+ device_list = self.ping_obj_dict[ce][obj_name]["obj"].Devices.get_devices()
+ logger.info(f"Available devices: {device_list}")
+ if dev_list is None:
+ dev_list = input("Enter the desired resources to run the test:")
+ dev_list = dev_list.split(',')
+ # dev_list = input("Enter the desired resources to run the test:").split(',')
+ self.ping_obj_dict[ce][obj_name]["obj"].select_real_devices(real_devices=Devices, device_list=dev_list)
+
+ # station precleanup
+ self.ping_obj_dict[ce][obj_name]["obj"].cleanup() #11 change
+
+ # building station if virtual
+ if (virtual):
+ self.ping_obj_dict[ce][obj_name]["obj"].buildstation()
+
+ # check if generic tab is enabled or not
+ if (not self.ping_obj_dict[ce][obj_name]["obj"].check_tab_exists()):
+ logging.error('Generic Tab is not available.\nAborting the test.')
+ return False
+
+ self.ping_obj_dict[ce][obj_name]["obj"].sta_list += self.ping_obj_dict[ce][obj_name]["obj"].real_sta_list
+
+ # creating generic endpoints
+ self.ping_obj_dict[ce][obj_name]["obj"].create_generic_endp()
+
+ logging.info(self.ping_obj_dict[ce][obj_name]["obj"].generic_endps_profile.created_cx)
+
+ # run the test for the given duration
+ logging.info('Running the ping test for {} minutes'.format(duration))
+
+ # start generate endpoint
+ self.ping_obj_dict[ce][obj_name]["obj"].start_generic()
+ # time_counter = 0
+ ports_data_dict = self.ping_obj_dict[ce][obj_name]["obj"].json_get('/ports/all/')['interfaces']
+ ports_data = {}
+ for ports in ports_data_dict:
+ port, port_data = list(ports.keys())[0], list(ports.values())[0]
+ ports_data[port] = port_data
+
+ time.sleep(duration * 60)
+
+ logging.info('Stopping the test')
+ self.ping_obj_dict[ce][obj_name]["obj"].stop_generic()
+
+ result_data = self.ping_obj_dict[ce][obj_name]["obj"].get_results()
+ # logging.info(result_data)
+ logging.info(self.ping_obj_dict[ce][obj_name]["obj"].result_json)
+ if (virtual):
+ ports_data_dict = self.ping_obj_dict[ce][obj_name]["obj"].json_get('/ports/all/')['interfaces']
+ ports_data = {}
+ for ports in ports_data_dict:
+ port, port_data = list(ports.keys())[0], list(ports.values())[0]
+ ports_data[port] = port_data
+ if (isinstance(result_data, dict)):
+ for station in self.ping_obj_dict[ce][obj_name]["obj"].sta_list:
+ if (station not in self.ping_obj_dict[ce][obj_name]["obj"].real_sta_list):
+ current_device_data = ports_data[station]
+ if (station.split('.')[2] in result_data['name']):
+ try:
+ self.ping_obj_dict[ce][obj_name]["obj"].result_json[station] = {
+ 'command': result_data['command'],
+ 'sent': result_data['tx pkts'],
+ 'recv': result_data['rx pkts'],
+ 'dropped': result_data['dropped'],
+ 'min_rtt': [result_data['last results'].split('\n')[-2].split()[-1].split('/')[0] if len(result_data['last results']) != 0 and 'min/avg/max' in result_data['last results'].split('\n')[-2] else '0'][0], # noqa E501
+ 'avg_rtt': [result_data['last results'].split('\n')[-2].split()[-1].split('/')[1] if len(result_data['last results']) != 0 and 'min/avg/max' in result_data['last results'].split('\n')[-2] else '0'][0], # noqa E501
+ 'max_rtt': [result_data['last results'].split('\n')[-2].split()[-1].split('/')[2] if len(result_data['last results']) != 0 and 'min/avg/max' in result_data['last results'].split('\n')[-2] else '0'][0], # noqa E501
+ 'mac': current_device_data['mac'],
+ 'channel': current_device_data['channel'],
+ 'ssid': current_device_data['ssid'],
+ 'mode': current_device_data['mode'],
+ 'name': station,
+ 'os': 'Virtual',
+ 'remarks': [],
+ 'last_result': [result_data['last results'].split('\n')[-2] if len(result_data['last results']) != 0 else ""][0]
+ }
+ self.ping_obj_dict[ce][obj_name]["obj"].result_json[station]['remarks'] = self.ping_obj_dict[ce][obj_name]["obj"].generate_remarks(self.ping_obj_dict[ce][obj_name]["obj"].result_json[station])
+ except BaseException:
+ logging.error('Failed parsing the result for the station {}'.format(station))
+
+ else:
+ for station in self.ping_obj_dict[ce][obj_name]["obj"].sta_list:
+ if (station not in self.ping_obj_dict[ce][obj_name]["obj"].real_sta_list):
+ current_device_data = ports_data[station]
+ for ping_device in result_data:
+ ping_endp, ping_data = list(ping_device.keys())[
+ 0], list(ping_device.values())[0]
+ if (station.split('.')[2] in ping_endp):
+ try:
+ self.ping_obj_dict[ce][obj_name]["obj"].result_json[station] = {
+ 'command': ping_data['command'],
+ 'sent': ping_data['tx pkts'],
+ 'recv': ping_data['rx pkts'],
+ 'dropped': ping_data['dropped'],
+ 'min_rtt': [ping_data['last results'].split('\n')[-2].split()[-1].split('/')[0] if len(ping_data['last results']) != 0 and 'min/avg/max' in ping_data['last results'].split('\n')[-2] else '0'][0], # noqa E501
+ 'avg_rtt': [ping_data['last results'].split('\n')[-2].split()[-1].split('/')[1] if len(ping_data['last results']) != 0 and 'min/avg/max' in ping_data['last results'].split('\n')[-2] else '0'][0], # noqa E501
+ 'max_rtt': [ping_data['last results'].split('\n')[-2].split()[-1].split('/')[2] if len(ping_data['last results']) != 0 and 'min/avg/max' in ping_data['last results'].split('\n')[-2] else '0'][0], # noqa E501
+ 'mac': current_device_data['mac'],
+ 'ssid': current_device_data['ssid'],
+ 'channel': current_device_data['channel'],
+ 'mode': current_device_data['mode'],
+ 'name': station,
+ 'os': 'Virtual',
+ 'remarks': [],
+ 'last_result': [ping_data['last results'].split('\n')[-2] if len(ping_data['last results']) != 0 else ""][0]
+ }
+ self.ping_obj_dict[ce][obj_name]["obj"].result_json[station]['remarks'] = self.ping_obj_dict[ce][obj_name]["obj"].generate_remarks(self.ping_obj_dict[ce][obj_name]["obj"].result_json[station])
+ except BaseException:
+ logging.error('Failed parsing the result for the station {}'.format(station))
+
+ if (real):
+ if (isinstance(result_data, dict)):
+ for station in self.ping_obj_dict[ce][obj_name]["obj"].real_sta_list:
+ current_device_data = Devices.devices_data[station]
+ # logging.info(current_device_data)
+ if (station in result_data['name']):
+ try:
+ # logging.info(result_data['last results'].split('\n'))
+ self.ping_obj_dict[ce][obj_name]["obj"].result_json[station] = {
+ 'command': result_data['command'],
+ 'sent': result_data['tx pkts'],
+ 'recv': result_data['rx pkts'],
+ 'dropped': result_data['dropped'],
+ 'min_rtt': [result_data['last results'].split('\n')[-2].split()[-1].split(':')[-1].split('/')[0] if len(result_data['last results']) != 0 and 'min/avg/max' in result_data['last results'].split('\n')[-2] else '0'][0], # noqa E501
+ 'avg_rtt': [result_data['last results'].split('\n')[-2].split()[-1].split(':')[-1].split('/')[1] if len(result_data['last results']) != 0 and 'min/avg/max' in result_data['last results'].split('\n')[-2] else '0'][0], # noqa E501
+ 'max_rtt': [result_data['last results'].split('\n')[-2].split()[-1].split(':')[-1].split('/')[2] if len(result_data['last results']) != 0 and 'min/avg/max' in result_data['last results'].split('\n')[-2] else '0'][0], # noqa E501
+ 'mac': current_device_data['mac'],
+ 'ssid': current_device_data['ssid'],
+ 'channel': current_device_data['channel'],
+ 'mode': current_device_data['mode'],
+ 'name': [current_device_data['user'] if current_device_data['user'] != '' else current_device_data['hostname']][0],
+ 'os': ['Windows' if 'Win' in current_device_data['hw version'] else 'Linux' if 'Linux' in current_device_data['hw version'] else 'Mac' if 'Apple' in current_device_data['hw version'] else 'Android'][0], # noqa E501
+ 'remarks': [],
+ 'last_result': [result_data['last results'].split('\n')[-2] if len(result_data['last results']) != 0 else ""][0]
+ }
+ self.ping_obj_dict[ce][obj_name]["obj"].result_json[station]['remarks'] = self.ping_obj_dict[ce][obj_name]["obj"].generate_remarks(self.ping_obj_dict[ce][obj_name]["obj"].result_json[station])
+ except BaseException:
+ logging.error('Failed parsing the result for the station {}'.format(station))
+ else:
+ for station in self.ping_obj_dict[ce][obj_name]["obj"].real_sta_list:
+ current_device_data = Devices.devices_data[station]
+ for ping_device in result_data:
+ ping_endp, ping_data = list(ping_device.keys())[
+ 0], list(ping_device.values())[0]
+ if (station in ping_endp):
+ try:
+ self.ping_obj_dict[ce][obj_name]["obj"].result_json[station] = {
+ 'command': ping_data['command'],
+ 'sent': ping_data['tx pkts'],
+ 'recv': ping_data['rx pkts'],
+ 'dropped': ping_data['dropped'],
+ 'min_rtt': [ping_data['last results'].split('\n')[-2].split()[-1].split(':')[-1].split('/')[0] if len(ping_data['last results']) != 0 and 'min/avg/max' in ping_data['last results'].split('\n')[-2] else '0'][0], # noqa E501
+ 'avg_rtt': [ping_data['last results'].split('\n')[-2].split()[-1].split(':')[-1].split('/')[1] if len(ping_data['last results']) != 0 and 'min/avg/max' in ping_data['last results'].split('\n')[-2] else '0'][0], # noqa E501
+ 'max_rtt': [ping_data['last results'].split('\n')[-2].split()[-1].split(':')[-1].split('/')[2] if len(ping_data['last results']) != 0 and 'min/avg/max' in ping_data['last results'].split('\n')[-2] else '0'][0], # noqa E501
+ 'mac': current_device_data['mac'],
+ 'ssid': current_device_data['ssid'],
+ 'channel': current_device_data['channel'],
+ 'mode': current_device_data['mode'],
+ 'name': [current_device_data['user'] if current_device_data['user'] != '' else current_device_data['hostname']][0],
+ 'os': ['Windows' if 'Win' in current_device_data['hw version'] else 'Linux' if 'Linux' in current_device_data['hw version'] else 'Mac' if 'Apple' in current_device_data['hw version'] else 'Android'][0], # noqa E501
+ 'remarks': [],
+ 'last_result': [ping_data['last results'].split('\n')[-2] if len(ping_data['last results']) != 0 else ""][0]
+ }
+ self.ping_obj_dict[ce][obj_name]["obj"].result_json[station]['remarks'] = self.ping_obj_dict[ce][obj_name]["obj"].generate_remarks(self.ping_obj_dict[ce][obj_name]["obj"].result_json[station])
+ except BaseException:
+ logging.error('Failed parsing the result for the station {}'.format(station))
+
+ logging.info(self.ping_obj_dict[ce][obj_name]["obj"].result_json)
+
+ # station post cleanup
+ self.ping_obj_dict[ce][obj_name]["obj"].cleanup() #12 change
+
+ if local_lf_report_dir == "":
+ # Report generation when groups are specified but no custom report path is provided
+ if group_name:
+ self.ping_obj_dict[ce][obj_name]["obj"].generate_report(config_devices=config_devices, group_device_map=group_device_map)
+ # Report generation when no group is specified and no custom report path is provided
+ else:
+ self.ping_obj_dict[ce][obj_name]["obj"].generate_report()
+ else:
+ # Report generation when groups are specified and a custom report path is provided
+ if group_name:
+ self.ping_obj_dict[ce][obj_name]["obj"].generate_report(config_devices=config_devices, group_device_map=group_device_map, report_path=local_lf_report_dir)
+ # Report generation when no group is specified but a custom report path is provided
+ else:
+ self.ping_obj_dict[ce][obj_name]["obj"].generate_report(report_path=local_lf_report_dir)
+ params = {
+ "result_json": None,
+ "result_dir": "Ping_Test_Report",
+ "report_path": "",
+ "config_devices": "",
+ "group_device_map": {}
+ }
+
+ if local_lf_report_dir != "":
+ params["report_path"] = local_lf_report_dir
+
+ if group_name:
+ params["config_devices"] = config_devices
+ params["group_device_map"] = group_device_map
+ self.ping_obj_dict[ce][obj_name]["data"] = params.copy()
+ return True
+
+ def run_http_test(
+ self,
+ upstream_port='eth2',
+ num_stations=0,
+ twog_radio='wiphy3',
+ fiveg_radio='wiphy0',
+ sixg_radio='wiphy2',
+ twog_security=None,
+ twog_ssid=None,
+ twog_passwd=None,
+ fiveg_security=None,
+ fiveg_ssid=None,
+ fiveg_passwd=None,
+ sixg_security=None,
+ sixg_ssid=None,
+ sixg_passwd=None,
+ target_per_ten=100,
+ file_size='5MB',
+ bands=["5G", "2.4G", "6G"],
+ duration=None,
+ client_type="Real",
+ threshold_5g="60",
+ threshold_2g="90",
+ threshold_both="50",
+ ap_name="TestAP",
+ lf_username="lanforge",
+ lf_password="lanforge",
+ ssh_port=22,
+ test_rig="",
+ test_tag="",
+ dut_hw_version="",
+ dut_sw_version="",
+ dut_model_num="",
+ dut_serial_num="",
+ test_priority="",
+ test_id="lf_webpage",
+ csv_outfile="",
+ dowebgui=False,
+ result_dir='',
+ device_list=[],
+ test_name=None,
+ get_url_from_file=False,
+ file_path=None,
+ help_summary=False,
+ ssid=None,
+ passwd='',
+ security=None,
+ file_name=None,
+ group_name=None,
+ profile_name=None,
+ eap_method='DEFAULT',
+ eap_identity='',
+ ieee8021x=False,
+ ieee80211u=False,
+ ieee80211w=1,
+ enable_pkc=False,
+ bss_transition=False,
+ power_save=False,
+ disable_ofdma=False,
+ roam_ft_ds=False,
+ key_management='DEFAULT',
+ pairwise='NA',
+ private_key='NA',
+ ca_cert='NA',
+ client_cert='NA',
+ pk_passwd='NA',
+ pac_file='NA',
+ expected_passfail_value=None,
+ device_csv_name=None,
+ wait_time=60,
+ config=False,
+ get_live_view=False,
+ total_floors="0"
+ ):
+
+ bands.sort()
+
+ # Error checking to prevent case issues
+ for band in range(len(bands)):
+ bands[band] = bands[band].upper()
+ if bands[band] == "BOTH":
+ bands[band] = "Both"
+
+ # Error checking for non-existent bands
+ valid_bands = ['2.4G', '5G', '6G', 'Both']
+ for band in bands:
+ if band not in valid_bands:
+ raise ValueError("Invalid band '%s' used in bands argument!" % band)
+
+ # Check for Both being used independently
+ if len(bands) > 1 and "Both" in bands:
+ raise ValueError("'Both' test type must be used independently!")
+
+ # validate_args(args)
+ if duration.endswith('s') or duration.endswith('S'):
+ duration = int(duration[0:-1])
+ elif duration.endswith('m') or duration.endswith('M'):
+ duration = int(duration[0:-1]) * 60
+ elif duration.endswith('h') or duration.endswith('H'):
+ duration = int(duration[0:-1]) * 60 * 60
+ elif duration.endswith(''):
+ duration = int(duration)
+
+ list6G, list6G_bytes, list6G_speed, list6G_urltimes = [], [], [], []
+ list5G, list5G_bytes, list5G_speed, list5G_urltimes = [], [], [], []
+ list2G, list2G_bytes, list2G_speed, list2G_urltimes = [], [], [], []
+ Both, Both_bytes, Both_speed, Both_urltimes = [], [], [], []
+ listReal, listReal_bytes, listReal_speed, listReal_urltimes = [], [], [], [] # For real devices (not band specific)
+ dict_keys = []
+ dict_keys.extend(bands)
+ # print(dict_keys)
+ final_dict = dict.fromkeys(dict_keys)
+ # print(final_dict)
+ dict1_keys = ['dl_time', 'min', 'max', 'avg', 'bytes_rd', 'speed', 'url_times']
+ for i in final_dict:
+ final_dict[i] = dict.fromkeys(dict1_keys)
+ print(final_dict)
+ min6 = []
+ min5 = []
+ min2 = []
+ min_both = []
+ max6 = []
+ max5 = []
+ max2 = []
+ max_both = []
+ avg6 = []
+ avg2 = []
+ avg5 = []
+ avg_both = []
+ port_list, dev_list, macid_list = [], [], []
+ for band in bands:
+ # For real devices while ensuring no blocker for Virtual devices
+ if client_type == 'Real':
+ ssid = ssid
+ passwd = passwd
+ security = security
+ elif band == "2.4G":
+ security = [twog_security]
+ ssid = [twog_ssid]
+ passwd = [twog_passwd]
+ elif band == "5G":
+ security = [fiveg_security]
+ ssid = [fiveg_ssid]
+ passwd = [fiveg_passwd]
+ elif band == "6G":
+ security = [sixg_security]
+ ssid = [sixg_ssid]
+ passwd = [sixg_passwd]
+ elif band == "Both":
+ security = [twog_security, fiveg_security]
+ ssid = [twog_ssid, fiveg_ssid]
+ passwd = [twog_passwd, fiveg_passwd]
+ ce = self.current_exec #seires
+ if ce == "parallel":
+ obj_name = "http_test"
+ else:
+ obj_no = 1
+ while f"http_test_{obj_no}" in self.http_obj_dict[ce]:
+ obj_no+=1
+ obj_name = f"http_test_{obj_no}"
+ self.http_obj_dict[ce][obj_name] = {"obj":None,"data":None}
+
+ self.http_obj_dict[ce][obj_name]["obj"] = http_test.HttpDownload(lfclient_host=self.lanforge_ip, lfclient_port=self.port,
+ upstream=upstream_port, num_sta=num_stations,
+ security=security, ap_name=ap_name,
+ ssid=ssid, password=passwd,
+ target_per_ten=target_per_ten,
+ file_size=file_size, bands=band,
+ twog_radio=twog_radio,
+ fiveg_radio=fiveg_radio,
+ sixg_radio=sixg_radio,
+ client_type=client_type,
+ lf_username=lf_username, lf_password=lf_password,
+ result_dir=result_dir, # FOR WEBGUI
+ dowebgui=dowebgui, # FOR WEBGUI
+ device_list=device_list,
+ test_name=test_name, # FOR WEBGUI
+ get_url_from_file=get_url_from_file,
+ file_path=file_path,
+ file_name=file_name,
+ group_name=group_name,
+ profile_name=profile_name,
+ eap_method=eap_method,
+ eap_identity=eap_identity,
+ ieee80211=ieee8021x,
+ ieee80211u=ieee80211u,
+ ieee80211w=ieee80211w,
+ enable_pkc=enable_pkc,
+ bss_transition=bss_transition,
+ power_save=power_save,
+ disable_ofdma=disable_ofdma,
+ roam_ft_ds=roam_ft_ds,
+ key_management=key_management,
+ pairwise=pairwise,
+ private_key=private_key,
+ ca_cert=ca_cert,
+ client_cert=client_cert,
+ pk_passwd=pk_passwd,
+ pac_file=pac_file,
+ expected_passfail_value=expected_passfail_value,
+ device_csv_name=device_csv_name,
+ wait_time=wait_time,
+ config=config,
+ get_live_view= get_live_view,
+ total_floors = total_floors
+ )
+ if client_type == "Real":
+ if not isinstance(device_list, list):
+ self.http_obj_dict[ce][obj_name]["obj"].device_list = self.http_obj_dict[ce][obj_name]["obj"].filter_iOS_devices(device_list)
+ if len(self.http_obj_dict[ce][obj_name]["obj"].device_list) == 0:
+ logger.info("There are no devices available")
+ return False
+ port_list, dev_list, macid_list, configuration = self.http_obj_dict[ce][obj_name]["obj"].get_real_client_list()
+ if dowebgui and group_name:
+ if len(dev_list) == 0:
+ logger.info("No device is available to run the test")
+ obj = {
+ "status": "Stopped",
+ "configuration_status": "configured"
+ }
+ self.http_obj_dict[ce][obj_name]["obj"].updating_webui_runningjson(obj)
+ return
+ else:
+ obj = {
+ "configured_devices": dev_list,
+ "configuration_status": "configured"
+ }
+ self.http_obj_dict[ce][obj_name]["obj"].updating_webui_runningjson(obj)
+ num_stations = len(port_list)
+ if not get_url_from_file:
+ self.http_obj_dict[ce][obj_name]["obj"].file_create(ssh_port=ssh_port)
+ else:
+ if file_path is None:
+ print("WARNING: Please Specify the path of the file, if you select the --get_url_from_file")
+ return False
+ self.http_obj_dict[ce][obj_name]["obj"].set_values()
+ self.http_obj_dict[ce][obj_name]["obj"].precleanup()
+ self.http_obj_dict[ce][obj_name]["obj"].build()
+ if client_type == 'Real':
+ self.http_obj_dict[ce][obj_name]["obj"].monitor_cx()
+ logger.info(f'Test started on the devices : {self.http_obj_dict[ce][obj_name]["obj"].port_list}')
+ test_time = datetime.now()
+ # Solution For Leap Year conflict changed it to %Y
+ test_time = test_time.strftime("%Y %d %H:%M:%S")
+ print("Test started at ", test_time)
+ self.http_obj_dict[ce][obj_name]["obj"].start()
+ if dowebgui:
+ # FOR WEBGUI, -This fumction is called to fetch the runtime data from layer-4
+ self.http_obj_dict[ce][obj_name]["obj"].monitor_for_runtime_csv(duration)
+ elif client_type == 'Real':
+ # To fetch runtime csv during runtime
+ self.http_obj_dict[ce][obj_name]["obj"].monitor_for_runtime_csv(duration)
+ else:
+ time.sleep(duration)
+ self.http_obj_dict[ce][obj_name]["obj"].stop()
+ # taking self.http_obj_dict[ce][obj_name]["obj"].data, which got updated in the monitor_for_runtime_csv method
+ if client_type == 'Real':
+ uc_avg_val = self.http_obj_dict[ce][obj_name]["obj"].data['uc_avg']
+ url_times = self.http_obj_dict[ce][obj_name]["obj"].data['url_data']
+ rx_bytes_val = self.http_obj_dict[ce][obj_name]["obj"].data['bytes_rd']
+ print('rx_rate_Val',self.http_obj_dict[ce][obj_name]["obj"].data['rx rate (1m)'])
+ rx_rate_val = list(self.http_obj_dict[ce][obj_name]["obj"].data['rx rate (1m)'])
+ else:
+ uc_avg_val = self.http_obj_dict[ce][obj_name]["obj"].my_monitor('uc-avg')
+ url_times = self.http_obj_dict[ce][obj_name]["obj"].my_monitor('total-urls')
+ rx_bytes_val = self.http_obj_dict[ce][obj_name]["obj"].my_monitor('bytes-rd')
+ rx_rate_val = self.http_obj_dict[ce][obj_name]["obj"].my_monitor('rx rate')
+ if dowebgui:
+ self.http_obj_dict[ce][obj_name]["obj"].data_for_webui["url_data"] = url_times # storing the layer-4 url data at the end of test
+ if client_type == 'Real': # for real clients
+ listReal.extend(uc_avg_val)
+ listReal_bytes.extend(rx_bytes_val)
+ listReal_speed.extend(rx_rate_val)
+ listReal_urltimes.extend(url_times)
+ logger.info("%s %s %s", listReal, listReal_bytes, listReal_speed)
+ final_dict[band]['dl_time'] = listReal
+ min2.append(min(listReal))
+ final_dict[band]['min'] = min2
+ max2.append(max(listReal))
+ final_dict[band]['max'] = max2
+ avg2.append((sum(listReal) / num_stations))
+ final_dict[band]['avg'] = avg2
+ final_dict[band]['bytes_rd'] = listReal_bytes
+ final_dict[band]['speed'] = listReal_speed
+ final_dict[band]['url_times'] = listReal_urltimes
+ else:
+ if band == "5G":
+ list5G.extend(uc_avg_val)
+ list5G_bytes.extend(rx_bytes_val)
+ list5G_speed.extend(rx_rate_val)
+ list5G_urltimes.extend(url_times)
+ logger.info("%s %s %s %s", list5G, list5G_bytes, list5G_speed, list5G_urltimes)
+ final_dict['5G']['dl_time'] = list5G
+ min5.append(min(list5G))
+ final_dict['5G']['min'] = min5
+ max5.append(max(list5G))
+ final_dict['5G']['max'] = max5
+ avg5.append((sum(list5G) / num_stations))
+ final_dict['5G']['avg'] = avg5
+ final_dict['5G']['bytes_rd'] = list5G_bytes
+ final_dict['5G']['speed'] = list5G_speed
+ final_dict['5G']['url_times'] = list5G_urltimes
+ elif band == "6G":
+ list6G.extend(uc_avg_val)
+ list6G_bytes.extend(rx_bytes_val)
+ list6G_speed.extend(rx_rate_val)
+ list6G_urltimes.extend(url_times)
+ final_dict['6G']['dl_time'] = list6G
+ min6.append(min(list6G))
+ final_dict['6G']['min'] = min6
+ max6.append(max(list6G))
+ final_dict['6G']['max'] = max6
+ avg6.append((sum(list6G) / num_stations))
+ final_dict['6G']['avg'] = avg6
+ final_dict['6G']['bytes_rd'] = list6G_bytes
+ final_dict['6G']['speed'] = list6G_speed
+ final_dict['6G']['url_times'] = list6G_urltimes
+ elif band == "2.4G":
+ list2G.extend(uc_avg_val)
+ list2G_bytes.extend(rx_bytes_val)
+ list2G_speed.extend(rx_rate_val)
+ list2G_urltimes.extend(url_times)
+ logger.info("%s %s %s", list2G, list2G_bytes, list2G_speed)
+ final_dict['2.4G']['dl_time'] = list2G
+ min2.append(min(list2G))
+ final_dict['2.4G']['min'] = min2
+ max2.append(max(list2G))
+ final_dict['2.4G']['max'] = max2
+ avg2.append((sum(list2G) / num_stations))
+ final_dict['2.4G']['avg'] = avg2
+ final_dict['2.4G']['bytes_rd'] = list2G_bytes
+ final_dict['2.4G']['speed'] = list2G_speed
+ final_dict['2.4G']['url_times'] = list2G_urltimes
+ elif bands == "Both":
+ Both.extend(uc_avg_val)
+ Both_bytes.extend(rx_bytes_val)
+ Both_speed.extend(rx_rate_val)
+ Both_urltimes.extend(url_times)
+ final_dict['Both']['dl_time'] = Both
+ min_both.append(min(Both))
+ final_dict['Both']['min'] = min_both
+ max_both.append(max(Both))
+ final_dict['Both']['max'] = max_both
+ avg_both.append((sum(Both) / num_stations))
+ final_dict['Both']['avg'] = avg_both
+ final_dict['Both']['bytes_rd'] = Both_bytes
+ final_dict['Both']['speed'] = Both_speed
+ final_dict['Both']['url_times'] = Both_urltimes
+
+ result_data = final_dict
+ print("result", result_data)
+ print("Test Finished")
+ test_end = datetime.now()
+ test_end = test_end.strftime("%Y %d %H:%M:%S")
+ print("Test ended at ", test_end)
+ s1 = test_time
+ s2 = test_end # for example
+ FMT = '%Y %d %H:%M:%S'
+ test_duration = datetime.strptime(s2, FMT) - datetime.strptime(s1, FMT)
+
+ info_ssid = []
+ info_security = []
+ # For real clients
+ if client_type == 'Real':
+ info_ssid.append(ssid)
+ info_security.append(security)
+ else:
+ for band in bands:
+ if band == "2.4G":
+ info_ssid.append(twog_ssid)
+ info_security.append(twog_security)
+ elif band == "5G":
+ info_ssid.append(fiveg_ssid)
+ info_security.append(fiveg_security)
+ elif band == "6G":
+ info_ssid.append(sixg_ssid)
+ info_security.append(sixg_security)
+ elif band == "Both":
+ info_ssid.append(fiveg_ssid)
+ info_security.append(fiveg_security)
+ info_ssid.append(twog_ssid)
+ info_security.append(twog_security)
+
+ print("total test duration ", test_duration)
+ date = str(datetime.now()).split(",")[0].replace(" ", "-").split(".")[0]
+ duration = duration
+ if int(duration) < 60:
+ duration = str(duration) + "s"
+ elif int(duration == 60) or (int(duration) > 60 and int(duration) < 3600):
+ duration = str(duration / 60) + "m"
+ else:
+ if int(duration == 3600) or (int(duration) > 3600):
+ duration = str(duration / 3600) + "h"
+
+ android_devices, windows_devices, linux_devices, mac_devices = 0, 0, 0, 0
+ all_devices_names = []
+ device_type = []
+ total_devices = ""
+ for i in self.http_obj_dict[ce][obj_name]["obj"].devices_list:
+ split_device_name = i.split(" ")
+ if 'android' in split_device_name:
+ all_devices_names.append(split_device_name[2] + ("(Android)"))
+ device_type.append("Android")
+ android_devices += 1
+ elif 'Win' in split_device_name:
+ all_devices_names.append(split_device_name[2] + ("(Windows)"))
+ device_type.append("Windows")
+ windows_devices += 1
+ elif 'Lin' in split_device_name:
+ all_devices_names.append(split_device_name[2] + ("(Linux)"))
+ device_type.append("Linux")
+ linux_devices += 1
+ elif 'Mac' in split_device_name:
+ all_devices_names.append(split_device_name[2] + ("(Mac)"))
+ device_type.append("Mac")
+ mac_devices += 1
+
+ # Build total_devices string based on counts
+ if android_devices > 0:
+ total_devices += f" Android({android_devices})"
+ if windows_devices > 0:
+ total_devices += f" Windows({windows_devices})"
+ if linux_devices > 0:
+ total_devices += f" Linux({linux_devices})"
+ if mac_devices > 0:
+ total_devices += f" Mac({mac_devices})"
+ if client_type == "Real":
+ if group_name:
+ group_names = ', '.join(configuration.keys())
+ profile_names = ', '.join(configuration.values())
+ configmap = "Groups:" + group_names + " -> Profiles:" + profile_names
+ test_setup_info = {
+ "AP name": ap_name,
+ "Configuration": configmap,
+ "Configured Devices": ", ".join(all_devices_names),
+ "No of Devices": "Total" + f"({len(all_devices_names)})" + total_devices,
+ "Traffic Direction": "Download",
+ "Traffic Duration ": duration
+ }
+ else:
+ test_setup_info = {
+ "AP Name": ap_name,
+ "SSID": ssid,
+ "Device List": ", ".join(all_devices_names),
+ "Security": security,
+ "No of Devices": "Total" + f"({len(all_devices_names)})" + total_devices,
+ "Traffic Direction": "Download",
+ "Traffic Duration ": duration
+ }
+ else:
+ test_setup_info = {
+ "AP Name": ap_name,
+ "SSID": ssid,
+ "Security": security,
+ "No of Devices": num_stations,
+ "Traffic Direction": "Download",
+ "Traffic Duration ": duration
+ }
+ test_input_infor = {
+ "LANforge ip": self.lanforge_ip,
+ "Bands": bands,
+ "Upstream": upstream_port,
+ "Stations": num_stations,
+ "SSID": ','.join(filter(None, info_ssid)) if info_ssid else "",
+ "Security": ', '.join(filter(None, info_security)) if info_security else "",
+ "Duration": duration,
+ "Contact": "support@candelatech.com"
+ }
+ if not file_path:
+ test_setup_info["File size"] = file_size
+ test_setup_info["File location"] = "/usr/local/lanforge/nginx/html"
+ test_input_infor["File size"] = file_size
+ else:
+ test_setup_info["File location (URLs from the File)"] = file_path
+ if client_type == "Real":
+ test_setup_info["failed_cx's"] = self.http_obj_dict[ce][obj_name]["obj"].failed_cx if self.http_obj_dict[ce][obj_name]["obj"].failed_cx else "NONE"
+ # dataset = self.http_obj_dict[ce][obj_name]["obj"].download_time_in_sec(result_data=result_data)
+ rx_rate = []
+ for i in result_data:
+ dataset = result_data[i]['dl_time']
+ dataset2 = result_data[i]['url_times']
+ bytes_rd = result_data[i]['bytes_rd']
+ rx_rate = result_data[i]['speed']
+ dataset1 = [round(x / 1000000, 4) for x in bytes_rd]
+ rx_rate = [round(x / 1000000, 4) for x in rx_rate] # converting bps to mbps
+
+ lis = []
+ if band == "Both":
+ for i in range(1, num_stations * 2 + 1):
+ lis.append(i)
+ else:
+ for i in range(1, num_stations + 1):
+ lis.append(i)
+
+ if dowebgui:
+ self.http_obj_dict[ce][obj_name]["obj"].data_for_webui["status"] = ["STOPPED"] * len(self.http_obj_dict[ce][obj_name]["obj"].devices_list)
+ self.http_obj_dict[ce][obj_name]["obj"].data_for_webui['rx rate (1m)'] = self.http_obj_dict[ce][obj_name]["obj"].data['rx rate (1m)']
+ self.http_obj_dict[ce][obj_name]["obj"].data_for_webui['total_err'] = self.http_obj_dict[ce][obj_name]["obj"].data['total_err']
+ self.http_obj_dict[ce][obj_name]["obj"].data_for_webui["start_time"] = self.http_obj_dict[ce][obj_name]["obj"].data["start_time"]
+ self.http_obj_dict[ce][obj_name]["obj"].data_for_webui["end_time"] = self.http_obj_dict[ce][obj_name]["obj"].data["end_time"]
+ self.http_obj_dict[ce][obj_name]["obj"].data_for_webui["remaining_time"] = self.http_obj_dict[ce][obj_name]["obj"].data["remaining_time"]
+ df1 = pd.DataFrame(self.http_obj_dict[ce][obj_name]["obj"].data_for_webui)
+ df1.to_csv('{}/http_datavalues.csv'.format(self.http_obj_dict[ce][obj_name]["obj"].result_dir), index=False)
+
+ self.http_obj_dict[ce][obj_name]["obj"].generate_report(date, num_stations=num_stations,
+ duration=duration, test_setup_info=test_setup_info, dataset=dataset, lis=lis,
+ bands=bands, threshold_2g=threshold_2g, threshold_5g=threshold_5g,
+ threshold_both=threshold_both, dataset2=dataset2, dataset1=dataset1,
+ # summary_table_value=summary_table_value,
+ result_data=result_data, rx_rate=rx_rate,
+ test_rig=test_rig, test_tag=test_tag, dut_hw_version=dut_hw_version,
+ dut_sw_version=dut_sw_version, dut_model_num=dut_model_num,
+ dut_serial_num=dut_serial_num, test_id=test_id,
+ test_input_infor=test_input_infor, csv_outfile=csv_outfile,report_path=self.result_path)
+ params = {
+ "date": date,
+ "num_stations": num_stations,
+ "duration": duration,
+ "test_setup_info": test_setup_info,
+ "dataset": dataset,
+ "lis": lis,
+ "bands": bands,
+ "threshold_2g": threshold_2g,
+ "threshold_5g": threshold_5g,
+ "threshold_both": threshold_both,
+ "dataset2": dataset2,
+ "dataset1": dataset1,
+ # "summary_table_value": summary_table_value, # optional
+ "result_data": result_data,
+ "rx_rate": rx_rate,
+ "test_rig": test_rig,
+ "test_tag": test_tag,
+ "dut_hw_version": dut_hw_version,
+ "dut_sw_version": dut_sw_version,
+ "dut_model_num": dut_model_num,
+ "dut_serial_num": dut_serial_num,
+ "test_id": test_id,
+ "test_input_infor": test_input_infor,
+ "csv_outfile": csv_outfile,
+ "report_path": self.result_path
+ }
+ self.http_obj_dict[ce][obj_name]["data"] = params.copy()
+
+ # report_path = self.result_path
+ # print("Current working directory:", os.getcwd())
+
+ # if bands == "Both":
+ # num_stations = num_stations * 2
+
+ # # report.set_title("HTTP DOWNLOAD TEST")
+ # # report.set_date(date)
+ # if 'http_test' not in self.test_count_dict:
+ # self.test_count_dict['http_test']=0
+ # self.test_count_dict['http_test']+=1
+ # self.overall_report.set_obj_html(_obj_title=f'HTTP Test ({self.test_count_dict["http_test"]})', _obj="")
+ # self.overall_report.build_objective()
+ # self.overall_report.set_table_title("Test Setup Information")
+ # self.overall_report.build_table_title()
+ # self.overall_report.test_setup_table(value="Test Setup Information", test_setup_data=test_setup_info)
+
+ # graph2 = self.http_obj_dict[ce][obj_name]["obj"].graph_2(dataset2, lis=lis, bands=bands)
+ # print("graph name {}".format(graph2))
+ # self.overall_report.set_graph_image(graph2)
+ # self.overall_report.set_csv_filename(graph2)
+ # self.overall_report.move_csv_file()
+ # self.overall_report.move_graph_image()
+ # self.overall_report.build_graph()
+
+ # self.overall_report.set_obj_html(
+ # "Average time taken to download file ",
+ # "The below graph represents average time taken to download for each client "
+ # ". X- axis shows “Average time taken to download a file ” and Y-axis shows "
+ # "Client names."
+ # )
+ # self.overall_report.build_objective()
+
+ # graph = self.http_obj_dict[ce][obj_name]["obj"].generate_graph(dataset=dataset, lis=lis, bands=bands)
+ # self.overall_report.set_graph_image(graph)
+ # self.overall_report.set_csv_filename(graph)
+ # self.overall_report.move_csv_file()
+ # self.overall_report.move_graph_image()
+ # self.overall_report.build_graph()
+
+ # self.overall_report.set_obj_html(
+ # "Download Time Table Description",
+ # "This Table will provide you information of the "
+ # "minimum, maximum and the average time taken by clients to download a webpage in seconds"
+ # )
+ # self.overall_report.build_objective()
+
+ # self.http_obj_dict[ce][obj_name]["obj"].response_port = self.http_obj_dict[ce][obj_name]["obj"].local_realm.json_get("/port/all")
+ # self.http_obj_dict[ce][obj_name]["obj"].channel_list, self.http_obj_dict[ce][obj_name]["obj"].mode_list, self.http_obj_dict[ce][obj_name]["obj"].ssid_list = [], [], []
+
+ # if self.http_obj_dict[ce][obj_name]["obj"].client_type == "Real":
+ # self.http_obj_dict[ce][obj_name]["obj"].devices = self.http_obj_dict[ce][obj_name]["obj"].devices_list
+ # for interface in self.http_obj_dict[ce][obj_name]["obj"].response_port['interfaces']:
+ # for port, port_data in interface.items():
+ # if port in self.http_obj_dict[ce][obj_name]["obj"].port_list:
+ # self.http_obj_dict[ce][obj_name]["obj"].channel_list.append(str(port_data['channel']))
+ # self.http_obj_dict[ce][obj_name]["obj"].mode_list.append(str(port_data['mode']))
+ # self.http_obj_dict[ce][obj_name]["obj"].ssid_list.append(str(port_data['ssid']))
+ # elif self.http_obj_dict[ce][obj_name]["obj"].client_type == "Virtual":
+ # self.http_obj_dict[ce][obj_name]["obj"].devices = self.http_obj_dict[ce][obj_name]["obj"].station_list[0]
+ # for interface in self.http_obj_dict[ce][obj_name]["obj"].response_port['interfaces']:
+ # for port, port_data in interface.items():
+ # if port in self.http_obj_dict[ce][obj_name]["obj"].station_list[0]:
+ # self.http_obj_dict[ce][obj_name]["obj"].channel_list.append(str(port_data['channel']))
+ # self.http_obj_dict[ce][obj_name]["obj"].mode_list.append(str(port_data['mode']))
+ # self.http_obj_dict[ce][obj_name]["obj"].macid_list.append(str(port_data['mac']))
+ # self.http_obj_dict[ce][obj_name]["obj"].ssid_list.append(str(port_data['ssid']))
+
+ # # Processing result_data
+ # z, z1, z2 = [], [], []
+ # for fcc in list(result_data.keys()):
+ # z.extend([str(round(i / 1000, 1)) for i in result_data[fcc]["min"]])
+ # z1.extend([str(round(i / 1000, 1)) for i in result_data[fcc]["max"]])
+ # z2.extend([str(round(i / 1000, 1)) for i in result_data[fcc]["avg"]])
+
+ # download_table_value_dup = {"Minimum": z, "Maximum": z1, "Average": z2}
+ # download_table_value = {"Band": bands, "Minimum": z, "Maximum": z1, "Average": z2}
+
+ # # KPI reporting
+ # kpi_path = self.overall_report.get_report_path()
+ # print("kpi_path :{kpi_path}".format(kpi_path=kpi_path))
+
+ # kpi_csv = lf_kpi_csv.lf_kpi_csv(
+ # _kpi_path=kpi_path,
+ # _kpi_test_rig=test_rig,
+ # _kpi_test_tag=test_tag,
+ # _kpi_dut_hw_version=dut_hw_version,
+ # _kpi_dut_sw_version=dut_sw_version,
+ # _kpi_dut_model_num=dut_model_num,
+ # _kpi_dut_serial_num=dut_serial_num,
+ # _kpi_test_id=test_id
+ # )
+ # kpi_csv.kpi_dict['Units'] = "Mbps"
+ # for band in range(len(download_table_value["Band"])):
+ # kpi_csv.kpi_csv_get_dict_update_time()
+ # kpi_csv.kpi_dict['Graph-Group'] = "Webpage Download {band}".format(
+ # band=download_table_value['Band'][band])
+ # kpi_csv.kpi_dict['short-description'] = "Webpage download {band} Minimum".format(
+ # band=download_table_value['Band'][band])
+ # kpi_csv.kpi_dict['numeric-score'] = "{min}".format(min=download_table_value['Minimum'][band])
+ # kpi_csv.kpi_csv_write_dict(kpi_csv.kpi_dict)
+
+ # kpi_csv.kpi_dict['short-description'] = "Webpage download {band} Maximum".format(
+ # band=download_table_value['Band'][band])
+ # kpi_csv.kpi_dict['numeric-score'] = "{max}".format(max=download_table_value['Maximum'][band])
+ # kpi_csv.kpi_csv_write_dict(kpi_csv.kpi_dict)
+
+ # kpi_csv.kpi_dict['short-description'] = "Webpage download {band} Average".format(
+ # band=download_table_value['Band'][band])
+ # kpi_csv.kpi_dict['numeric-score'] = "{avg}".format(avg=download_table_value['Average'][band])
+ # kpi_csv.kpi_csv_write_dict(kpi_csv.kpi_dict)
+
+ # if csv_outfile is not None:
+ # current_time = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())
+ # csv_outfile = "{}_{}-test_l3_longevity.csv".format(csv_outfile, current_time)
+ # csv_outfile = self.overall_report.file_add_path(csv_outfile)
+ # print("csv output file : {}".format(csv_outfile))
+
+ # test_setup = pd.DataFrame(download_table_value_dup)
+ # self.overall_report.set_table_dataframe(test_setup)
+ # self.overall_report.build_table()
+
+ # if self.http_obj_dict[ce][obj_name]["obj"].group_name:
+ # self.overall_report.set_table_title("Overall Results for Groups")
+ # else:
+ # self.overall_report.set_table_title("Overall Results")
+ # self.overall_report.build_table_title()
+
+ # if self.http_obj_dict[ce][obj_name]["obj"].client_type == "Real":
+ # if self.http_obj_dict[ce][obj_name]["obj"].expected_passfail_value or self.http_obj_dict[ce][obj_name]["obj"].device_csv_name:
+ # test_input_list, pass_fail_list = self.http_obj_dict[ce][obj_name]["obj"].get_pass_fail_list(dataset2)
+
+ # if self.http_obj_dict[ce][obj_name]["obj"].group_name:
+ # for key, val in self.http_obj_dict[ce][obj_name]["obj"].group_device_map.items():
+ # if self.http_obj_dict[ce][obj_name]["obj"].expected_passfail_value or self.http_obj_dict[ce][obj_name]["obj"].device_csv_name:
+ # dataframe = self.http_obj_dict[ce][obj_name]["obj"].generate_dataframe(
+ # val, self.http_obj_dict[ce][obj_name]["obj"].devices, self.http_obj_dict[ce][obj_name]["obj"].macid_list, self.http_obj_dict[ce][obj_name]["obj"].channel_list,
+ # self.http_obj_dict[ce][obj_name]["obj"].ssid_list, self.http_obj_dict[ce][obj_name]["obj"].mode_list, dataset2, test_input_list,
+ # dataset, dataset1, rx_rate, pass_fail_list
+ # )
+ # else:
+ # dataframe = self.http_obj_dict[ce][obj_name]["obj"].generate_dataframe(
+ # val, self.http_obj_dict[ce][obj_name]["obj"].devices, self.http_obj_dict[ce][obj_name]["obj"].macid_list, self.http_obj_dict[ce][obj_name]["obj"].channel_list,
+ # self.http_obj_dict[ce][obj_name]["obj"].ssid_list, self.http_obj_dict[ce][obj_name]["obj"].mode_list, dataset2, [], dataset,
+ # dataset1, rx_rate, []
+ # )
+ # if dataframe:
+ # self.overall_report.set_obj_html("", "Group: {}".format(key))
+ # self.overall_report.build_objective()
+ # dataframe1 = pd.DataFrame(dataframe)
+ # self.overall_report.set_table_dataframe(dataframe1)
+ # self.overall_report.build_table()
+ # else:
+ # dataframe = {
+ # " Clients": self.http_obj_dict[ce][obj_name]["obj"].devices,
+ # " MAC ": self.http_obj_dict[ce][obj_name]["obj"].macid_list,
+ # " Channel": self.http_obj_dict[ce][obj_name]["obj"].channel_list,
+ # " SSID ": self.http_obj_dict[ce][obj_name]["obj"].ssid_list,
+ # " Mode": self.http_obj_dict[ce][obj_name]["obj"].mode_list,
+ # " No of times File downloaded ": dataset2,
+ # " Average time taken to Download file (ms)": dataset,
+ # " Bytes-rd (Mega Bytes) ": dataset1,
+ # "Rx Rate (Mbps)": rx_rate,
+ # "Failed url's": self.http_obj_dict[ce][obj_name]["obj"].data["total_err"]
+ # }
+ # if self.http_obj_dict[ce][obj_name]["obj"].expected_passfail_value or self.http_obj_dict[ce][obj_name]["obj"].device_csv_name:
+ # dataframe[" Expected value of no of times file downloaded"] = test_input_list
+ # dataframe["Status"] = pass_fail_list
+ # dataframe1 = pd.DataFrame(dataframe)
+ # self.overall_report.set_table_dataframe(dataframe1)
+ # self.overall_report.build_table()
+ # else:
+ # dataframe = {
+ # " Clients": self.http_obj_dict[ce][obj_name]["obj"].devices,
+ # " MAC ": self.http_obj_dict[ce][obj_name]["obj"].macid_list,
+ # " Channel": self.http_obj_dict[ce][obj_name]["obj"].channel_list,
+ # " SSID ": self.http_obj_dict[ce][obj_name]["obj"].ssid_list,
+ # " Mode": self.http_obj_dict[ce][obj_name]["obj"].mode_list,
+ # " No of times File downloaded ": dataset2,
+ # " Average time taken to Download file (ms)": dataset,
+ # " Bytes-rd (Mega Bytes) ": dataset1
+ # }
+ # dataframe1 = pd.DataFrame(dataframe)
+ # self.overall_report.set_table_dataframe(dataframe1)
+ # self.overall_report.build_table()
+
+ self.http_obj_dict[ce][obj_name]["obj"].postcleanup()
+ if dowebgui:
+ self.http_obj_dict[ce][obj_name]["obj"].copy_reports_to_home_dir()
+ return True
+
+
+ def run_ftp_test(
+ self,
+ mgr='localhost',
+ mgr_port=8080,
+ upstream_port='eth1',
+ ssid=None,
+ passwd=None,
+ security=None,
+ group_name=None,
+ profile_name=None,
+ file_name=None,
+ ap_name=None,
+ traffic_duration=None,
+ clients_type="Real",
+ dowebgui=False,
+ directions=["Download"],
+ file_sizes=["2MB", "500MB", "1000MB"],
+ local_lf_report_dir="",
+ ap_ip=None,
+ twog_radio='wiphy1',
+ fiveg_radio='wiphy0',
+ sixg_radio='wiphy2',
+ lf_username='lanforge',
+ lf_password='lanforge',
+ ssh_port=22,
+ bands=["5G", "2.4G", "6G", "Both"],
+ num_stations=0,
+ result_dir='',
+ device_list=[],
+ test_name=None,
+ expected_passfail_value=None,
+ device_csv_name=None,
+ wait_time=60,
+ config=False,
+ test_rig="",
+ test_tag="",
+ dut_hw_version="",
+ dut_sw_version="",
+ dut_model_num="",
+ dut_serial_num="",
+ test_priority="",
+ test_id="FTP Data",
+ csv_outfile="",
+ eap_method="DEFAULT",
+ eap_identity='',
+ ieee8021x=False,
+ ieee80211u=False,
+ ieee80211w=1,
+ enable_pkc=False,
+ bss_transition=False,
+ power_save=False,
+ disable_ofdma=False,
+ roam_ft_ds=False,
+ key_management="DEFAULT",
+ pairwise='NA',
+ private_key='NA',
+ ca_cert='NA',
+ client_cert='NA',
+ pk_passwd='NA',
+ pac_file='NA',
+ get_live_view=False,
+ total_floors="0",
+ lf_logger_config_json=None,
+ help_summary=False
+ ):
+ args = SimpleNamespace(**locals())
+ args.mgr = self.lanforge_ip
+ args.mgr_port = int(self.port)
+ return self.run_ftp_test1(args)
+
+ def run_ftp_test1(self,args):
+ # 1st time stamp for test duration
+ time_stamp1 = datetime.now()
+
+ # use for creating ftp_test dictionary
+ interation_num = 0
+
+ # empty dictionary for whole test data
+ ftp_data = {}
+
+ # validate_args(args)
+ if args.traffic_duration.endswith('s') or args.traffic_duration.endswith('S'):
+ args.traffic_duration = int(args.traffic_duration[0:-1])
+ elif args.traffic_duration.endswith('m') or args.traffic_duration.endswith('M'):
+ args.traffic_duration = int(args.traffic_duration[0:-1]) * 60
+ elif args.traffic_duration.endswith('h') or args.traffic_duration.endswith('H'):
+ args.traffic_duration = int(args.traffic_duration[0:-1]) * 60 * 60
+ elif args.traffic_duration.endswith(''):
+ args.traffic_duration = int(args.traffic_duration)
+ ce = self.current_exec #seires
+ if ce == "parallel":
+ obj_name = "ftp_test"
+ else:
+ obj_no = 1
+ while f"ftp_test_{obj_no}" in self.ftp_obj_dict[ce]:
+ obj_no+=1
+ obj_name = f"ftp_test_{obj_no}"
+ self.ftp_obj_dict[ce][obj_name] = {"obj":None,"data":None}
+ # For all combinations ftp_data of directions, file size and client counts, run the test
+ for band in args.bands:
+ for direction in args.directions:
+ for file_size in args.file_sizes:
+ # Start Test
+ self.ftp_obj_dict[ce][obj_name]["obj"] = FtpTest(lfclient_host=args.mgr,
+ lfclient_port=args.mgr_port,
+ result_dir=args.result_dir,
+ upstream=args.upstream_port,
+ dut_ssid=args.ssid,
+ group_name=args.group_name,
+ profile_name=args.profile_name,
+ file_name=args.file_name,
+ dut_passwd=args.passwd,
+ dut_security=args.security,
+ num_sta=args.num_stations,
+ band=band,
+ ap_name=args.ap_name,
+ file_size=file_size,
+ direction=direction,
+ twog_radio=args.twog_radio,
+ fiveg_radio=args.fiveg_radio,
+ sixg_radio=args.sixg_radio,
+ lf_username=args.lf_username,
+ lf_password=args.lf_password,
+ # duration=pass_fail_duration(band, file_size),
+ traffic_duration=args.traffic_duration,
+ ssh_port=args.ssh_port,
+ clients_type=args.clients_type,
+ dowebgui=args.dowebgui,
+ device_list=args.device_list,
+ test_name=args.test_name,
+ eap_method=args.eap_method,
+ eap_identity=args.eap_identity,
+ ieee80211=args.ieee8021x,
+ ieee80211u=args.ieee80211u,
+ ieee80211w=args.ieee80211w,
+ enable_pkc=args.enable_pkc,
+ bss_transition=args.bss_transition,
+ power_save=args.power_save,
+ disable_ofdma=args.disable_ofdma,
+ roam_ft_ds=args.roam_ft_ds,
+ key_management=args.key_management,
+ pairwise=args.pairwise,
+ private_key=args.private_key,
+ ca_cert=args.ca_cert,
+ client_cert=args.client_cert,
+ pk_passwd=args.pk_passwd,
+ pac_file=args.pac_file,
+ expected_passfail_val=args.expected_passfail_value,
+ csv_name=args.device_csv_name,
+ wait_time=args.wait_time,
+ config=args.config,
+ get_live_view= args.get_live_view,
+ total_floors = args.total_floors
+ )
+
+ interation_num = interation_num + 1
+ self.ftp_obj_dict[ce][obj_name]["obj"].file_create()
+ if args.clients_type == "Real":
+ if not isinstance(args.device_list, list):
+ self.ftp_obj_dict[ce][obj_name]["obj"].device_list = self.ftp_obj_dict[ce][obj_name]["obj"].filter_iOS_devices(args.device_list)
+ if len(self.ftp_obj_dict[ce][obj_name]["obj"].device_list) == 0:
+ logger.info("There are no devices available")
+ return False
+ configured_device, configuration = self.ftp_obj_dict[ce][obj_name]["obj"].query_realclients()
+
+ if args.dowebgui and args.group_name:
+ # If no devices are configured,update the Web UI with "Stopped" status
+ if len(configured_device) == 0:
+ logger.warning("No device is available to run the test")
+ obj1 = {
+ "status": "Stopped",
+ "configuration_status": "configured"
+ }
+ self.ftp_obj_dict[ce][obj_name]["obj"].updating_webui_runningjson(obj1)
+ return
+ # If devices are configured, update the Web UI with the list of configured devices
+ else:
+ obj1 = {
+ "configured_devices": configured_device,
+ "configuration_status": "configured"
+ }
+ self.ftp_obj_dict[ce][obj_name]["obj"].updating_webui_runningjson(obj1)
+ self.ftp_obj_dict[ce][obj_name]["obj"].set_values()
+ self.ftp_obj_dict[ce][obj_name]["obj"].precleanup()
+ self.ftp_obj_dict[ce][obj_name]["obj"].build()
+ if not self.ftp_obj_dict[ce][obj_name]["obj"].passes():
+ logger.info(self.ftp_obj_dict[ce][obj_name]["obj"].get_fail_message())
+ return False
+
+ if self.ftp_obj_dict[ce][obj_name]["obj"].clients_type == 'Real':
+ self.ftp_obj_dict[ce][obj_name]["obj"].monitor_cx()
+ logger.info(f'Test started on the devices : {self.ftp_obj_dict[ce][obj_name]["obj"].input_devices_list}')
+ # First time stamp
+ time1 = datetime.now()
+ logger.info("Traffic started running at %s", time1)
+ self.ftp_obj_dict[ce][obj_name]["obj"].start(False, False)
+ # to fetch runtime values during the execution and fill the csv.
+ if args.dowebgui or args.clients_type == "Real":
+ self.ftp_obj_dict[ce][obj_name]["obj"].monitor_for_runtime_csv()
+ self.ftp_obj_dict[ce][obj_name]["obj"].my_monitor_for_real_devices()
+ else:
+ time.sleep(args.traffic_duration)
+ self.ftp_obj_dict[ce][obj_name]["obj"].my_monitor()
+ self.ftp_obj_dict[ce][obj_name]["obj"].stop()
+ print("Traffic stopped running")
+
+ self.ftp_obj_dict[ce][obj_name]["obj"].postcleanup()
+ time2 = datetime.now()
+ logger.info("Test ended at %s", time2)
+
+ # 2nd time stamp for test duration
+ time_stamp2 = datetime.now()
+
+ # total time for test duration
+ # test_duration = str(time_stamp2 - time_stamp1)[:-7]
+
+ date = str(datetime.now()).split(",")[0].replace(" ", "-").split(".")[0]
+
+ # print(ftp_data)
+
+ input_setup_info = {
+ "AP IP": args.ap_ip,
+ "File Size": args.file_sizes,
+ "Bands": args.bands,
+ "Direction": args.directions,
+ "Stations": args.num_stations,
+ "Upstream": args.upstream_port,
+ "SSID": args.ssid,
+ "Security": args.security,
+ "Contact": "support@candelatech.com"
+ }
+ if args.dowebgui:
+ self.ftp_obj_dict[ce][obj_name]["obj"].data_for_webui["status"] = ["STOPPED"] * len(self.ftp_obj_dict[ce][obj_name]["obj"].url_data)
+
+ df1 = pd.DataFrame(self.ftp_obj_dict[ce][obj_name]["obj"].data_for_webui)
+ df1.to_csv('{}/ftp_datavalues.csv'.format(self.ftp_obj_dict[ce][obj_name]["obj"].result_dir), index=False)
+ # copying to home directory i.e home/user_name
+ # self.ftp_obj_dict[ce][obj_name]["obj"].copy_reports_to_home_dir()
+ # Report generation when groups are specified
+ if args.group_name:
+ self.ftp_obj_dict[ce][obj_name]["obj"].generate_report(ftp_data, date, input_setup_info, test_rig=args.test_rig,
+ test_tag=args.test_tag, dut_hw_version=args.dut_hw_version,
+ dut_sw_version=args.dut_sw_version, dut_model_num=args.dut_model_num,
+ dut_serial_num=args.dut_serial_num, test_id=args.test_id,
+ bands=args.bands, csv_outfile=args.csv_outfile, local_lf_report_dir=args.local_lf_report_dir, config_devices=configuration,report_path=self.result_path)
+ # Generating report without group-specific device configuration
+ else:
+ self.ftp_obj_dict[ce][obj_name]["obj"].generate_report(ftp_data, date, input_setup_info, test_rig=args.test_rig,
+ test_tag=args.test_tag, dut_hw_version=args.dut_hw_version,
+ dut_sw_version=args.dut_sw_version, dut_model_num=args.dut_model_num,
+ dut_serial_num=args.dut_serial_num, test_id=args.test_id,
+ bands=args.bands, csv_outfile=args.csv_outfile, local_lf_report_dir=args.local_lf_report_dir,report_path=self.result_path)
+
+ params = {
+ "ftp_data": ftp_data,
+ "date": date,
+ "input_setup_info": input_setup_info,
+ "test_rig": args.test_rig,
+ "test_tag": args.test_tag,
+ "dut_hw_version": args.dut_hw_version,
+ "dut_sw_version": args.dut_sw_version,
+ "dut_model_num": args.dut_model_num,
+ "dut_serial_num": args.dut_serial_num,
+ "test_id": args.test_id,
+ "bands": args.bands,
+ "csv_outfile": args.csv_outfile,
+ "local_lf_report_dir": args.local_lf_report_dir,
+ "report_path": self.result_path
+ }
+
+ if args.group_name:
+ params["config_devices"] = configuration
+ self.ftp_obj_dict[ce][obj_name]["data"] = params.copy()
+ # if args.group_name:
+ # config_devices = configuration
+ # else:
+ # config_devices = ""
+
+ # ftp_data = ftp_data
+ # date = date
+ # input_setup_info = input_setup_info
+ # test_rig = args.test_rig
+ # test_tag = args.test_tag
+ # dut_hw_version = args.dut_hw_version
+ # dut_sw_version = args.dut_sw_version
+ # dut_model_num = args.dut_model_num
+ # dut_serial_num = args.dut_serial_num
+ # test_id = args.test_id
+ # bands = args.bands
+ # csv_outfile = args.csv_outfile
+ # local_lf_report_dir = args.local_lf_report_dir
+ # report_path = self.result_path
+
+ # no_of_stations = ""
+ # duration = ""
+ # x_fig_size = 18
+ # y_fig_size = len(obj.real_client_list1) * .5 + 4
+
+ # if int(obj.traffic_duration) < 60:
+ # duration = str(obj.traffic_duration) + "s"
+ # elif int(obj.traffic_duration == 60) or (int(obj.traffic_duration) > 60 and int(obj.traffic_duration) < 3600):
+ # duration = str(obj.traffic_duration / 60) + "m"
+ # else:
+ # if int(obj.traffic_duration == 3600) or (int(obj.traffic_duration) > 3600):
+ # duration = str(obj.traffic_duration / 3600) + "h"
+
+ # client_list = []
+ # if obj.clients_type == "Real":
+ # client_list = obj.real_client_list1
+ # android_devices, windows_devices, linux_devices, mac_devices = 0, 0, 0, 0
+ # all_devices_names = []
+ # device_type = []
+ # total_devices = ""
+ # for i in obj.real_client_list:
+ # split_device_name = i.split(" ")
+ # if 'android' in split_device_name:
+ # all_devices_names.append(split_device_name[2] + ("(Android)"))
+ # device_type.append("Android")
+ # android_devices += 1
+ # elif 'Win' in split_device_name:
+ # all_devices_names.append(split_device_name[2] + ("(Windows)"))
+ # device_type.append("Windows")
+ # windows_devices += 1
+ # elif 'Lin' in split_device_name:
+ # all_devices_names.append(split_device_name[2] + ("(Linux)"))
+ # device_type.append("Linux")
+ # linux_devices += 1
+ # elif 'Mac' in split_device_name:
+ # all_devices_names.append(split_device_name[2] + ("(Mac)"))
+ # device_type.append("Mac")
+ # mac_devices += 1
+
+ # if android_devices > 0:
+ # total_devices += f" Android({android_devices})"
+ # if windows_devices > 0:
+ # total_devices += f" Windows({windows_devices})"
+ # if linux_devices > 0:
+ # total_devices += f" Linux({linux_devices})"
+ # if mac_devices > 0:
+ # total_devices += f" Mac({mac_devices})"
+ # else:
+ # if obj.clients_type == "Virtual":
+ # client_list = obj.station_list
+ # if 'ftp_test' not in self.test_count_dict:
+ # self.test_count_dict['ftp_test']=0
+ # self.test_count_dict['ftp_test']+=1
+ # self.overall_report.set_obj_html(_obj_title=f'FTP Test ', _obj="")
+ # self.overall_report.build_objective()
+ # self.overall_report.set_table_title("Test Setup Information")
+ # self.overall_report.build_table_title()
+
+ # if obj.clients_type == "Virtual":
+ # no_of_stations = str(len(obj.station_list))
+ # else:
+ # no_of_stations = str(len(obj.input_devices_list))
+
+ # if obj.clients_type == "Real":
+ # if config_devices == "":
+ # test_setup_info = {
+ # "AP Name": obj.ap_name,
+ # "SSID": obj.ssid,
+ # "Security": obj.security,
+ # "Device List": ", ".join(all_devices_names),
+ # "No of Devices": "Total" + f"({no_of_stations})" + total_devices,
+ # "Failed CXs": obj.failed_cx if obj.failed_cx else "NONE",
+ # "File size": obj.file_size,
+ # "File location": "/home/lanforge",
+ # "Traffic Direction": obj.direction,
+ # "Traffic Duration ": duration
+ # }
+ # else:
+ # group_names = ', '.join(config_devices.keys())
+ # profile_names = ', '.join(config_devices.values())
+ # configmap = "Groups:" + group_names + " -> Profiles:" + profile_names
+ # test_setup_info = {
+ # "AP Name": obj.ap_name,
+ # 'Configuration': configmap,
+ # "No of Devices": "Total" + f"({no_of_stations})" + total_devices,
+ # "File size": obj.file_size,
+ # "File location": "/home/lanforge",
+ # "Traffic Direction": obj.direction,
+ # "Traffic Duration ": duration
+ # }
+ # else:
+ # test_setup_info = {
+ # "AP Name": obj.ap_name,
+ # "SSID": obj.ssid,
+ # "Security": obj.security,
+ # "No of Devices": no_of_stations,
+ # "File size": obj.file_size,
+ # "File location": "/home/lanforge",
+ # "Traffic Direction": obj.direction,
+ # "Traffic Duration ": duration
+ # }
+
+ # self.overall_report.test_setup_table(value="Test Setup Information", test_setup_data=test_setup_info)
+
+ # self.overall_report.set_obj_html(
+ # _obj_title=f"No of times file {obj.direction}",
+ # _obj=f"The below graph represents number of times a file {obj.direction} for each client"
+ # f"(WiFi) traffic. X- axis shows “No of times file {obj.direction}” and Y-axis shows "
+ # f"Client names.")
+
+ # self.overall_report.build_objective()
+ # graph = lf_bar_graph_horizontal(_data_set=[obj.url_data], _xaxis_name=f"No of times file {obj.direction}",
+ # _yaxis_name="Client names",
+ # _yaxis_categories=[i for i in client_list],
+ # _yaxis_label=[i for i in client_list],
+ # _yaxis_step=1,
+ # _yticks_font=8,
+ # _yticks_rotation=None,
+ # _graph_title=f"No of times file {obj.direction} (Count)",
+ # _title_size=16,
+ # _figsize=(x_fig_size, y_fig_size),
+ # _legend_loc="best",
+ # _legend_box=(1.0, 1.0),
+ # _color_name=['orange'],
+ # _show_bar_value=True,
+ # _enable_csv=True,
+ # _graph_image_name="Total-url_ftp", _color_edge=['black'],
+ # _color=['orange'],
+ # _label=[obj.direction])
+ # graph_png = graph.build_bar_graph_horizontal()
+ # print("graph name {}".format(graph_png))
+ # self.overall_report.set_graph_image(graph_png)
+ # # need to move the graph image to the results
+ # self.overall_report.move_graph_image()
+ # self.overall_report.set_csv_filename(graph_png)
+ # self.overall_report.move_csv_file()
+ # self.overall_report.build_graph()
+ # self.overall_report.set_obj_html(
+ # _obj_title=f"Average time taken to {obj.direction} file ",
+ # _obj=f"The below graph represents average time taken to {obj.direction} for each client "
+ # f"(WiFi) traffic. X- axis shows “Average time taken to {obj.direction} a file ” and Y-axis shows "
+ # f"Client names.")
+
+ # self.overall_report.build_objective()
+ # graph = lf_bar_graph_horizontal(_data_set=[obj.uc_avg], _xaxis_name=f"Average time taken to {obj.direction} file in ms",
+ # _yaxis_name="Client names",
+ # _yaxis_categories=[i for i in client_list],
+ # _yaxis_label=[i for i in client_list],
+ # _yaxis_step=1,
+ # _yticks_font=8,
+ # _yticks_rotation=None,
+ # _graph_title=f"Average time taken to {obj.direction} file",
+ # _title_size=16,
+ # _figsize=(x_fig_size, y_fig_size),
+ # _legend_loc="best",
+ # _legend_box=(1.0, 1.0),
+ # _color_name=['steelblue'],
+ # _show_bar_value=True,
+ # _enable_csv=True,
+ # _graph_image_name="ucg-avg_ftp", _color_edge=['black'],
+ # _color=['steelblue'],
+ # _label=[obj.direction])
+ # graph_png = graph.build_bar_graph_horizontal()
+ # print("graph name {}".format(graph_png))
+ # self.overall_report.set_graph_image(graph_png)
+ # self.overall_report.move_graph_image()
+ # # need to move the graph image to the results
+ # self.overall_report.set_csv_filename(graph_png)
+ # self.overall_report.move_csv_file()
+ # self.overall_report.build_graph()
+ # if(obj.dowebgui and obj.get_live_view):
+ # for floor in range(0,int(obj.total_floors)):
+ # script_dir = os.path.dirname(os.path.abspath(__file__))
+ # throughput_image_path = os.path.join(script_dir, "heatmap_images", f"ftp_{obj.test_name}_{floor+1}.png")
+ # # rssi_image_path = os.path.join(script_dir, "heatmap_images", f"{self.test_name}_rssi_{floor+1}.png")
+ # timeout = 60 # seconds
+ # start_time = time.time()
+
+ # while not (os.path.exists(throughput_image_path)):
+ # if time.time() - start_time > timeout:
+ # print("Timeout: Images not found within 60 seconds.")
+ # break
+ # time.sleep(1)
+ # while not os.path.exists(throughput_image_path):
+ # if os.path.exists(throughput_image_path):
+ # break
+ # # time.sleep(10)
+ # if os.path.exists(throughput_image_path):
+ # self.overall_report.set_custom_html('')
+ # self.overall_report.build_custom()
+ # # self.overall_report.set_custom_html("Average Throughput Heatmap:
")
+ # # self.overall_report.build_custom()
+ # self.overall_report.set_custom_html(f'
')
+ # self.overall_report.build_custom()
+ # # os.remove(throughput_image_path)
+ # self.overall_report.set_obj_html("File Download Time (sec)", "The below table will provide information of "
+ # "minimum, maximum and the average time taken by clients to download a file in seconds")
+ # self.overall_report.build_objective()
+ # dataframe2 = {
+ # "Minimum": [str(round(min(obj.uc_min) / 1000, 1))],
+ # "Maximum": [str(round(max(obj.uc_max) / 1000, 1))],
+ # "Average": [str(round((sum(obj.uc_avg) / len(client_list)) / 1000, 1))]
+ # }
+ # dataframe3 = pd.DataFrame(dataframe2)
+ # self.overall_report.set_table_dataframe(dataframe3)
+ # self.overall_report.build_table()
+ # self.overall_report.set_table_title("Overall Results")
+ # self.overall_report.build_table_title()
+ # if obj.clients_type == 'Real':
+ # # Calculating the pass/fail criteria when either expected_passfail_val or csv_name is provided
+ # if obj.expected_passfail_val or obj.csv_name:
+ # obj.get_pass_fail_list(client_list)
+ # # When groups are provided a seperate table will be generated for each group using generate_dataframe
+ # if obj.group_name:
+ # for key, val in obj.group_device_map.items():
+ # if obj.expected_passfail_val or obj.csv_name:
+ # dataframe = obj.generate_dataframe(val, client_list, obj.mac_id_list, obj.channel_list, obj.ssid_list, obj.mode_list,
+ # obj.url_data, obj.test_input_list, obj.uc_avg, obj.bytes_rd, obj.rx_rate, obj.pass_fail_list)
+ # else:
+ # dataframe = obj.generate_dataframe(val, client_list, obj.mac_id_list, obj.channel_list, obj.ssid_list,
+ # obj.mode_list, obj.url_data, [], obj.uc_avg, obj.bytes_rd, obj.rx_rate, [])
+
+ # if dataframe:
+ # self.overall_report.set_obj_html("", "Group: {}".format(key))
+ # self.overall_report.build_objective()
+ # dataframe1 = pd.DataFrame(dataframe)
+ # self.overall_report.set_table_dataframe(dataframe1)
+ # self.overall_report.build_table()
+ # else:
+ # dataframe = {
+ # " Clients": client_list,
+ # " MAC ": obj.mac_id_list,
+ # " Channel": obj.channel_list,
+ # " SSID ": obj.ssid_list,
+ # " Mode": obj.mode_list,
+ # " No of times File downloaded ": obj.url_data,
+ # " Time Taken to Download file (ms)": obj.uc_avg,
+ # " Bytes-rd (Mega Bytes)": obj.bytes_rd,
+ # " RX RATE (Mbps) ": obj.rx_rate,
+ # "Failed Urls": obj.total_err
+ # }
+ # if obj.expected_passfail_val or obj.csv_name:
+ # dataframe[" Expected output "] = obj.test_input_list
+ # dataframe[" Status "] = obj.pass_fail_list
+
+ # dataframe1 = pd.DataFrame(dataframe)
+ # self.overall_report.set_table_dataframe(dataframe1)
+ # self.overall_report.build_table()
+
+ # else:
+ # dataframe = {
+ # " Clients": client_list,
+ # " MAC ": obj.mac_id_list,
+ # " Channel": obj.channel_list,
+ # " SSID ": obj.ssid_list,
+ # " Mode": obj.mode_list,
+ # " No of times File downloaded ": obj.url_data,
+ # " Time Taken to Download file (ms)": obj.uc_avg,
+ # " Bytes-rd (Mega Bytes)": obj.bytes_rd,
+ # }
+ # dataframe1 = pd.DataFrame(dataframe)
+ # self.overall_report.set_table_dataframe(dataframe1)
+ # self.overall_report.build_table()
+ # # self.overall_report.build_footer()
+ # # html_file = self.overall_report.write_html()
+ # # logger.info("returned file {}".format(html_file))
+ # # logger.info(html_file)
+ # # self.overall_report.write_pdf()
+
+ # if csv_outfile is not None:
+ # current_time = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())
+ # csv_outfile = "{}_{}-test_l4_ftp.csv".format(
+ # csv_outfile, current_time)
+ # csv_outfile = self.overall_report.file_add_path(csv_outfile)
+ # logger.info("csv output file : {}".format(csv_outfile))
+
+
+
+
+ # if args.dowebgui:
+ # obj.copy_reports_to_home_dir()
+
+ return True
+
+
+ def run_qos_test(
+ self,
+ device_list=None,
+ test_name=None,
+ result_dir='',
+ upstream_port='eth1',
+ security="open",
+ ssid=None,
+ passwd='[BLANK]',
+ traffic_type=None,
+ upload=None,
+ download=None,
+ test_duration="2m",
+ ap_name="Test-AP",
+ tos=None,
+ dowebgui=False,
+ debug=False,
+ help_summary=False,
+ group_name=None,
+ profile_name=None,
+ file_name=None,
+ eap_method='DEFAULT',
+ eap_identity='',
+ ieee8021x=False,
+ ieee80211u=False,
+ ieee80211w=1,
+ enable_pkc=False,
+ bss_transition=False,
+ power_save=False,
+ disable_ofdma=False,
+ roam_ft_ds=False,
+ key_management='DEFAULT',
+ pairwise='NA',
+ private_key='NA',
+ ca_cert='NA',
+ client_cert='NA',
+ pk_passwd='NA',
+ pac_file='NA',
+ expected_passfail_value=None,
+ device_csv_name=None,
+ wait_time=60,
+ config=False,
+ get_live_view=False,
+ total_floors="0"
+ ):
+ test_results = {'test_results': []}
+ loads = {}
+ data = {}
+ if download and upload:
+ loads = {'upload': str(upload).split(","), 'download': str(download).split(",")}
+ loads_data = loads["download"]
+ elif download:
+ loads = {'upload': [], 'download': str(download).split(",")}
+ for i in range(len(download)):
+ loads['upload'].append(0)
+ loads_data = loads["download"]
+ else:
+ if upload:
+ loads = {'upload': str(upload).split(","), 'download': []}
+ for i in range(len(upload)):
+ loads['download'].append(0)
+ loads_data = loads["upload"]
+ if download and upload:
+ direction = 'L3_' + traffic_type.split('_')[1].upper() + '_BiDi'
+ elif upload:
+ direction = 'L3_' + traffic_type.split('_')[1].upper() + '_UL'
+ else:
+ direction = 'L3_' + traffic_type.split('_')[1].upper() + '_DL'
+
+ # validate_args(args)
+ if test_duration.endswith('s') or test_duration.endswith('S'):
+ test_duration = int(test_duration[0:-1])
+ elif test_duration.endswith('m') or test_duration.endswith('M'):
+ test_duration = int(test_duration[0:-1]) * 60
+ elif test_duration.endswith('h') or test_duration.endswith('H'):
+ test_duration = int(test_duration[0:-1]) * 60 * 60
+ elif test_duration.endswith(''):
+ test_duration = int(test_duration)
+ ce = self.current_exec #seires
+ if ce == "parallel":
+ obj_name = "qos_test"
+ else:
+ obj_no = 1
+ while f"qos_test_{obj_no}" in self.qos_obj_dict[ce]:
+ obj_no+=1
+ obj_name = f"qos_test_{obj_no}"
+ self.qos_obj_dict[ce][obj_name] = {"obj":None,"data":None}
+ for index in range(len(loads_data)):
+ self.qos_obj_dict[ce][obj_name]["obj"] = qos_test.ThroughputQOS(host=self.lanforge_ip,
+ ip=self.lanforge_ip,
+ port=self.port,
+ number_template="0000",
+ ap_name=ap_name,
+ name_prefix="TOS-",
+ upstream=upstream_port,
+ ssid=ssid,
+ password=passwd,
+ security=security,
+ test_duration=test_duration,
+ use_ht160=False,
+ side_a_min_rate=int(loads['upload'][index]),
+ side_b_min_rate=int(loads['download'][index]),
+ traffic_type=traffic_type,
+ tos=tos,
+ csv_direction=direction,
+ dowebgui=dowebgui,
+ test_name=test_name,
+ result_dir=result_dir,
+ device_list=device_list,
+ _debug_on=debug,
+ group_name=group_name,
+ profile_name=profile_name,
+ file_name=file_name,
+ eap_method=eap_method,
+ eap_identity=eap_identity,
+ ieee80211=ieee8021x,
+ ieee80211u=ieee80211u,
+ ieee80211w=ieee80211w,
+ enable_pkc=enable_pkc,
+ bss_transition=bss_transition,
+ power_save=power_save,
+ disable_ofdma=disable_ofdma,
+ roam_ft_ds=roam_ft_ds,
+ key_management=key_management,
+ pairwise=pairwise,
+ private_key=private_key,
+ ca_cert=ca_cert,
+ client_cert=client_cert,
+ pk_passwd=pk_passwd,
+ pac_file=pac_file,
+ expected_passfail_val=expected_passfail_value,
+ csv_name=device_csv_name,
+ wait_time=wait_time,
+ config=config,
+ get_live_view=get_live_view,
+ total_floors=total_floors
+ )
+ self.qos_obj_dict[ce][obj_name]["obj"].os_type()
+ _, configured_device, _, configuration = self.qos_obj_dict[ce][obj_name]["obj"].phantom_check()
+ if dowebgui and group_name:
+ if len(configured_device) == 0:
+ logger.warning("No device is available to run the test")
+ obj1 = {
+ "status": "Stopped",
+ "configuration_status": "configured"
+ }
+ self.qos_obj_dict[ce][obj_name]["obj"].updating_webui_runningjson(obj1)
+ return
+ else:
+ obj1 = {
+ "configured_devices": configured_device,
+ "configuration_status": "configured"
+ }
+ self.qos_obj_dict[ce][obj_name]["obj"].updating_webui_runningjson(obj1)
+ # checking if we have atleast one device available for running test
+ if self.qos_obj_dict[ce][obj_name]["obj"].dowebgui == "True":
+ if self.qos_obj_dict[ce][obj_name]["obj"].device_found is False:
+ logger.warning("No Device is available to run the test hence aborting the test")
+ df1 = pd.DataFrame([{
+ "BE_dl": 0,
+ "BE_ul": 0,
+ "BK_dl": 0,
+ "BK_ul": 0,
+ "VI_dl": 0,
+ "VI_ul": 0,
+ "VO_dl": 0,
+ "VO_ul": 0,
+ "timestamp": datetime.now().strftime('%H:%M:%S'),
+ 'status': 'Stopped'
+ }]
+ )
+ df1.to_csv('{}/overall_throughput.csv'.format(self.qos_obj_dict[ce][obj_name]["obj"].result_dir), index=False)
+ raise ValueError("Aborting the test....")
+ self.qos_obj_dict[ce][obj_name]["obj"].build()
+ self.qos_obj_dict[ce][obj_name]["obj"].monitor_cx()
+ self.qos_obj_dict[ce][obj_name]["obj"].start(False, False)
+ time.sleep(10)
+ connections_download, connections_upload, drop_a_per, drop_b_per, connections_download_avg, connections_upload_avg, avg_drop_a, avg_drop_b = self.qos_obj_dict[ce][obj_name]["obj"].monitor()
+ logger.info("connections download {}".format(connections_download))
+ logger.info("connections upload {}".format(connections_upload))
+ self.qos_obj_dict[ce][obj_name]["obj"].stop()
+ time.sleep(5)
+ test_results['test_results'].append(self.qos_obj_dict[ce][obj_name]["obj"].evaluate_qos(connections_download, connections_upload, drop_a_per, drop_b_per))
+ data.update(test_results)
+ test_end_time = datetime.now().strftime("%Y %d %H:%M:%S")
+ print("Test ended at: ", test_end_time)
+
+ input_setup_info = {
+ "contact": "support@candelatech.com"
+ }
+ self.qos_obj_dict[ce][obj_name]["obj"].cleanup()
+
+ # Update webgui running json with latest entry and test status completed
+ if self.qos_obj_dict[ce][obj_name]["obj"].dowebgui == "True":
+ last_entry = self.qos_obj_dict[ce][obj_name]["obj"].overall[len(self.qos_obj_dict[ce][obj_name]["obj"].overall) - 1]
+ last_entry["status"] = "Stopped"
+ last_entry["timestamp"] = datetime.now().strftime("%d/%m %I:%M:%S %p")
+ last_entry["remaining_time"] = "0"
+ last_entry["end_time"] = last_entry["timestamp"]
+ self.qos_obj_dict[ce][obj_name]["obj"].df_for_webui.append(
+ last_entry
+ )
+ df1 = pd.DataFrame(self.qos_obj_dict[ce][obj_name]["obj"].df_for_webui)
+ df1.to_csv('{}/overall_throughput.csv'.format(result_dir, ), index=False)
+
+ # copying to home directory i.e home/user_name
+ self.qos_obj_dict[ce][obj_name]["obj"].copy_reports_to_home_dir()
+ if group_name:
+ self.qos_obj_dict[ce][obj_name]["obj"].generate_report(
+ data=data,
+ input_setup_info=input_setup_info,
+ report_path=self.qos_obj_dict[ce][obj_name]["obj"].result_dir if self.qos_obj_dict[ce][obj_name]["obj"].dowebgui else self.result_path,
+ connections_upload_avg=connections_upload_avg,
+ connections_download_avg=connections_download_avg,
+ avg_drop_a=avg_drop_a,
+ avg_drop_b=avg_drop_b, config_devices=configuration)
+ else:
+ self.qos_obj_dict[ce][obj_name]["obj"].generate_report(
+ data=data,
+ input_setup_info=input_setup_info,
+ report_path=self.qos_obj_dict[ce][obj_name]["obj"].result_dir if self.qos_obj_dict[ce][obj_name]["obj"].dowebgui else self.result_path,
+ connections_upload_avg=connections_upload_avg,
+ connections_download_avg=connections_download_avg,
+ avg_drop_a=avg_drop_a,
+ avg_drop_b=avg_drop_b)
+ params = {
+ "data": None,
+ "input_setup_info": None,
+ "connections_download_avg": None,
+ "connections_upload_avg": None,
+ "avg_drop_a": None,
+ "avg_drop_b": None,
+ "report_path": "",
+ "result_dir_name": "Qos_Test_report",
+ "selected_real_clients_names": None,
+ "config_devices": ""
+ }
+
+ params.update({
+ "data": data,
+ "input_setup_info": input_setup_info,
+ "report_path": (
+ self.qos_obj_dict[ce][obj_name]["obj"].result_dir
+ if self.qos_obj_dict[ce][obj_name]["obj"].dowebgui else self.result_path
+ ),
+ "connections_upload_avg": connections_upload_avg,
+ "connections_download_avg": connections_download_avg,
+ "avg_drop_a": avg_drop_a,
+ "avg_drop_b": avg_drop_b
+ })
+
+ if group_name:
+ params["config_devices"] = configuration
+ self.qos_obj_dict[ce][obj_name]["data"] = params.copy()
+ return True
+
+ def run_vs_test(self,args):
+
+ media_source_dict = {
+ 'dash': '1',
+ 'smooth_streaming': '2',
+ 'hls': '3',
+ 'progressive': '4',
+ 'rtsp': '5'
+ }
+ media_quality_dict = {
+ '4k': '0',
+ '8k': '1',
+ '1080p': '2',
+ '720p': '3',
+ '360p': '4'
+ }
+
+ if args.file_name:
+ args.file_name = args.file_name.removesuffix('.csv')
+
+ media_source, media_quality = args.media_source.capitalize(), args.media_quality
+ args.media_source = args.media_source.lower()
+ args.media_quality = args.media_quality.lower()
+
+ if any(char.isalpha() for char in args.media_source):
+ args.media_source = media_source_dict[args.media_source]
+
+ if any(char.isalpha() for char in args.media_quality):
+ args.media_quality = media_quality_dict[args.media_quality]
+
+ logger_config = lf_logger_config.lf_logger_config()
+
+ if args.log_level:
+ logger_config.set_level(level=args.log_level)
+
+ if args.lf_logger_config_json:
+ logger_config.lf_logger_config_json = args.lf_logger_config_json
+ logger_config.load_lf_logger_config()
+
+ logger = logging.getLogger(__name__)
+ ce = self.current_exec #seires
+ if ce == "parallel":
+ obj_name = "vs_test"
+ else:
+ obj_no = 1
+ while f"vs_test_{obj_no}" in self.vs_obj_dict[ce]:
+ obj_no+=1
+ obj_name = f"vs_test_{obj_no}"
+ self.vs_obj_dict[ce][obj_name] = {"obj":None,"data":None}
+ self.vs_obj_dict[ce][obj_name]["obj"] = VideoStreamingTest(host=args.host, ssid=args.ssid, passwd=args.passwd, encryp=args.encryp,
+ suporrted_release=["7.0", "10", "11", "12"], max_speed=args.max_speed,
+ url=args.url, urls_per_tenm=args.urls_per_tenm, duration=args.duration,
+ resource_ids=args.device_list, dowebgui=args.dowebgui, media_quality=args.media_quality, media_source=args.media_source,
+ result_dir=args.result_dir, test_name=args.test_name, incremental=args.incremental, postcleanup=args.postcleanup,
+ precleanup=args.precleanup,
+ pass_fail_val=args.expected_passfail_value,
+ csv_name=args.device_csv_name,
+ groups=args.group_name,
+ profiles=args.profile_name,
+ config=args.config,
+ file_name=args.file_name,
+ floors=args.floors,
+ get_live_view=args.get_live_view
+ )
+ args.upstream_port = self.vs_obj_dict[ce][obj_name]["obj"].change_port_to_ip(args.upstream_port)
+ self.vs_obj_dict[ce][obj_name]["obj"].validate_args()
+ config_obj = DeviceConfig.DeviceConfig(lanforge_ip=args.host, file_name=args.file_name)
+ # if not args.expected_passfail_value and args.device_csv_name is None:
+ # config_obj.device_csv_file(csv_name="device.csv")
+
+ resource_ids_sm = []
+ resource_set = set()
+ resource_list = []
+ resource_ids_generated = ""
+
+ if args.group_name and args.file_name and args.profile_name:
+ selected_groups = args.group_name.split(',')
+ selected_profiles = args.profile_name.split(',')
+ config_devices = {}
+ for i in range(len(selected_groups)):
+ config_devices[selected_groups[i]] = selected_profiles[i]
+ config_obj.initiate_group()
+ asyncio.run(config_obj.connectivity(config_devices, upstream=args.upstream_port))
+
+ adbresponse = config_obj.adb_obj.get_devices()
+ resource_manager = config_obj.laptop_obj.get_devices()
+ all_res = {}
+ df1 = config_obj.display_groups(config_obj.groups)
+ groups_list = df1.to_dict(orient='list')
+ group_devices = {}
+ for adb in adbresponse:
+ group_devices[adb['serial']] = adb['eid']
+ for res in resource_manager:
+ all_res[res['hostname']] = res['shelf'] + '.' + res['resource']
+ eid_list = []
+ for grp_name in groups_list.keys():
+ for g_name in selected_groups:
+ if grp_name == g_name:
+ for j in groups_list[grp_name]:
+ if j in group_devices.keys():
+ eid_list.append(group_devices[j])
+ elif j in all_res.keys():
+ eid_list.append(all_res[j])
+ args.device_list = ",".join(id for id in eid_list)
+ else:
+ # When group/profile are not provided
+ config_dict = {
+ 'ssid': args.ssid,
+ 'passwd': args.passwd,
+ 'enc': args.encryp,
+ 'eap_method': args.eap_method,
+ 'eap_identity': args.eap_identity,
+ 'ieee80211': args.ieee8021x,
+ 'ieee80211u': args.ieee80211u,
+ 'ieee80211w': args.ieee80211w,
+ 'enable_pkc': args.enable_pkc,
+ 'bss_transition': args.bss_transition,
+ 'power_save': args.power_save,
+ 'disable_ofdma': args.disable_ofdma,
+ 'roam_ft_ds': args.roam_ft_ds,
+ 'key_management': args.key_management,
+ 'pairwise': args.pairwise,
+ 'private_key': args.private_key,
+ 'ca_cert': args.ca_cert,
+ 'client_cert': args.client_cert,
+ 'pk_passwd': args.pk_passwd,
+ 'pac_file': args.pac_file,
+ 'server_ip': args.upstream_port
+ }
+ if args.device_list:
+ all_devices = config_obj.get_all_devices()
+ if args.group_name is None and args.file_name is None and args.profile_name is None:
+ dev_list = args.device_list.split(',')
+ if args.config:
+ asyncio.run(config_obj.connectivity(device_list=dev_list, wifi_config=config_dict))
+ else:
+ if args.config:
+ all_devices = config_obj.get_all_devices()
+ device_list = []
+ for device in all_devices:
+ if device["type"] != 'laptop':
+ device_list.append(device["shelf"] + '.' + device["resource"] + " " + device["serial"])
+ elif device["type"] == 'laptop':
+ device_list.append(device["shelf"] + '.' + device["resource"] + " " + device["hostname"])
+ print("Available devices:")
+ for device in device_list:
+ print(device)
+ args.device_list = input("Enter the desired resources to run the test:")
+ dev1_list = args.device_list.split(',')
+ asyncio.run(config_obj.connectivity(device_list=dev1_list, wifi_config=config_dict))
+ else:
+ self.vs_obj_dict[ce][obj_name]["obj"].android_devices = self.vs_obj_dict[ce][obj_name]["obj"].devices.get_devices(only_androids=True)
+ selected_devices, report_labels, selected_macs = self.vs_obj_dict[ce][obj_name]["obj"].devices.query_user()
+ if not selected_devices:
+ logging.info("devices donot exist..!!")
+ return
+
+ self.vs_obj_dict[ce][obj_name]["obj"].android_list = selected_devices
+ # Verify if all resource IDs are valid for Android devices
+ if self.vs_obj_dict[ce][obj_name]["obj"].android_list:
+ resource_ids = ",".join([item.split(".")[1] for item in self.vs_obj_dict[ce][obj_name]["obj"].android_list])
+
+ num_list = list(map(int, resource_ids.split(',')))
+
+ # Sort the list
+ num_list.sort()
+
+ # Join the sorted list back into a string
+ sorted_string = ','.join(map(str, num_list))
+
+ self.vs_obj_dict[ce][obj_name]["obj"].resource_ids = sorted_string
+ resource_ids1 = list(map(int, sorted_string.split(',')))
+ modified_list = list(map(lambda item: int(item.split('.')[1]), self.vs_obj_dict[ce][obj_name]["obj"].android_devices))
+ if not all(x in modified_list for x in resource_ids1):
+ logging.info("Verify Resource ids, as few are invalid...!!")
+ return False
+ resource_ids_sm = self.vs_obj_dict[ce][obj_name]["obj"].resource_ids
+ resource_list = resource_ids_sm.split(',')
+ resource_set = set(resource_list)
+ resource_list_sorted = sorted(resource_set)
+ resource_ids_generated = ','.join(resource_list_sorted)
+ available_resources = list(resource_set)
+
+ if args.dowebgui:
+ resource_ids_sm = args.device_list.split(',')
+ resource_set = set(resource_ids_sm)
+ resource_list = sorted(resource_set)
+ resource_ids_generated = ','.join(resource_list)
+ resource_list_sorted = resource_list
+ selected_devices, report_labels, selected_macs = self.vs_obj_dict[ce][obj_name]["obj"].devices.query_user(dowebgui=args.dowebgui, device_list=resource_ids_generated)
+ self.vs_obj_dict[ce][obj_name]["obj"].resource_ids = ",".join(id.split(".")[1] for id in args.device_list.split(","))
+ available_resources = [int(num) for num in self.vs_obj_dict[ce][obj_name]["obj"].resource_ids.split(',')]
+ else:
+ self.vs_obj_dict[ce][obj_name]["obj"].android_devices = self.vs_obj_dict[ce][obj_name]["obj"].devices.get_devices(only_androids=True)
+ if args.device_list:
+ device_list = args.device_list.split(',')
+ # Extract resource IDs (after the dot), remove duplicates, and sort them
+ resource_ids = sorted(set(int(item.split('.')[1]) for item in device_list if '.' in item))
+ resource_list_sorted = resource_ids
+ self.vs_obj_dict[ce][obj_name]["obj"].resource_ids = ','.join(map(str, resource_ids))
+ # Create a set of Android device IDs (e.g., "resource.123")
+ android_device_ids = set(self.vs_obj_dict[ce][obj_name]["obj"].android_devices)
+ android_device_short_ids = {device.split('.')[0] + '.' + device.split('.')[1] for device in android_device_ids}
+ self.vs_obj_dict[ce][obj_name]["obj"].android_list = [dev for dev in android_device_short_ids if dev in device_list]
+ # Log any devices in the list that are not available
+ for dev in device_list:
+ if dev not in android_device_short_ids:
+ logger.info(f"{dev} device is not available")
+ # Final list of available Android resource IDs
+ available_resources = sorted(set(int(dev.split('.')[1]) for dev in self.vs_obj_dict[ce][obj_name]["obj"].android_list))
+ logger.info(f"Available devices: {available_resources}")
+ if len(available_resources) != 0:
+ available_resources = self.vs_obj_dict[ce][obj_name]["obj"].filter_ios_devices(available_resources)
+ if len(available_resources) == 0:
+ logger.info("No devices which are selected are available in the lanforge")
+ return False
+ gave_incremental = False
+ if args.incremental and not args.webgui_incremental:
+ if self.vs_obj_dict[ce][obj_name]["obj"].resource_ids:
+ logging.info("The total available devices are {}".format(len(available_resources)))
+ self.vs_obj_dict[ce][obj_name]["obj"].incremental = input('Specify incremental values as 1,2,3 : ')
+ self.vs_obj_dict[ce][obj_name]["obj"].incremental = [int(x) for x in self.vs_obj_dict[ce][obj_name]["obj"].incremental.split(',')]
+ else:
+ logging.info("incremental Values are not needed as Android devices are not selected..")
+ elif not args.incremental:
+ gave_incremental = True
+ self.vs_obj_dict[ce][obj_name]["obj"].incremental = [len(available_resources)]
+
+ if args.webgui_incremental:
+ incremental = [int(x) for x in args.webgui_incremental.split(',')]
+ if (len(args.webgui_incremental) == 1 and incremental[0] != len(resource_list_sorted)) or (len(args.webgui_incremental) > 1):
+ self.vs_obj_dict[ce][obj_name]["obj"].incremental = incremental
+
+ if self.vs_obj_dict[ce][obj_name]["obj"].incremental and self.vs_obj_dict[ce][obj_name]["obj"].resource_ids:
+ if self.vs_obj_dict[ce][obj_name]["obj"].incremental[-1] > len(available_resources):
+ logging.info("Exiting the program as incremental values are greater than the resource ids provided")
+ return False
+ elif self.vs_obj_dict[ce][obj_name]["obj"].incremental[-1] < len(available_resources) and len(self.vs_obj_dict[ce][obj_name]["obj"].incremental) > 1:
+ logging.info("Exiting the program as the last incremental value must be equal to selected devices")
+ return False
+
+ # To create cx for selected devices
+ self.vs_obj_dict[ce][obj_name]["obj"].build()
+
+ # To set media source and media quality
+ time.sleep(10)
+
+ # self.vs_obj_dict[ce][obj_name]["obj"].run
+ test_time = datetime.now()
+ test_time = test_time.strftime("%b %d %H:%M:%S")
+
+ logging.info("Initiating Test...")
+
+ individual_dataframe_columns = []
+
+ keys = list(self.vs_obj_dict[ce][obj_name]["obj"].http_profile.created_cx.keys())
+
+ # Extend individual_dataframe_column with dynamically generated column names
+ for i in range(len(keys)):
+ individual_dataframe_columns.extend([
+ f'video_format_bitrate_{keys[i]}',
+ f'total_wait_time_{keys[i]}',
+ f'total_urls_{keys[i]}',
+ f'RSSI_{keys[i]}',
+ f'Link Speed_{keys[i]}',
+ f'Total Buffer_{keys[i]}',
+ f'Total Errors_{keys[i]}',
+ f'Min_Video_Rate_{keys[i]}',
+ f'Max_Video_Rate_{keys[i]}',
+ f'Avg_Video_Rate_{keys[i]}',
+ f'bytes_rd_{keys[i]}',
+ f'rx rate_{keys[i]} bps',
+ f'frame_rate_{keys[i]}',
+ f'Video Quality_{keys[i]}'
+ ])
+
+ individual_dataframe_columns.extend(['overall_video_format_bitrate', 'timestamp', 'iteration', 'start_time', 'end_time', 'remaining_Time', 'status'])
+ individual_df = pd.DataFrame(columns=individual_dataframe_columns)
+
+ cx_order_list = []
+ index = 0
+ file_path = ""
+
+ # Parsing test_duration
+ if args.duration.endswith('s') or args.duration.endswith('S'):
+ args.duration = round(int(args.duration[0:-1]) / 60, 2)
+
+ elif args.duration.endswith('m') or args.duration.endswith('M'):
+ args.duration = int(args.duration[0:-1])
+
+ elif args.duration.endswith('h') or args.duration.endswith('H'):
+ args.duration = int(args.duration[0:-1]) * 60
+
+ elif args.duration.endswith(''):
+ args.duration = int(args.duration)
+
+ incremental_capacity_list_values = self.vs_obj_dict[ce][obj_name]["obj"].get_incremental_capacity_list()
+ if incremental_capacity_list_values[-1] != len(available_resources):
+ logger.error("Incremental capacity doesnt match available devices")
+ if args.postcleanup:
+ self.vs_obj_dict[ce][obj_name]["obj"].postcleanup()
+ return False
+ # Process resource IDs and incremental values if specified
+ if self.vs_obj_dict[ce][obj_name]["obj"].resource_ids:
+ if self.vs_obj_dict[ce][obj_name]["obj"].incremental:
+ test_setup_info_incremental_values = ','.join([str(n) for n in incremental_capacity_list_values])
+ if len(self.vs_obj_dict[ce][obj_name]["obj"].incremental) == len(available_resources):
+ test_setup_info_total_duration = args.duration
+ elif len(self.vs_obj_dict[ce][obj_name]["obj"].incremental) == 1 and len(available_resources) > 1:
+ if self.vs_obj_dict[ce][obj_name]["obj"].incremental[0] == len(available_resources):
+ test_setup_info_total_duration = args.duration
+ else:
+ div = len(available_resources) // self.vs_obj_dict[ce][obj_name]["obj"].incremental[0]
+ mod = len(available_resources) % self.vs_obj_dict[ce][obj_name]["obj"].incremental[0]
+ if mod == 0:
+ test_setup_info_total_duration = args.duration * (div)
+ else:
+ test_setup_info_total_duration = args.duration * (div + 1)
+ else:
+ test_setup_info_total_duration = args.duration * len(incremental_capacity_list_values)
+ else:
+ test_setup_info_total_duration = args.duration
+
+ if args.webgui_incremental:
+ test_setup_info_incremental_values = ','.join([str(n) for n in incremental_capacity_list_values])
+ elif gave_incremental:
+ test_setup_info_incremental_values = "No Incremental Value provided"
+ self.vs_obj_dict[ce][obj_name]["obj"].total_duration = test_setup_info_total_duration
+
+ actual_start_time = datetime.now()
+
+ iterations_before_test_stopped_by_user = []
+
+ # Calculate and manage cx_order_list ( list of cross connections to run ) based on incremental values
+ if self.vs_obj_dict[ce][obj_name]["obj"].resource_ids:
+ # Check if incremental is specified
+ if self.vs_obj_dict[ce][obj_name]["obj"].incremental:
+
+ # Case 1: Incremental list has only one value and it equals the length of keys
+ if len(self.vs_obj_dict[ce][obj_name]["obj"].incremental) == 1 and self.vs_obj_dict[ce][obj_name]["obj"].incremental[0] == len(keys):
+ cx_order_list.append(keys[index:])
+
+ # Case 2: Incremental list has only one value but length of keys is greater than 1
+ elif len(self.vs_obj_dict[ce][obj_name]["obj"].incremental) == 1 and len(keys) > 1:
+ incremental_value = self.vs_obj_dict[ce][obj_name]["obj"].incremental[0]
+ max_index = len(keys)
+ index = 0
+
+ while index < max_index:
+ next_index = min(index + incremental_value, max_index)
+ cx_order_list.append(keys[index:next_index])
+ index = next_index
+
+ # Case 3: Incremental list has multiple values and length of keys is greater than 1
+ elif len(self.vs_obj_dict[ce][obj_name]["obj"].incremental) != 1 and len(keys) > 1:
+
+ index = 0
+ for num in self.vs_obj_dict[ce][obj_name]["obj"].incremental:
+
+ cx_order_list.append(keys[index: num])
+ index = num
+
+ if index < len(keys):
+ cx_order_list.append(keys[index:])
+
+ # Iterate over cx_order_list to start tests incrementally
+ for i in range(len(cx_order_list)):
+ if i == 0:
+ self.vs_obj_dict[ce][obj_name]["obj"].data["start_time_webGUI"] = [datetime.now().strftime('%Y-%m-%d %H:%M:%S')]
+ end_time_webGUI = (datetime.now() + timedelta(minutes=self.vs_obj_dict[ce][obj_name]["obj"].total_duration)).strftime('%Y-%m-%d %H:%M:%S')
+ self.vs_obj_dict[ce][obj_name]["obj"].data['end_time_webGUI'] = [end_time_webGUI]
+
+ # time.sleep(10)
+
+ # Start specific devices based on incremental capacity
+ self.vs_obj_dict[ce][obj_name]["obj"].start_specific(cx_order_list[i])
+ if cx_order_list[i]:
+ logging.info("Test started on Devices with resource Ids : {selected}".format(selected=cx_order_list[i]))
+ else:
+ logging.info("Test started on Devices with resource Ids : {selected}".format(selected=cx_order_list[i]))
+ file_path = "video_streaming_realtime_data.csv"
+ if end_time_webGUI < datetime.now().strftime('%Y-%m-%d %H:%M:%S'):
+ self.vs_obj_dict[ce][obj_name]["obj"].data['remaining_time_webGUI'] = ['0:00']
+ else:
+ date_time = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
+ self.vs_obj_dict[ce][obj_name]["obj"].data['remaining_time_webGUI'] = [datetime.strptime(end_time_webGUI, "%Y-%m-%d %H:%M:%S") - datetime.strptime(date_time, "%Y-%m-%d %H:%M:%S")]
+
+ if args.dowebgui:
+ file_path = os.path.join(self.vs_obj_dict[ce][obj_name]["obj"].result_dir, "../../Running_instances/{}_{}_running.json".format(self.vs_obj_dict[ce][obj_name]["obj"].host, self.vs_obj_dict[ce][obj_name]["obj"].test_name))
+ if os.path.exists(file_path):
+ with open(file_path, 'r') as file:
+ data = json.load(file)
+ if data["status"] != "Running":
+ break
+ test_stopped_by_user = self.vs_obj_dict[ce][obj_name]["obj"].monitor_for_runtime_csv(args.duration, file_path, individual_df, i, actual_start_time, resource_list_sorted, cx_order_list[i])
+ else:
+ test_stopped_by_user = self.vs_obj_dict[ce][obj_name]["obj"].monitor_for_runtime_csv(args.duration, file_path, individual_df, i, actual_start_time, resource_list_sorted, cx_order_list[i])
+ if not test_stopped_by_user:
+ # Append current iteration index to iterations_before_test_stopped_by_user
+ iterations_before_test_stopped_by_user.append(i)
+ else:
+ # Append current iteration index to iterations_before_test_stopped_by_user
+ iterations_before_test_stopped_by_user.append(i)
+ break
+ self.vs_obj_dict[ce][obj_name]["obj"].stop()
+
+ if self.vs_obj_dict[ce][obj_name]["obj"].resource_ids:
+
+ date = str(datetime.now()).split(",")[0].replace(" ", "-").split(".")[0]
+ username = []
+
+ try:
+ eid_data = self.vs_obj_dict[ce][obj_name]["obj"].json_get("ports?fields=alias,mac,mode,Parent Dev,rx-rate,tx-rate,ssid,signal")
+ except KeyError:
+ logger.error("Error: 'interfaces' key not found in port data")
+ return False
+
+ resource_ids = list(map(int, self.vs_obj_dict[ce][obj_name]["obj"].resource_ids.split(',')))
+ for alias in eid_data["interfaces"]:
+ for i in alias:
+ if int(i.split(".")[1]) > 1 and alias[i]["alias"] == 'wlan0':
+ resource_hw_data = self.vs_obj_dict[ce][obj_name]["obj"].json_get("/resource/" + i.split(".")[0] + "/" + i.split(".")[1])
+ hw_version = resource_hw_data['resource']['hw version']
+ if not hw_version.startswith(('Win', 'Linux', 'Apple')) and int(resource_hw_data['resource']['eid'].split('.')[1]) in resource_ids:
+ username.append(resource_hw_data['resource']['user'])
+
+ device_list_str = ','.join([f"{name} ( Android )" for name in username])
+
+ test_setup_info = {
+ "Testname": args.test_name,
+ "Device List": device_list_str,
+ "No of Devices": "Total" + "( " + str(len(keys)) + " ): Android(" + str(len(keys)) + ")",
+ "Incremental Values": "",
+ "URL": args.url,
+ "Media Source": media_source.upper(),
+ "Media Quality": media_quality
+ }
+ test_setup_info['Incremental Values'] = test_setup_info_incremental_values
+ test_setup_info['Total Duration (min)'] = str(test_setup_info_total_duration)
+
+ logging.info("Test Completed")
+
+ # prev_inc_value = 0
+ if self.vs_obj_dict[ce][obj_name]["obj"].resource_ids and self.vs_obj_dict[ce][obj_name]["obj"].incremental:
+ self.vs_obj_dict[ce][obj_name]["obj"].generate_report(date, list(set(iterations_before_test_stopped_by_user)), test_setup_info=test_setup_info, realtime_dataset=individual_df, cx_order_list=cx_order_list,report_path=self.result_path)
+ elif self.vs_obj_dict[ce][obj_name]["obj"].resource_ids:
+ self.vs_obj_dict[ce][obj_name]["obj"].generate_report(date, list(set(iterations_before_test_stopped_by_user)), test_setup_info=test_setup_info, realtime_dataset=individual_df,report_path=self.result_path)
+
+ params = {
+ "date": None,
+ "iterations_before_test_stopped_by_user": None,
+ "test_setup_info": None,
+ "realtime_dataset": None,
+ "report_path": "",
+ "cx_order_list": []
+ }
+ if self.vs_obj_dict[ce][obj_name]["obj"].resource_ids and self.vs_obj_dict[ce][obj_name]["obj"].incremental:
+ params.update({
+ "date": date,
+ "iterations_before_test_stopped_by_user": list(set(iterations_before_test_stopped_by_user)),
+ "test_setup_info": test_setup_info,
+ "realtime_dataset": individual_df,
+ "report_path": self.result_path,
+ "cx_order_list": cx_order_list
+ })
+ elif self.vs_obj_dict[ce][obj_name]["obj"].resource_ids:
+ params.update({
+ "date": date,
+ "iterations_before_test_stopped_by_user": list(set(iterations_before_test_stopped_by_user)),
+ "test_setup_info": test_setup_info,
+ "realtime_dataset": individual_df,
+ "report_path": self.result_path
+ })
+ self.vs_obj_dict[ce][obj_name]["data"] = params.copy()
+ # Perform post-cleanup operations
+ if args.postcleanup:
+ self.vs_obj_dict[ce][obj_name]["obj"].postcleanup()
+
+ if args.dowebgui:
+ self.vs_obj_dict[ce][obj_name]["obj"].copy_reports_to_home_dir()
+ return True
+
+
def run_vs_test1(
        self,
        ssid=None,
        passwd="something",
        encryp="psk",
        url="www.google.com",
        max_speed=0,
        urls_per_tenm=100,
        duration=None,
        test_name="video_streaming_test",
        dowebgui=False,
        result_dir='',
        lf_logger_config_json=None,
        log_level=None,
        debug=False,
        media_source='1',
        media_quality='0',
        device_list=None,
        webgui_incremental=None,
        incremental=False,
        no_laptops=True,
        postcleanup=False,
        precleanup=False,
        help_summary=False,
        group_name=None,
        profile_name=None,
        file_name=None,
        eap_method='DEFAULT',
        eap_identity='DEFAULT',
        ieee8021x=False,
        ieee80211u=False,
        ieee80211w=1,
        enable_pkc=False,
        bss_transition=False,
        power_save=False,
        disable_ofdma=False,
        roam_ft_ds=False,
        key_management='DEFAULT',
        pairwise='NA',
        private_key='NA',
        ca_cert='NA',
        client_cert='NA',
        pk_passwd='NA',
        pac_file='NA',
        upstream_port='NA',
        expected_passfail_value=None,
        csv_name=None,
        wait_time=60,
        config=False,
        device_csv_name=None,
        get_live_view=False,
        floors=0
):
    """Keyword-argument convenience wrapper around run_vs_test().

    Packs every parameter into a SimpleNamespace (mimicking an argparse
    namespace), injects the LANforge host address, and delegates to
    run_vs_test(). Returns whatever run_vs_test() returns.
    """
    # Snapshot the call arguments. `self` must be excluded — the original
    # `SimpleNamespace(**locals())` leaked the bound instance in as
    # `args.self`, polluting the namespace handed to run_vs_test().
    params = dict(locals())
    params.pop('self')
    args = SimpleNamespace(**params)
    # The host is taken from this controller object, not from the caller.
    args.host = self.lanforge_ip
    return self.run_vs_test(args)
+
def run_throughput_test(
        self,
        device_list=[],
        upstream_port='eth1',
        ssid=None,
        passwd='[BLANK]',
        traffic_type=None,
        upload='2560',
        download='2560',
        test_duration='',
        report_timer='1s',
        ap_name='Test-AP',
        dowebgui=False,
        tos='Best_Efforts',
        packet_size='-1',
        incremental_capacity=[],
        load_type='wc_per_client_load',
        do_interopability=False,
        postcleanup=False,
        precleanup=False,
        incremental=False,
        security='open',
        test_name=None,
        result_dir='',
        get_live_view=False,
        total_floors='0',
        expected_passfail_value=None,
        device_csv_name=None,
        eap_method='DEFAULT',
        eap_identity='',
        ieee8021x=False,
        ieee80211u=False,
        ieee80211w=1,
        enable_pkc=False,
        bss_transition=False,
        power_save=False,
        disable_ofdma=False,
        roam_ft_ds=False,
        key_management='DEFAULT',
        pairwise='NA',
        private_key='NA',
        ca_cert='NA',
        client_cert='NA',
        pk_passwd='NA',
        pac_file='NA',
        file_name=None,
        group_name=None,
        profile_name=None,
        wait_time=60,
        config=False,
        default_config=True,
        thpt_mbps=False,
        help_summary=False
):
    """Run a layer-3 throughput test on real devices via a Throughput object.

    Parses duration/report-timer strings, normalizes the upload/download
    load values, builds cross-connects, runs the test iteratively per the
    incremental-capacity plan, monitors per-iteration results into a
    DataFrame, and generates a report. Returns True on completion, or
    None when validation fails early.

    Side effects: stores the Throughput object and its result parameters
    under self.thput_obj_dict[self.current_exec].
    """
    if dowebgui:
        # WebGUI sends '0' to mean "use the default minimum load".
        if (upload == '0'):
            upload = '2560'
        if (download == '0'):
            download = '2560'

    logger_config = lf_logger_config.lf_logger_config()

    if thpt_mbps:
        # Caller supplied Mbps; convert the requested loads to bps.
        if download != '2560' and download != '0' and upload != '0' and upload != '2560':
            download = str(int(download) * 1000000)
            upload = str(int(upload) * 1000000)
        elif upload != '2560' and upload != '0':
            upload = str(int(upload) * 1000000)
        else:
            download = str(int(download) * 1000000)
    loads = {}
    iterations_before_test_stopped_by_user = []
    gave_incremental = False
    # Build the per-iteration load table from the download/upload arguments.
    if download and upload:
        loads = {'upload': str(upload).split(","), 'download': str(download).split(",")}
        loads_data = loads["download"]
    elif download:
        loads = {'upload': [], 'download': str(download).split(",")}
        # BUG FIX: the original iterated range(len(download)) — the characters
        # of the string — producing one filler upload per character instead of
        # one per download value.
        for i in range(len(loads['download'])):
            loads['upload'].append(2560)
        loads_data = loads["download"]
    else:
        if upload:
            loads = {'upload': str(upload).split(","), 'download': []}
            # BUG FIX: same character-count issue as above, for uploads.
            for i in range(len(loads['upload'])):
                loads['download'].append(2560)
            loads_data = loads["upload"]

    # Direction tag used in CSV naming: BiDi / UL / DL.
    if download != '2560' and download != '0' and upload != '0' and upload != '2560':
        csv_direction = 'L3_' + traffic_type.split('_')[1].upper() + '_BiDi'
    elif upload != '2560' and upload != '0':
        csv_direction = 'L3_' + traffic_type.split('_')[1].upper() + '_UL'
    else:
        csv_direction = 'L3_' + traffic_type.split('_')[1].upper() + '_DL'

    # validate_args(args)
    if incremental_capacity == 'no_increment' and dowebgui:
        incremental_capacity = str(len(device_list.split(",")))
        gave_incremental = True

    if do_interopability:
        # Interoperability mode runs one device at a time.
        incremental_capacity = "1"

    # Parsing test_duration (suffix s/m/h; bare number means seconds).
    if test_duration.endswith('s') or test_duration.endswith('S'):
        test_duration = int(test_duration[0:-1])
    elif test_duration.endswith('m') or test_duration.endswith('M'):
        test_duration = int(test_duration[0:-1]) * 60
    elif test_duration.endswith('h') or test_duration.endswith('H'):
        test_duration = int(test_duration[0:-1]) * 60 * 60
    else:
        test_duration = int(test_duration)

    # Parsing report_timer (same suffix convention).
    if report_timer.endswith('s') or report_timer.endswith('S'):
        report_timer = int(report_timer[0:-1])
    elif report_timer.endswith('m') or report_timer.endswith('M'):
        report_timer = int(report_timer[0:-1]) * 60
    elif report_timer.endswith('h') or report_timer.endswith('H'):
        report_timer = int(report_timer[0:-1]) * 60 * 60
    else:
        # BUG FIX: original guarded this branch with test_duration.endswith(''),
        # but test_duration is an int by this point, so a bare-number
        # report_timer (e.g. '5') raised AttributeError instead of parsing.
        report_timer = int(report_timer)

    # UDP payload bounds; -1 means "auto".
    if (int(packet_size) < 16 or int(packet_size) > 65507) and int(packet_size) != -1:
        logger.error("Packet size should be greater than 16 bytes and less than 65507 bytes incorrect")
        return
    ce = self.current_exec  # series
    if ce == "parallel":
        obj_name = "thput_test"
    else:
        # Pick the first unused sequential object name for this execution.
        obj_no = 1
        while f"thput_test_{obj_no}" in self.thput_obj_dict[ce]:
            obj_no += 1
        obj_name = f"thput_test_{obj_no}"
    self.thput_obj_dict[ce][obj_name] = {"obj": None, "data": None}
    for index in range(len(loads_data)):
        test_obj = Throughput(host=self.lanforge_ip,
                              ip=self.lanforge_ip,
                              port=self.port,
                              number_template="0000",
                              ap_name=ap_name,
                              name_prefix="TOS-",
                              upstream=upstream_port,
                              ssid=ssid,
                              password=passwd,
                              security=security,
                              test_duration=test_duration,
                              use_ht160=False,
                              side_a_min_rate=int(loads['upload'][index]),
                              side_b_min_rate=int(loads['download'][index]),
                              side_a_min_pdu=int(packet_size),
                              side_b_min_pdu=int(packet_size),
                              traffic_type=traffic_type,
                              tos=tos,
                              dowebgui=dowebgui,
                              test_name=test_name,
                              result_dir=result_dir,
                              device_list=device_list,
                              incremental_capacity=incremental_capacity,
                              report_timer=report_timer,
                              load_type=load_type,
                              do_interopability=do_interopability,
                              incremental=incremental,
                              precleanup=precleanup,
                              get_live_view=get_live_view,
                              total_floors=total_floors,
                              csv_direction=csv_direction,
                              expected_passfail_value=expected_passfail_value,
                              device_csv_name=device_csv_name,
                              file_name=file_name,
                              group_name=group_name,
                              profile_name=profile_name,
                              eap_method=eap_method,
                              eap_identity=eap_identity,
                              ieee80211=ieee8021x,
                              ieee80211u=ieee80211u,
                              ieee80211w=ieee80211w,
                              enable_pkc=enable_pkc,
                              bss_transition=bss_transition,
                              power_save=power_save,
                              disable_ofdma=disable_ofdma,
                              roam_ft_ds=roam_ft_ds,
                              key_management=key_management,
                              pairwise=pairwise,
                              private_key=private_key,
                              ca_cert=ca_cert,
                              client_cert=client_cert,
                              pk_passwd=pk_passwd,
                              pac_file=pac_file,
                              wait_time=wait_time,
                              config=config,
                              default_config=default_config
                              )
        # Keep the shared registry pointing at the current object while also
        # using a short local alias for readability.
        self.thput_obj_dict[ce][obj_name]["obj"] = test_obj

        if gave_incremental:
            test_obj.gave_incremental = True
        test_obj.os_type()

        check_condition, clients_to_run = test_obj.phantom_check()

        if not check_condition:
            return

        check_increment_condition = test_obj.check_incremental_list()

        if not check_increment_condition:
            # NOTE: the original code repeated this exact test in an elif
            # branch below; that branch was unreachable and has been removed.
            logger.error("Incremental values given for selected devices are incorrect")
            return

        created_cxs = test_obj.build()
        time.sleep(10)
        created_cxs = list(created_cxs.keys())
        individual_dataframe_column = []

        to_run_cxs, to_run_cxs_len, created_cx_lists_keys, incremental_capacity_list = test_obj.get_incremental_capacity_list()

        # Per-client realtime columns (names must match what monitor() writes).
        for i in range(len(clients_to_run)):
            individual_dataframe_column.extend([f'Download{clients_to_run[i]}', f'Upload{clients_to_run[i]}', f'Rx % Drop {clients_to_run[i]}',
                                                f'Tx % Drop{clients_to_run[i]}', f'Average RTT {clients_to_run[i]} ', f'RSSI {clients_to_run[i]} ', f'Tx-Rate {clients_to_run[i]} ', f'Rx-Rate {clients_to_run[i]} '])

        individual_dataframe_column.extend(['Overall Download', 'Overall Upload', 'Overall Rx % Drop ', 'Overall Tx % Drop', 'Iteration',
                                            'TIMESTAMP', 'Start_time', 'End_time', 'Remaining_Time', 'Incremental_list', 'status'])
        individual_df = pd.DataFrame(columns=individual_dataframe_column)

        overall_start_time = datetime.now()
        overall_end_time = overall_start_time + timedelta(seconds=int(test_duration) * len(incremental_capacity_list))

        # Run each incremental batch of cross-connects in turn.
        for i in range(len(to_run_cxs)):
            is_device_configured = True
            if do_interopability:
                # Resource id of the single device under test this iteration.
                device_to_run_resource = test_obj.extract_digits_until_alpha(to_run_cxs[i][0])

            if load_type == "wc_intended_load":
                # Re-balance the intended load across the active clients.
                test_obj.perform_intended_load(i, incremental_capacity_list)
                if i != 0:
                    # Stop throughput testing if not the first iteration.
                    test_obj.stop()
                # Start specific connections for the current iteration.
                test_obj.start_specific(created_cx_lists_keys[:incremental_capacity_list[i]])
            else:
                if (do_interopability and i != 0):
                    test_obj.stop_specific(to_run_cxs[i - 1])
                    time.sleep(5)
                if not default_config:
                    if (do_interopability and i == 0):
                        test_obj.disconnect_all_devices()
                    if do_interopability and "iOS" not in to_run_cxs[i][0]:
                        logger.info("Configuring device of resource{}".format(to_run_cxs[i][0]))
                        is_device_configured = test_obj.configure_specific([device_to_run_resource])
                if is_device_configured:
                    test_obj.start_specific(to_run_cxs[i])

            # Devices active in this iteration.
            device_names = created_cx_lists_keys[:to_run_cxs_len[i][-1]]

            # Monitor throughput; returns all collected dataframes and
            # whether the user stopped the test mid-run.
            all_dataframes, test_stopped_by_user = test_obj.monitor(i, individual_df, device_names, incremental_capacity_list, overall_start_time, overall_end_time, is_device_configured)
            if do_interopability and "iOS" not in to_run_cxs[i][0] and not default_config:
                test_obj.disconnect_all_devices([device_to_run_resource])
            # Record the iteration either way; stop looping if the user
            # halted the test.
            if not test_stopped_by_user:
                iterations_before_test_stopped_by_user.append(i)
            else:
                iterations_before_test_stopped_by_user.append(i)
                break

        test_obj.stop()
        if postcleanup:
            test_obj.cleanup()
        test_obj.generate_report(list(set(iterations_before_test_stopped_by_user)), incremental_capacity_list, data=all_dataframes, data1=to_run_cxs_len, report_path=self.result_path if not test_obj.dowebgui else test_obj.result_dir)
        if test_obj.dowebgui:
            # Copying to home directory i.e. home/user_name.
            test_obj.copy_reports_to_home_dir()
        params = {
            "iterations_before_test_stopped_by_user": list(set(iterations_before_test_stopped_by_user)),
            "incremental_capacity_list": incremental_capacity_list,
            "data": all_dataframes,
            "data1": to_run_cxs_len,
            "report_path": self.result_path if not test_obj.dowebgui else test_obj.result_dir
        }
        self.thput_obj_dict[ce][obj_name]["data"] = params.copy()

    return True
+
+ def run_mc_test(self,args):
+ endp_types = "lf_udp"
+
+ help_summary = '''\
+ The Layer 3 Traffic Generation Test is designed to test the performance of the
+ Access Point by running layer 3 TCP and/or UDP Traffic. Layer-3 Cross-Connects represent a stream
+ of data flowing through the system under test. A Cross-Connect (CX) is composed of two Endpoints,
+ each of which is associated with a particular Port (physical or virtual interface).
+
+ The test will create stations, create CX traffic between upstream port and stations, run traffic
+ and generate a report.
+ '''
+ # args = parse_args()
+
+ test_name = ""
+ ip = ""
+ # print('newww',args.local_lf_report_dir)
+ # exit(0)
+ if args.dowebgui:
+ logger.info("In webGUI execution")
+ if args.dowebgui:
+ test_name = args.test_name
+ ip = args.lfmgr
+ logger.info(" dowebgui %s %s %s", args.dowebgui, test_name, ip)
+
+ # initialize pass / fail
+ test_passed = False
+
+ # Configure logging
+ logger_config = lf_logger_config.lf_logger_config()
+
+ # set the logger level to debug
+ if args.log_level:
+ logger_config.set_level(level=args.log_level)
+
+ # lf_logger_config_json will take presidence to changing debug levels
+ if args.lf_logger_config_json:
+ # logger_config.lf_logger_config_json = "lf_logger_config.json"
+ logger_config.lf_logger_config_json = args.lf_logger_config_json
+ logger_config.load_lf_logger_config()
+
+ # validate_args(args)
+ endp_input_list = []
+ graph_input_list = []
+ if args.real:
+ endp_input_list, graph_input_list, config_devices, group_device_map = query_real_clients(args)
+ # Validate existing station list configuration if specified before starting test
+ if not args.use_existing_station_list and args.existing_station_list:
+ logger.error("Existing stations specified, but argument \'--use_existing_station_list\' not specified")
+ return False
+ elif args.use_existing_station_list and not args.existing_station_list:
+ logger.error(
+ "Argument \'--use_existing_station_list\' specified, but no existing stations provided. See \'--existing_station_list\'")
+ return False
+
+ # Gather data for test reporting and KPI generation
+ logger.info("Read in command line paramaters")
+ interopt_mode = args.interopt_mode
+
+ if args.endp_type:
+ endp_types = args.endp_type
+
+ if args.radio:
+ radios = args.radio
+ else:
+ radios = None
+
+ MAX_NUMBER_OF_STATIONS = 1000
+
+ # Lists to help with station creation
+ radio_name_list = []
+ number_of_stations_per_radio_list = []
+ ssid_list = []
+ ssid_password_list = []
+ ssid_security_list = []
+ station_lists = []
+ existing_station_lists = []
+
+ # wifi settings configuration
+ wifi_mode_list = []
+ wifi_enable_flags_list = []
+
+ # optional radio configuration
+ reset_port_enable_list = []
+ reset_port_time_min_list = []
+ reset_port_time_max_list = []
+
+ # wifi extra configuration
+ key_mgmt_list = []
+ pairwise_list = []
+ group_list = []
+ psk_list = []
+ wep_key_list = []
+ ca_cert_list = []
+ eap_list = []
+ identity_list = []
+ anonymous_identity_list = []
+ phase1_list = []
+ phase2_list = []
+ passwd_list = []
+ pin_list = []
+ pac_file_list = []
+ private_key_list = []
+ pk_password_list = []
+ hessid_list = []
+ realm_list = []
+ client_cert_list = []
+ imsi_list = []
+ milenage_list = []
+ domain_list = []
+ roaming_consortium_list = []
+ venue_group_list = []
+ network_type_list = []
+ ipaddr_type_avail_list = []
+ network_auth_type_list = []
+ anqp_3gpp_cell_net_list = []
+ ieee80211w_list = []
+
+ logger.debug("Parse radio arguments used for station configuration")
+ if radios is not None:
+ logger.info("radios {}".format(radios))
+ for radio_ in radios:
+ radio_keys = ['radio', 'stations', 'ssid', 'ssid_pw', 'security']
+ logger.info("radio_dict before format {}".format(radio_))
+ radio_info_dict = dict(
+ map(
+ lambda x: x.split('=='),
+ str(radio_).replace(
+ '"',
+ '').replace(
+ '[',
+ '').replace(
+ ']',
+ '').replace(
+ "'",
+ "").replace(
+ ",",
+ " ").split()))
+
+ logger.debug("radio_dict {}".format(radio_info_dict))
+
+ for key in radio_keys:
+ if key not in radio_info_dict:
+ logger.critical(
+ "missing config, for the {}, all of the following need to be present {} ".format(
+ key, radio_keys))
+ return False
+
+ radio_name_list.append(radio_info_dict['radio'])
+ number_of_stations_per_radio_list.append(
+ radio_info_dict['stations'])
+ ssid_list.append(radio_info_dict['ssid'])
+ ssid_password_list.append(radio_info_dict['ssid_pw'])
+ ssid_security_list.append(radio_info_dict['security'])
+
+ # check for set_wifi_extra
+ # check for wifi_settings
+ wifi_extra_keys = ['wifi_extra']
+ wifi_extra_found = False
+ for wifi_extra_key in wifi_extra_keys:
+ if wifi_extra_key in radio_info_dict:
+ logger.info("wifi_extra_keys found")
+ wifi_extra_found = True
+ break
+
+ if wifi_extra_found:
+ logger.debug("wifi_extra: {extra}".format(
+ extra=radio_info_dict['wifi_extra']))
+
+ wifi_extra_dict = dict(
+ map(
+ lambda x: x.split('&&'),
+ str(radio_info_dict['wifi_extra']).replace(
+ '"',
+ '').replace(
+ '[',
+ '').replace(
+ ']',
+ '').replace(
+ "'",
+ "").replace(
+ ",",
+ " ").replace(
+ "!!",
+ " "
+ )
+ .split()))
+
+ logger.info("wifi_extra_dict: {wifi_extra}".format(
+ wifi_extra=wifi_extra_dict))
+
+ if 'key_mgmt' in wifi_extra_dict:
+ key_mgmt_list.append(wifi_extra_dict['key_mgmt'])
+ else:
+ key_mgmt_list.append('[BLANK]')
+
+ if 'pairwise' in wifi_extra_dict:
+ pairwise_list.append(wifi_extra_dict['pairwise'])
+ else:
+ pairwise_list.append('[BLANK]')
+
+ if 'group' in wifi_extra_dict:
+ group_list.append(wifi_extra_dict['group'])
+ else:
+ group_list.append('[BLANK]')
+
+ if 'psk' in wifi_extra_dict:
+ psk_list.append(wifi_extra_dict['psk'])
+ else:
+ psk_list.append('[BLANK]')
+
+ if 'wep_key' in wifi_extra_dict:
+ wep_key_list.append(wifi_extra_dict['wep_key'])
+ else:
+ wep_key_list.append('[BLANK]')
+
+ if 'ca_cert' in wifi_extra_dict:
+ ca_cert_list.append(wifi_extra_dict['ca_cert'])
+ else:
+ ca_cert_list.append('[BLANK]')
+
+ if 'eap' in wifi_extra_dict:
+ eap_list.append(wifi_extra_dict['eap'])
+ else:
+ eap_list.append('[BLANK]')
+
+ if 'identity' in wifi_extra_dict:
+ identity_list.append(wifi_extra_dict['identity'])
+ else:
+ identity_list.append('[BLANK]')
+
+ if 'anonymous' in wifi_extra_dict:
+ anonymous_identity_list.append(
+ wifi_extra_dict['anonymous'])
+ else:
+ anonymous_identity_list.append('[BLANK]')
+
+ if 'phase1' in wifi_extra_dict:
+ phase1_list.append(wifi_extra_dict['phase1'])
+ else:
+ phase1_list.append('[BLANK]')
+
+ if 'phase2' in wifi_extra_dict:
+ phase2_list.append(wifi_extra_dict['phase2'])
+ else:
+ phase2_list.append('[BLANK]')
+
+ if 'passwd' in wifi_extra_dict:
+ passwd_list.append(wifi_extra_dict['passwd'])
+ else:
+ passwd_list.append('[BLANK]')
+
+ if 'pin' in wifi_extra_dict:
+ pin_list.append(wifi_extra_dict['pin'])
+ else:
+ pin_list.append('[BLANK]')
+
+ if 'pac_file' in wifi_extra_dict:
+ pac_file_list.append(wifi_extra_dict['pac_file'])
+ else:
+ pac_file_list.append('[BLANK]')
+
+ if 'private_key' in wifi_extra_dict:
+ private_key_list.append(wifi_extra_dict['private_key'])
+ else:
+ private_key_list.append('[BLANK]')
+
+ if 'pk_password' in wifi_extra_dict:
+ pk_password_list.append(wifi_extra_dict['pk_password'])
+ else:
+ pk_password_list.append('[BLANK]')
+
+ if 'hessid' in wifi_extra_dict:
+ hessid_list.append(wifi_extra_dict['hessid'])
+ else:
+ hessid_list.append("00:00:00:00:00:00")
+
+ if 'realm' in wifi_extra_dict:
+ realm_list.append(wifi_extra_dict['realm'])
+ else:
+ realm_list.append('[BLANK]')
+
+ if 'client_cert' in wifi_extra_dict:
+ client_cert_list.append(wifi_extra_dict['client_cert'])
+ else:
+ client_cert_list.append('[BLANK]')
+
+ if 'imsi' in wifi_extra_dict:
+ imsi_list.append(wifi_extra_dict['imsi'])
+ else:
+ imsi_list.append('[BLANK]')
+
+ if 'milenage' in wifi_extra_dict:
+ milenage_list.append(wifi_extra_dict['milenage'])
+ else:
+ milenage_list.append('[BLANK]')
+
+ if 'domain' in wifi_extra_dict:
+ domain_list.append(wifi_extra_dict['domain'])
+ else:
+ domain_list.append('[BLANK]')
+
+ if 'roaming_consortium' in wifi_extra_dict:
+ roaming_consortium_list.append(
+ wifi_extra_dict['roaming_consortium'])
+ else:
+ roaming_consortium_list.append('[BLANK]')
+
+ if 'venue_group' in wifi_extra_dict:
+ venue_group_list.append(wifi_extra_dict['venue_group'])
+ else:
+ venue_group_list.append('[BLANK]')
+
+ if 'network_type' in wifi_extra_dict:
+ network_type_list.append(wifi_extra_dict['network_type'])
+ else:
+ network_type_list.append('[BLANK]')
+
+ if 'ipaddr_type_avail' in wifi_extra_dict:
+ ipaddr_type_avail_list.append(
+ wifi_extra_dict['ipaddr_type_avail'])
+ else:
+ ipaddr_type_avail_list.append('[BLANK]')
+
+ if 'network_auth_type' in wifi_extra_dict:
+ network_auth_type_list.append(
+ wifi_extra_dict['network_auth_type'])
+ else:
+ network_auth_type_list.append('[BLANK]')
+
+ if 'anqp_3gpp_cell_net' in wifi_extra_dict:
+ anqp_3gpp_cell_net_list.append(
+ wifi_extra_dict['anqp_3gpp_cell_net'])
+ else:
+ anqp_3gpp_cell_net_list.append('[BLANK]')
+
+ if 'ieee80211w' in wifi_extra_dict:
+ ieee80211w_list.append(wifi_extra_dict['ieee80211w'])
+ else:
+ ieee80211w_list.append('Optional')
+
+ '''
+ # wifi extra configuration
+ key_mgmt_list.append(key_mgmt)
+ pairwise_list.append(pairwise)
+ group_list.append(group)
+ psk_list.append(psk)
+ eap_list.append(eap)
+ identity_list.append(identity)
+ anonymous_identity_list.append(anonymous_identity)
+ phase1_list.append(phase1)
+ phase2_list.append(phase2)
+ passwd_list.append(passwd)
+ pin_list.append(pin)
+ pac_file_list.append(pac_file)
+ private_key_list.append(private)
+ pk_password_list.append(pk_password)
+ hessid_list.append(hssid)
+ realm_list.append(realm)
+ client_cert_list.append(client_cert)
+ imsi_list.append(imsi)
+ milenage_list.append(milenage)
+ domain_list.append(domain)
+ roaming_consortium_list.append(roaming_consortium)
+ venue_group_list.append(venue_group)
+ network_type_list.append(network_type)
+ ipaddr_type_avail_list.append(ipaddr_type_avail)
+ network_auth_type_list.append(network_ath_type)
+ anqp_3gpp_cell_net_list.append(anqp_3gpp_cell_net)
+
+ '''
+ # no wifi extra for this station
+ else:
+ key_mgmt_list.append('[BLANK]')
+ pairwise_list.append('[BLANK]')
+ group_list.append('[BLANK]')
+ psk_list.append('[BLANK]')
+ # for testing
+ # psk_list.append(radio_info_dict['ssid_pw'])
+ wep_key_list.append('[BLANK]')
+ ca_cert_list.append('[BLANK]')
+ eap_list.append('[BLANK]')
+ identity_list.append('[BLANK]')
+ anonymous_identity_list.append('[BLANK]')
+ phase1_list.append('[BLANK]')
+ phase2_list.append('[BLANK]')
+ passwd_list.append('[BLANK]')
+ pin_list.append('[BLANK]')
+ pac_file_list.append('[BLANK]')
+ private_key_list.append('[BLANK]')
+ pk_password_list.append('[BLANK]')
+ hessid_list.append("00:00:00:00:00:00")
+ realm_list.append('[BLANK]')
+ client_cert_list.append('[BLANK]')
+ imsi_list.append('[BLANK]')
+ milenage_list.append('[BLANK]')
+ domain_list.append('[BLANK]')
+ roaming_consortium_list.append('[BLANK]')
+ venue_group_list.append('[BLANK]')
+ network_type_list.append('[BLANK]')
+ ipaddr_type_avail_list.append('[BLANK]')
+ network_auth_type_list.append('[BLANK]')
+ anqp_3gpp_cell_net_list.append('[BLANK]')
+ ieee80211w_list.append('Optional')
+
+ # check for wifi_settings
+ wifi_settings_keys = ['wifi_settings']
+ wifi_settings_found = True
+ for key in wifi_settings_keys:
+ if key not in radio_info_dict:
+ logger.debug("wifi_settings_keys not enabled")
+ wifi_settings_found = False
+ break
+
+ if wifi_settings_found:
+ # Check for additional flags
+ if {'wifi_mode', 'enable_flags'}.issubset(
+ radio_info_dict.keys()):
+ logger.debug("wifi_settings flags set")
+ else:
+ logger.debug("wifi_settings is present wifi_mode, enable_flags need to be set "
+ "or remove the wifi_settings or set wifi_settings==False flag on "
+ "the radio for defaults")
+ return False
+ wifi_mode_list.append(radio_info_dict['wifi_mode'])
+ enable_flags_str = radio_info_dict['enable_flags'].replace(
+ '(', '').replace(')', '').replace('|', ',').replace('&&', ',')
+ enable_flags_list = list(enable_flags_str.split(","))
+ wifi_enable_flags_list.append(enable_flags_list)
+ else:
+ wifi_mode_list.append(0)
+ wifi_enable_flags_list.append(
+ ["wpa2_enable", "80211u_enable", "create_admin_down"])
+ # 8021x_radius is the same as Advanced/8021x on the gui
+
+ # check for optional radio key , currently only reset is enabled
+ # update for checking for reset_port_time_min, reset_port_time_max
+ optional_radio_reset_keys = ['reset_port_enable']
+ radio_reset_found = True
+ for key in optional_radio_reset_keys:
+ if key not in radio_info_dict:
+ # logger.debug("port reset test not enabled")
+ radio_reset_found = False
+ break
+
+ if radio_reset_found:
+ reset_port_enable_list.append(
+ radio_info_dict['reset_port_enable'])
+ reset_port_time_min_list.append(
+ radio_info_dict['reset_port_time_min'])
+ reset_port_time_max_list.append(
+ radio_info_dict['reset_port_time_max'])
+ else:
+ reset_port_enable_list.append(False)
+ reset_port_time_min_list.append('0s')
+ reset_port_time_max_list.append('0s')
+
+ index = 0
+ for (radio_name_, number_of_stations_per_radio_) in zip(
+ radio_name_list, number_of_stations_per_radio_list):
+ number_of_stations = int(number_of_stations_per_radio_)
+ if number_of_stations > MAX_NUMBER_OF_STATIONS:
+ logger.critical("number of stations per radio exceeded max of : {}".format(
+ MAX_NUMBER_OF_STATIONS))
+ quit(1)
+ station_list = LFUtils.portNameSeries(
+ prefix_="sta",
+ start_id_=0 + index * 1000 + int(args.sta_start_offset),
+ end_id_=number_of_stations - 1 + index *
+ 1000 + int(args.sta_start_offset),
+ padding_number_=10000,
+ radio=radio_name_)
+ station_lists.append(station_list)
+ index += 1
+
+ # create a secondary station_list
+ if args.use_existing_station_list:
+ if args.existing_station_list is not None:
+ # these are entered stations
+ for existing_sta_list in args.existing_station_list:
+ existing_stations = str(existing_sta_list).replace(
+ '"',
+ '').replace(
+ '[',
+ '').replace(
+ ']',
+ '').replace(
+ "'",
+ "").replace(
+ ",",
+ " ").split()
+
+ for existing_sta in existing_stations:
+ existing_station_lists.append(existing_sta)
+ else:
+ logger.error(
+ "--use_station_list set true, --station_list is None Exiting")
+ raise Exception(
+ "--use_station_list is used in conjunction with a --station_list")
+
+ logger.info("existing_station_lists: {sta}".format(
+ sta=existing_station_lists))
+
+ # logger.info("endp-types: %s"%(endp_types))
+ ul_rates = args.side_a_min_bps.replace(',', ' ').split()
+ dl_rates = args.side_b_min_bps.replace(',', ' ').split()
+ ul_pdus = args.side_a_min_pdu.replace(',', ' ').split()
+ dl_pdus = args.side_b_min_pdu.replace(',', ' ').split()
+ if args.attenuators == "":
+ attenuators = []
+ else:
+ attenuators = args.attenuators.split(",")
+ if args.atten_vals == "":
+ atten_vals = [-1]
+ else:
+ atten_vals = args.atten_vals.split(",")
+
+ if len(ul_rates) != len(dl_rates):
+ # todo make fill assignable
+ logger.info(
+ "ul_rates %s and dl_rates %s arrays are of different length will fill shorter list with 256000\n" %
+ (len(ul_rates), len(dl_rates)))
+ if len(ul_pdus) != len(dl_pdus):
+ logger.info(
+ "ul_pdus %s and dl_pdus %s arrays are of different lengths will fill shorter list with size AUTO \n" %
+ (len(ul_pdus), len(dl_pdus)))
+
+ # Configure reporting
+ logger.info("Configuring report")
+ report, kpi_csv, csv_outfile = configure_reporting(**vars(args))
+ ce = self.current_exec #seires
+ if ce == "parallel":
+ obj_name = "mcast_test"
+ else:
+ obj_no = 1
+ while f"mcast_test_{obj_no}" in self.mcast_obj_dict[ce]:
+ obj_no+=1
+ obj_name = f"mcast_test_{obj_no}"
+ self.mcast_obj_dict[ce][obj_name] = {"obj":None,"data":None}
+ logger.debug("Configure test object")
+ self.mcast_obj_dict[ce][obj_name]["obj"] = L3VariableTime(
+ endp_types=endp_types,
+ args=args,
+ tos=args.tos,
+ side_b=args.upstream_port,
+ side_a=args.downstream_port,
+ radio_name_list=radio_name_list,
+ number_of_stations_per_radio_list=number_of_stations_per_radio_list,
+ ssid_list=ssid_list,
+ ssid_password_list=ssid_password_list,
+ ssid_security_list=ssid_security_list,
+ wifi_mode_list=wifi_mode_list,
+ enable_flags_list=wifi_enable_flags_list,
+ station_lists=station_lists,
+ name_prefix="LT-",
+ outfile=csv_outfile,
+ reset_port_enable_list=reset_port_enable_list,
+ reset_port_time_min_list=reset_port_time_min_list,
+ reset_port_time_max_list=reset_port_time_max_list,
+ side_a_min_rate=ul_rates,
+ side_b_min_rate=dl_rates,
+ side_a_min_pdu=ul_pdus,
+ side_b_min_pdu=dl_pdus,
+ rates_are_totals=args.rates_are_totals,
+ mconn=args.multiconn,
+ attenuators=attenuators,
+ atten_vals=atten_vals,
+ number_template="00",
+ test_duration=args.test_duration,
+ polling_interval=args.polling_interval,
+ lfclient_host=args.lfmgr,
+ lfclient_port=args.lfmgr_port,
+ debug=args.debug,
+ kpi_csv=kpi_csv,
+ no_cleanup=args.no_cleanup,
+ use_existing_station_lists=args.use_existing_station_list,
+ existing_station_lists=existing_station_lists,
+ wait_for_ip_sec=args.wait_for_ip_sec,
+ exit_on_ip_acquired=args.exit_on_ip_acquired,
+ ap_read=args.ap_read,
+ ap_module=args.ap_module,
+ ap_test_mode=args.ap_test_mode,
+ ap_ip=args.ap_ip,
+ ap_user=args.ap_user,
+ ap_passwd=args.ap_passwd,
+ ap_scheme=args.ap_scheme,
+ ap_serial_port=args.ap_serial_port,
+ ap_ssh_port=args.ap_ssh_port,
+ ap_telnet_port=args.ap_telnet_port,
+ ap_serial_baud=args.ap_serial_baud,
+ ap_if_2g=args.ap_if_2g,
+ ap_if_5g=args.ap_if_5g,
+ ap_if_6g=args.ap_if_6g,
+ ap_report_dir="",
+ ap_file=args.ap_file,
+ ap_band_list=args.ap_band_list.split(','),
+
+ # for webgui execution
+ test_name=test_name,
+ dowebgui=args.dowebgui,
+ ip=ip,
+ get_live_view= args.get_live_view,
+ total_floors = args.total_floors,
+ # for uniformity from webGUI result_dir as variable is used insead of local_lf_report_dir
+ result_dir=args.local_lf_report_dir,
+
+ # wifi extra configuration
+ key_mgmt_list=key_mgmt_list,
+ pairwise_list=pairwise_list,
+ group_list=group_list,
+ psk_list=psk_list,
+ wep_key_list=wep_key_list,
+ ca_cert_list=ca_cert_list,
+ eap_list=eap_list,
+ identity_list=identity_list,
+ anonymous_identity_list=anonymous_identity_list,
+ phase1_list=phase1_list,
+ phase2_list=phase2_list,
+ passwd_list=passwd_list,
+ pin_list=pin_list,
+ pac_file_list=pac_file_list,
+ private_key_list=private_key_list,
+ pk_password_list=pk_password_list,
+ hessid_list=hessid_list,
+ realm_list=realm_list,
+ client_cert_list=client_cert_list,
+ imsi_list=imsi_list,
+ milenage_list=milenage_list,
+ domain_list=domain_list,
+ roaming_consortium_list=roaming_consortium_list,
+ venue_group_list=venue_group_list,
+ network_type_list=network_type_list,
+ ipaddr_type_avail_list=ipaddr_type_avail_list,
+ network_auth_type_list=network_auth_type_list,
+ anqp_3gpp_cell_net_list=anqp_3gpp_cell_net_list,
+ ieee80211w_list=ieee80211w_list,
+ interopt_mode=interopt_mode,
+ endp_input_list=endp_input_list,
+ graph_input_list=graph_input_list,
+ real=args.real,
+ expected_passfail_value=args.expected_passfail_value,
+ device_csv_name=args.device_csv_name,
+ group_name=args.group_name
+ )
+
+ # Perform pre-test cleanup, if configured to do so
+ if args.no_pre_cleanup:
+ logger.info("Skipping pre-test cleanup, '--no_pre_cleanup' specified")
+ elif args.use_existing_station_list:
+ logger.info("Skipping pre-test cleanup, '--use_existing_station_list' specified")
+ else:
+ logger.info("Performing pre-test cleanup")
+ self.mcast_obj_dict[ce][obj_name]["obj"].pre_cleanup()
+
+ # Build test configuration
+ logger.info("Building test configuration")
+ self.mcast_obj_dict[ce][obj_name]["obj"].build()
+ if not self.mcast_obj_dict[ce][obj_name]["obj"].passes():
+ logger.critical("Test configuration build failed")
+ logger.critical(self.mcast_obj_dict[ce][obj_name]["obj"].get_fail_message())
+ return False
+
+ # Run test
+ logger.info("Starting test")
+ self.mcast_obj_dict[ce][obj_name]["obj"].start(False)
+
+ if args.wait > 0:
+ logger.info(f"Pausing {args.wait} seconds for manual inspection before test conclusion and "
+ "possible traffic stop/post-test cleanup")
+ time.sleep(args.wait)
+
+ # Admin down the stations
+ if args.no_stop_traffic:
+ logger.info("Test complete, '--no_stop_traffic' specified, traffic continues to run")
+ else:
+ if args.quiesce_cx:
+ logger.info("Test complete, quiescing traffic")
+ self.mcast_obj_dict[ce][obj_name]["obj"].quiesce_cx()
+ time.sleep(3)
+ else:
+ logger.info("Test complete, stopping traffic")
+ self.mcast_obj_dict[ce][obj_name]["obj"].stop()
+
+ # Set DUT information for reporting
+ self.mcast_obj_dict[ce][obj_name]["obj"].set_dut_info(
+ dut_model_num=args.dut_model_num,
+ dut_hw_version=args.dut_hw_version,
+ dut_sw_version=args.dut_sw_version,
+ dut_serial_num=args.dut_serial_num)
+ self.mcast_obj_dict[ce][obj_name]["obj"].set_report_obj(report=report)
+ if args.dowebgui:
+ self.mcast_obj_dict[ce][obj_name]["obj"].webgui_finalize()
+ # Generate and write out test report
+ logger.info("Generating test report")
+ if args.real:
+ self.mcast_obj_dict[ce][obj_name]["obj"].generate_report(config_devices, group_device_map)
+ else:
+ self.mcast_obj_dict[ce][obj_name]["obj"].generate_report()
+ params = {
+ "config_devices" : None,
+ "group_device_map": None
+ }
+ params["group_device_map"] = group_device_map
+ params["config_devices"] = config_devices
+ self.mcast_obj_dict[ce][obj_name]["data"] = params.copy()
+ self.mcast_obj_dict[ce][obj_name]["obj"].write_report()
+
+ # TODO move to after reporting
+ if not self.mcast_obj_dict[ce][obj_name]["obj"].passes():
+ logger.warning("Test Ended: There were Failures")
+ logger.warning(self.mcast_obj_dict[ce][obj_name]["obj"].get_fail_message())
+
+ if args.no_cleanup:
+ logger.info("Skipping post-test cleanup, '--no_cleanup' specified")
+ elif args.no_stop_traffic:
+ logger.info("Skipping post-test cleanup, '--no_stop_traffic' specified")
+ else:
+ logger.info("Performing post-test cleanup")
+ self.mcast_obj_dict[ce][obj_name]["obj"].cleanup()
+
+ # TODO: This is redundant if '--no_cleanup' is not specified (already taken care of there)
+ if args.cleanup_cx:
+ logger.info("Performing post-test CX traffic pair cleanup")
+ self.mcast_obj_dict[ce][obj_name]["obj"].cleanup_cx()
+
+ if self.mcast_obj_dict[ce][obj_name]["obj"].passes():
+ test_passed = True
+ logger.info("Full test passed, all connections increased rx bytes")
+
+ # Run WebGUI-specific post test logic
+ if args.dowebgui:
+ self.mcast_obj_dict[ce][obj_name]["obj"].copy_reports_to_home_dir()
+
+ if test_passed:
+ self.mcast_obj_dict[ce][obj_name]["obj"].exit_success()
+ else:
+ self.mcast_obj_dict[ce][obj_name]["obj"].exit_fail()
+
+ return True
+
+
+ def run_mc_test1(
+ self,
+ local_lf_report_dir="",
+ results_dir_name="test_l3",
+ test_rig="",
+ test_tag="",
+ dut_hw_version="",
+ dut_sw_version="",
+ dut_model_num="",
+ dut_serial_num="",
+ test_priority="",
+ test_id="test l3",
+ csv_outfile="",
+ tty="",
+ baud="9600",
+ test_duration="3m",
+ tos="BE",
+ debug=False,
+ log_level=None,
+ interopt_mode=False,
+ endp_type="mc_udp",
+ upstream_port="eth1",
+ downstream_port=None,
+ polling_interval="5s",
+ radio=None,
+ side_a_min_bps="0",
+ side_a_min_pdu="MTU",
+ side_b_min_bps="256000",
+ side_b_min_pdu="MTU",
+ rates_are_totals=True,
+ multiconn=1,
+ attenuators="",
+ atten_vals="",
+ wait=0,
+ sta_start_offset="0",
+ no_pre_cleanup=False,
+ no_cleanup=True,
+ cleanup_cx=False,
+ csv_data_to_report=False,
+ no_stop_traffic=False,
+ quiesce_cx=False,
+ use_existing_station_list=False,
+ existing_station_list=None,
+ wait_for_ip_sec="120s",
+ exit_on_ip_acquired=False,
+ lf_logger_config_json=None,
+ ap_read=False,
+ ap_module=None,
+ ap_test_mode=True,
+ ap_scheme="serial",
+ ap_serial_port="/dev/ttyUSB0",
+ ap_serial_baud="115200",
+ ap_ip="192.168.50.1",
+ ap_ssh_port="1025",
+ ap_telnet_port="23",
+ ap_user="lanforge",
+ ap_passwd="lanforge",
+ ap_if_2g="wl0",
+ ap_if_5g="wl1",
+ ap_if_6g="wl2",
+ ap_file=None,
+ ap_band_list="2g,5g,6g",
+ dowebgui=False,
+ test_name=None,
+ ssid=None,
+ passwd=None,
+ security=None,
+ device_list=None,
+ expected_passfail_value=None,
+ device_csv_name=None,
+ file_name=None,
+ group_name=None,
+ profile_name=None,
+ eap_method="DEFAULT",
+ eap_identity="",
+ ieee8021x=False,
+ ieee80211u=False,
+ ieee80211w=1,
+ enable_pkc=False,
+ bss_transition=False,
+ power_save=False,
+ disable_ofdma=False,
+ roam_ft_ds=False,
+ key_management="DEFAULT",
+ pairwise="NA",
+ private_key="NA",
+ ca_cert="NA",
+ client_cert="NA",
+ pk_passwd="NA",
+ pac_file="NA",
+ config=False,
+ wait_time=60,
+ real=True,
+ get_live_view=False,
+ total_floors="0",
+ help_summary=False
+ ):
+ args = SimpleNamespace(**locals())
+ args.lfmgr_port = self.port
+ args.lfmgr = self.lanforge_ip
+ args.local_lf_report_dir = os.getcwd()
+ return self.run_mc_test(args)
+
+
    def run_yt_test(
            self,
            url=None,
            duration=None,
            ap_name="TIP",
            sec="wpa2",
            band="5GHZ",
            test_name=None,
            upstream_port=None,
            resource_list=None,
            no_pre_cleanup=False,
            no_post_cleanup=False,
            debug=False,
            log_level=None,
            res="Auto",
            lf_logger_config_json=None,
            ui_report_dir=None,
            do_webUI=False,
            file_name=None,
            group_name=None,
            profile_name=None,
            ssid=None,
            passwd=None,
            encryp=None,
            eap_method="DEFAULT",
            eap_identity="DEFAULT",
            ieee8021x=False,
            ieee80211u=False,
            ieee80211w=1,
            enable_pkc=False,
            bss_transition=False,
            power_save=False,
            disable_ofdma=False,
            roam_ft_ds=False,
            key_management="DEFAULT",
            pairwise="NA",
            private_key="NA",
            ca_cert="NA",
            pac_file="NA",
            client_cert="NA",
            pk_passwd="NA",
            help_summary=None,
            expected_passfail_value=None,
            device_csv_name=None,
            config=False,
            exec_type=None
    ):
        """Run a YouTube streaming test on real devices via the Youtube helper.

        ``duration`` is interpreted in minutes; string values may carry an
        ``s``/``m``/``h`` suffix and are normalized to minutes below.  Devices
        are either taken from ``resource_list``, resolved from configured
        groups/profiles, or selected interactively.  Generates a report at the
        end; returns False on early aborts, and any exception is caught, logged
        and traced (the method itself never raises).
        """
        try:
            print('duration',duration)
            # Normalize duration to minutes: ints pass through, strings may
            # end with s/S (seconds), m/M (minutes) or h/H (hours).
            if type(duration) == int:
                pass
            elif duration.endswith('s') or duration.endswith('S'):
                duration = int(duration[0:-1])/60
            elif duration.endswith('m') or duration.endswith('M'):
                duration = int(duration[0:-1])
            elif duration.endswith('h') or duration.endswith('H'):
                duration = int(duration[0:-1])*60
            else:
                duration = int(duration)

            # set the logger level to debug
            logger_config = lf_logger_config.lf_logger_config()

            if log_level:
                logger_config.set_level(level=log_level)

            if lf_logger_config_json:
                logger_config.lf_logger_config_json = lf_logger_config_json
                logger_config.load_lf_logger_config()

            # LANforge manager coordinates come from this instance.
            mgr_ip = self.lanforge_ip
            mgr_port = self.port
            url = url
            duration = duration

            do_webUI = do_webUI
            ui_report_dir = ui_report_dir
            debug = debug
            # Print debug information if debugging is enabled
            if debug:
                logging.info('''Specified configuration:
                             ip: {}
                             port: {}
                             Duration: {}
                             debug: {}
                             '''.format(mgr_ip, mgr_port, duration, debug))

            # Split comma-separated group/profile selections into lists.
            if True:
                if group_name is not None:
                    group_name = group_name.strip()
                    selected_groups = group_name.split(',')
                else:
                    selected_groups = []

                if profile_name is not None:
                    profile_name = profile_name.strip()
                    selected_profiles = profile_name.split(',')
                else:
                    selected_profiles = []

                # Discover real devices attached to the LANforge manager.
                Devices = RealDevice(manager_ip=mgr_ip,
                                     server_ip='192.168.1.61',
                                     ssid_2g='Test Configured',
                                     passwd_2g='',
                                     encryption_2g='',
                                     ssid_5g='Test Configured',
                                     passwd_5g='',
                                     encryption_5g='',
                                     ssid_6g='Test Configured',
                                     passwd_6g='',
                                     encryption_6g='',
                                     selected_bands=['5G'])
                Devices.get_devices()

            # Create a YouTube object with the specified parameters

            self.yt_test_obj = Youtube(
                host=mgr_ip,
                port=mgr_port,
                url=url,
                duration=duration,
                lanforge_password='lanforge',
                sta_list=[],
                do_webUI=do_webUI,
                ui_report_dir=ui_report_dir,
                debug=debug,
                resolution=res,
                ap_name=ap_name,
                ssid=ssid,
                security=encryp,
                band=band,
                test_name=test_name,
                upstream_port=upstream_port,
                config=config,
                selected_groups=selected_groups,
                selected_profiles=selected_profiles,
                no_browser_precleanup=True,
                no_browser_postcleanup=True)

            # Free port 5002 before starting the helper's Flask server.
            print('CHECKING PORT AVAILBILITY for YT TEST')
            self.port_clean_up(5002)
            self.yt_test_obj.start_flask_server()
            upstream_port = self.yt_test_obj.change_port_to_ip(upstream_port)

            resources = []
            self.yt_test_obj.Devices = Devices
            if file_name:
                new_filename = file_name.removesuffix(".csv")
            else:
                new_filename = file_name
            config_obj = DeviceConfig.DeviceConfig(lanforge_ip=self.lanforge_ip, file_name=new_filename)
            # if not expected_passfail_value and device_csv_name is None:
            #     config_obj.device_csv_file(csv_name="device.csv")
            # Group/profile mode: map each selected group to its profile,
            # configure connectivity and build the resource list from the
            # group membership.
            if group_name is not None and file_name is not None and profile_name is not None:
                selected_groups = group_name.split(',')
                selected_profiles = profile_name.split(',')
                config_devices = {}
                for i in range(len(selected_groups)):
                    config_devices[selected_groups[i]] = selected_profiles[i]

                config_obj.initiate_group()

                asyncio.run(config_obj.connectivity(config_devices))

                adbresponse = config_obj.adb_obj.get_devices()
                resource_manager = config_obj.laptop_obj.get_devices()
                all_res = {}
                df1 = config_obj.display_groups(config_obj.groups)
                groups_list = df1.to_dict(orient='list')
                group_devices = {}

                for adb in adbresponse:
                    group_devices[adb['serial']] = adb['eid']
                # NOTE(review): this loop variable shadows the 'res'
                # (resolution) parameter; resolution was already handed to the
                # Youtube object above, so no functional impact here -- still
                # worth renaming.
                for res in resource_manager:
                    all_res[res['hostname']] = res['shelf'] + '.' + res['resource']
                eid_list = []
                # Translate group members (serials/hostnames) into eids.
                for grp_name in groups_list.keys():
                    for g_name in selected_groups:
                        if grp_name == g_name:
                            for j in groups_list[grp_name]:
                                if j in group_devices.keys():
                                    eid_list.append(group_devices[j])
                                elif j in all_res.keys():
                                    eid_list.append(all_res[j])
                resource_list = ",".join(id for id in eid_list)
            else:
                # Manual mode: build the wifi configuration dict applied to
                # the chosen devices when config=True.
                # NOTE(review): the 'ieee80211' key is fed from the ieee8021x
                # parameter -- confirm the expected key name downstream.
                config_dict = {
                    'ssid': ssid,
                    'passwd': passwd,
                    'enc': encryp,
                    'eap_method': eap_method,
                    'eap_identity': eap_identity,
                    'ieee80211': ieee8021x,
                    'ieee80211u': ieee80211u,
                    'ieee80211w': ieee80211w,
                    'enable_pkc': enable_pkc,
                    'bss_transition': bss_transition,
                    'power_save': power_save,
                    'disable_ofdma': disable_ofdma,
                    'roam_ft_ds': roam_ft_ds,
                    'key_management': key_management,
                    'pairwise': pairwise,
                    'private_key': private_key,
                    'ca_cert': ca_cert,
                    'client_cert': client_cert,
                    'pk_passwd': pk_passwd,
                    'pac_file': pac_file,
                    'server_ip': upstream_port,
                }
                if resource_list:
                    all_devices = config_obj.get_all_devices()
                    if group_name is None and file_name is None and profile_name is None:
                        dev_list = resource_list.split(',')
                        if config:
                            asyncio.run(config_obj.connectivity(device_list=dev_list, wifi_config=config_dict))
                else:
                    # No resource list given: show what is available and ask
                    # the operator to pick interactively.
                    all_devices = config_obj.get_all_devices()
                    device_list = []
                    for device in all_devices:
                        if device["type"] != 'laptop':
                            device_list.append(device["shelf"] + '.' + device["resource"] + " " + device["serial"])
                        elif device["type"] == 'laptop':
                            device_list.append(device["shelf"] + '.' + device["resource"] + " " + device["hostname"])

                    print("Available devices:")
                    for device in device_list:
                        print(device)

                    resource_list = input("Enter the desired resources to run the test:")
                    dev1_list = resource_list.split(',')
                    if config:
                        asyncio.run(config_obj.connectivity(device_list=dev1_list, wifi_config=config_dict))

            # Register the selected devices with the Youtube helper.
            if not do_webUI:
                if resource_list:
                    resources = [r.strip() for r in resource_list.split(',')]
                    # keep only fully-qualified "shelf.resource..." entries
                    resources = [r for r in resources if len(r.split('.')) > 1]

                    self.yt_test_obj.select_real_devices(real_devices=Devices, real_sta_list=resources, base_interop_obj=Devices)

                else:
                    self.yt_test_obj.select_real_devices(real_devices=Devices)
            else:
                resources = [r.strip() for r in resource_list.split(',')]

                # reduce each entry to its "shelf.resource" prefix
                extracted_parts = [res.split('.')[:2] for res in resources]
                formatted_parts = ['.'.join(parts) for parts in extracted_parts]
                self.yt_test_obj.select_real_devices(real_devices=Devices, real_sta_list=formatted_parts, base_interop_obj=Devices)

            # WebUI mode: publish the configured-device status to the running
            # json consumed by the web interface.
            if do_webUI:

                if len(self.yt_test_obj.real_sta_hostname) == 0:
                    logging.error("No device is available to run the test")
                    obj = {
                        "status": "Stopped",
                        "configuration_status": "configured"
                    }
                    self.yt_test_obj.updating_webui_runningjson(obj)
                    return
                else:
                    obj = {
                        "configured_devices": self.yt_test_obj.real_sta_hostname,
                        "configuration_status": "configured",
                        "no_of_devices": f' Total({len(self.yt_test_obj.real_sta_os_types)}) : W({self.yt_test_obj.windows}),L({self.yt_test_obj.linux}),M({self.yt_test_obj.mac})',
                        "device_list": self.yt_test_obj.hostname_os_combination

                    }
                    self.yt_test_obj.updating_webui_runningjson(obj)

            # Perform pre-test cleanup if not skipped
            if not no_pre_cleanup:
                self.yt_test_obj.cleanup()

            # Check if the required tab exists, and exit if not
            if not self.yt_test_obj.check_tab_exists():
                logging.error('Generic Tab is not available.\nAborting the test.')
                return False

            if len(self.yt_test_obj.real_sta_list) > 0:
                logging.info(f"checking real sta list while creating endpionts {self.yt_test_obj.real_sta_list}")
                print('HII',self.yt_test_obj.real_sta_list)
                self.yt_test_obj.create_generic_endp(self.yt_test_obj.real_sta_list)
            else:
                logging.info(f"checking real sta list while creating endpionts {self.yt_test_obj.real_sta_list}")
                logging.error("No Real Devies Available")
                return False

            logging.info("TEST STARTED")
            logging.info('Running the youtube Streaming test for {} minutes'.format(duration))

            time.sleep(10)

            # NOTE(review): relies on datetime.now/timedelta being in scope
            # (i.e. "from datetime import datetime, timedelta" somewhere in
            # this module) -- confirm against the module imports.
            self.yt_test_obj.start_time = datetime.now()
            self.yt_test_obj.start_generic()

            duration = duration
            end_time = datetime.now() + timedelta(minutes=duration)
            # Poll until the stats API starts returning data.
            initial_data = self.yt_test_obj.get_data_from_api()

            while len(initial_data) == 0:
                initial_data = self.yt_test_obj.get_data_from_api()
                time.sleep(1)
            if initial_data:
                end_time_webgui = []
                for i in range(len(self.yt_test_obj.device_names)):
                    end_time_webgui.append(initial_data['result'].get(self.yt_test_obj.device_names[i], {}).get('stop', False))
            else:
                # NOTE(review): this branch appears unreachable (the loop above
                # guarantees initial_data is non-empty); if it ever ran it
                # would reference end_time_webgui before assignment.
                for i in range(len(self.yt_test_obj.device_names)):
                    end_time_webgui.append("")

            end_time = datetime.now() + timedelta(minutes=duration)

            # Keep polling until the duration elapses and all generic CXs
            # have finished.
            while datetime.now() < end_time or not self.yt_test_obj.check_gen_cx():
                self.yt_test_obj.get_data_from_api()
                time.sleep(1)

            self.yt_test_obj.generic_endps_profile.stop_cx()
            logging.info("Duration ended")

            logging.info('Stopping the test')
            if do_webUI:
                self.yt_test_obj.create_report(self.yt_test_obj.stats_api_response, self.yt_test_obj.ui_report_dir)
            else:

                self.yt_test_obj.create_report(self.yt_test_obj.stats_api_response, '')

            # Perform post-test cleanup if not skipped
            # if not no_post_cleanup:
            #     self.yt_test_obj.generic_endps_profile.cleanup()
        except Exception as e:
            # Best-effort guard: log and trace any failure instead of raising.
            logging.error(f"Error occured {e}")
            traceback.print_exc()
+ finally:
+ if not ('--help' in sys.argv or '-h' in sys.argv):
+ traceback.print_exc()
+ self.yt_test_obj.stop()
+ if self.current_exec == "parallel":
+ self.yt_obj_dict["parallel"]["yt_test"]["obj"] =self.yt_test_obj
+ else:
+ for i in range(len(self.yt_obj_dict["series"])):
+ if self.yt_obj_dict["series"][f"yt_test_{i+1}"]["obj"] is None:
+ self.yt_obj_dict["series"][f"yt_test_{i+1}"]["obj"] = self.yt_test_obj
+ break
+ # Stopping the Youtube test
+ if do_webUI:
+ self.yt_test_obj.stop_test_yt()
+ logging.info("Waiting for Cleanup of Browsers in Devices")
+ time.sleep(10)
+ return True
+
    def run_zoom_test(
        self,
        duration: int,
        signin_email: str,
        signin_passwd: str,
        participants: int,
        audio: bool = False,
        video: bool = False,
        wait_time: int = 30,
        log_level: str = None,
        lf_logger_config_json: str = None,
        resource_list: str = None,
        do_webUI: bool = False,
        report_dir: str = None,
        testname: str = None,
        zoom_host: str = None,
        file_name: str = None,
        group_name: str = None,
        profile_name: str = None,
        ssid: str = None,
        passwd: str = None,
        encryp: str = None,
        eap_method: str = 'DEFAULT',
        eap_identity: str = 'DEFAULT',
        ieee8021x: bool = False,
        ieee80211u: bool = False,
        ieee80211w: int = 1,
        enable_pkc: bool = False,
        bss_transition: bool = False,
        power_save: bool = False,
        disable_ofdma: bool = False,
        roam_ft_ds: bool = False,
        key_management: str = 'DEFAULT',
        pairwise: str = 'NA',
        private_key: str = 'NA',
        ca_cert: str = 'NA',
        client_cert: str = 'NA',
        pk_passwd: str = 'NA',
        pac_file: str = 'NA',
        upstream_port: str = 'NA',
        help_summary: str = None,
        expected_passfail_value: str = None,
        device_csv_name: str = None,
        config: bool = False,
        exec_type: str = None
    ):
        """Run the Zoom meeting automation test on real devices.

        Builds a ZoomAutomation instance, selects the participating devices
        (either from group/profile mappings in a config file, from an
        explicit comma-separated ``resource_list``, or interactively),
        optionally configures their Wi-Fi via DeviceConfig, runs the Zoom
        meeting for ``duration``, and generates the report.  The ``finally``
        block posts a completion status to the webUI (when ``do_webUI``),
        resets the test object's runtime state, stashes it into
        ``self.zoom_obj_dict`` for later report rendering, and cleans up the
        generic endpoints.

        ``zoom_host`` identifies the device that hosts the meeting; it is
        always moved to the front of the resource list before device
        selection (presumably so the host joins first -- TODO confirm).

        Returns True on normal completion, False/None on early aborts.

        NOTE(review): the ``finally`` block dereferences
        ``self.zoom_test_obj`` unconditionally; if ZoomAutomation's
        constructor raises, the attribute may not exist and the cleanup
        itself will fail -- verify.
        """
        try:
            lanforge_ip = self.lanforge_ip

            # NOTE(review): leftover always-true guard; kept byte-identical.
            if True:

                # Split comma-separated group / profile selections, if given.
                if group_name is not None:
                    group_name = group_name.strip()
                    selected_groups = group_name.split(',')
                else:
                    selected_groups = []

                if profile_name is not None:
                    profile_name = profile_name.strip()
                    selected_profiles = profile_name.split(',')
                else:
                    selected_profiles = []


                # Browser pre/post cleanup is disabled here; it is driven
                # centrally by browser_cleanup() instead.
                self.zoom_test_obj = ZoomAutomation(audio=audio, video=video, lanforge_ip=lanforge_ip, wait_time=wait_time, testname=testname,
                                                    upstream_port=upstream_port, config=config, selected_groups=selected_groups, selected_profiles=selected_profiles,no_browser_precleanup = True,no_browser_postcleanup = True)
                upstream_port = self.zoom_test_obj.change_port_to_ip(upstream_port)
                # NOTE(review): hard-coded server_ip and placeholder
                # 'Test Configured' SSIDs look like fixtures -- confirm.
                realdevice = RealDevice(manager_ip=lanforge_ip,
                                        server_ip="192.168.1.61",
                                        ssid_2g='Test Configured',
                                        passwd_2g='',
                                        encryption_2g='',
                                        ssid_5g='Test Configured',
                                        passwd_5g='',
                                        encryption_5g='',
                                        ssid_6g='Test Configured',
                                        passwd_6g='',
                                        encryption_6g='',
                                        selected_bands=['5G'])
                laptops = realdevice.get_devices()
                print('CHECKING PORT AVAILBILITY for ZOOM TEST')
                # Free TCP port 5000 before the test server starts
                # (presumably the zoom test's local HTTP port -- TODO confirm).
                self.port_clean_up(5000)

                # DeviceConfig expects the config file name without the
                # ".csv" suffix.
                if file_name:
                    new_filename = file_name.removesuffix(".csv")
                else:
                    new_filename = file_name
                config_obj = DeviceConfig.DeviceConfig(lanforge_ip=lanforge_ip, file_name=new_filename)

                # if not expected_passfail_value and device_csv_name is None:
                #     config_obj.device_csv_file(csv_name="device.csv")
                if group_name is not None and file_name is not None and profile_name is not None:
                    # Group/profile mode: pair each selected group with its
                    # profile and configure those devices.
                    selected_groups = group_name.split(',')
                    selected_profiles = profile_name.split(',')
                    config_devices = {}
                    for i in range(len(selected_groups)):
                        config_devices[selected_groups[i]] = selected_profiles[i]

                    config_obj.initiate_group()
                    asyncio.run(config_obj.connectivity(config_devices))

                    # Resolve group member names (adb serials / laptop
                    # hostnames) to LANforge EIDs.
                    adbresponse = config_obj.adb_obj.get_devices()
                    resource_manager = config_obj.laptop_obj.get_devices()
                    all_res = {}
                    df1 = config_obj.display_groups(config_obj.groups)
                    groups_list = df1.to_dict(orient='list')
                    group_devices = {}

                    for adb in adbresponse:
                        group_devices[adb['serial']] = adb['eid']
                    for res in resource_manager:
                        all_res[res['hostname']] = res['shelf'] + '.' + res['resource']
                    eid_list = []
                    for grp_name in groups_list.keys():
                        for g_name in selected_groups:
                            if grp_name == g_name:
                                for j in groups_list[grp_name]:
                                    if j in group_devices.keys():
                                        eid_list.append(group_devices[j])
                                    elif j in all_res.keys():
                                        eid_list.append(all_res[j])
                    if zoom_host in eid_list:
                        # Remove the existing instance of zoom_host from the list
                        eid_list.remove(zoom_host)
                        # Insert zoom_host at the beginning of the list
                        eid_list.insert(0, zoom_host)

                    resource_list = ",".join(id for id in eid_list)
                else:
                    # Manual mode: build the Wi-Fi configuration payload that
                    # DeviceConfig.connectivity() consumes.
                    config_dict = {
                        'ssid': ssid,
                        'passwd': passwd,
                        'enc': encryp,
                        'eap_method': eap_method,
                        'eap_identity': eap_identity,
                        'ieee80211': ieee8021x,
                        'ieee80211u': ieee80211u,
                        'ieee80211w': ieee80211w,
                        'enable_pkc': enable_pkc,
                        'bss_transition': bss_transition,
                        'power_save': power_save,
                        'disable_ofdma': disable_ofdma,
                        'roam_ft_ds': roam_ft_ds,
                        'key_management': key_management,
                        'pairwise': pairwise,
                        'private_key': private_key,
                        'ca_cert': ca_cert,
                        'client_cert': client_cert,
                        'pk_passwd': pk_passwd,
                        'pac_file': pac_file,
                        'server_ip': upstream_port,

                    }
                    if resource_list:
                        all_devices = config_obj.get_all_devices()
                        if group_name is None and file_name is None and profile_name is None:
                            dev_list = resource_list.split(',')
                            if not do_webUI:
                                # CLI mode: the host must lead the list.
                                zoom_host = zoom_host.strip()
                                if zoom_host in dev_list:
                                    dev_list.remove(zoom_host)
                                dev_list.insert(0, zoom_host)
                            if config:
                                asyncio.run(config_obj.connectivity(device_list=dev_list, wifi_config=config_dict))
                            resource_list = ",".join(id for id in dev_list)
                    else:
                        # If no resources provided, prompt user to select devices manually
                        if config:
                            all_devices = config_obj.get_all_devices()
                            device_list = []
                            for device in all_devices:
                                if device["type"] != 'laptop':
                                    device_list.append(device["shelf"] + '.' + device["resource"] + " " + device["serial"])
                                elif device["type"] == 'laptop':
                                    device_list.append(device["shelf"] + '.' + device["resource"] + " " + device["hostname"])
                            print("Available Devices For Testing")
                            for device in device_list:
                                print(device)
                            zm_host = input("Enter Host Resource for the Test : ")
                            zm_host = zm_host.strip()
                            resource_list = input("Enter client Resources to run the test :")
                            resource_list = zm_host + "," + resource_list
                            dev1_list = resource_list.split(',')
                            asyncio.run(config_obj.connectivity(device_list=dev1_list, wifi_config=config_dict))

                result_list = []
                if not do_webUI:
                    if resource_list:
                        # Keep only entries in shelf.resource form, then map
                        # them back to the full laptop identifiers.
                        resources = resource_list.split(',')
                        resources = [r for r in resources if len(r.split('.')) > 1]
                        # resources = sorted(resources, key=lambda x: int(x.split('.')[1]))
                        get_data = self.zoom_test_obj.select_real_devices(real_device_obj=realdevice, real_sta_list=resources)
                        for item in get_data:
                            item = item.strip()
                            # Find and append the matching lap to result_list
                            matching_laps = [lap for lap in laptops if lap.startswith(item)]
                            result_list.extend(matching_laps)
                        if not result_list:
                            logging.info("Resources donot exist hence Terminating the test.")
                            return
                        if len(result_list) != len(get_data):
                            logging.info("Few Resources donot exist")
                    else:
                        resources = self.zoom_test_obj.select_real_devices(real_device_obj=realdevice)
                else:
                    if do_webUI:
                        # webUI mode: reports go to the webUI-provided dir and
                        # resources are normalised to shelf.resource form.
                        self.zoom_test_obj.path = report_dir
                        resources = resource_list.split(',')
                        extracted_parts = [res.split('.')[:2] for res in resources]
                        formatted_parts = ['.'.join(parts) for parts in extracted_parts]

                        self.zoom_test_obj.select_real_devices(real_device_obj=realdevice, real_sta_list=formatted_parts)
                if do_webUI:

                    # Report selection results back to the webUI running-json;
                    # abort if nothing usable was selected.
                    if len(self.zoom_test_obj.real_sta_hostname) == 0:
                        logging.info("No device is available to run the test")
                        obj = {
                            "status": "Stopped",
                            "configuration_status": "configured"
                        }
                        self.zoom_test_obj.updating_webui_runningjson(obj)
                        return False
                    else:
                        obj = {
                            "configured_devices": self.zoom_test_obj.real_sta_hostname,
                            "configuration_status": "configured",
                            "no_of_devices": f' Total({len(self.zoom_test_obj.real_sta_os_type)}) : W({self.zoom_test_obj.windows}),L({self.zoom_test_obj.linux}),M({self.zoom_test_obj.mac})',
                            "device_list": self.zoom_test_obj.hostname_os_combination,
                            # "zoom_host":self.zoom_test_obj.zoom_host

                        }
                        self.zoom_test_obj.updating_webui_runningjson(obj)

                # The zoom test drives devices through LANforge generic
                # endpoints, so the Generic tab must be present.
                if not self.zoom_test_obj.check_tab_exists():
                    logging.error('Generic Tab is not available.\nAborting the test.')
                    return False
                self.zoom_test_obj.run(duration, upstream_port, signin_email, signin_passwd, participants)
                self.zoom_test_obj.data_store.clear()
                self.zoom_test_obj.generate_report()
                logging.info("Test Completed Sucessfully")
        except Exception as e:
            logging.error(f"AN ERROR OCCURED WHILE RUNNING TEST {e}")
            traceback.print_exc()
        finally:
            # Teardown runs on every path except pure --help invocations.
            if not ('--help' in sys.argv or '-h' in sys.argv):
                if do_webUI:
                    try:
                        # NOTE(review): the zoom test posts its status to the
                        # "update_status_yt" endpoint -- confirm this is the
                        # intended (shared) webUI status route.
                        url = f"http://{lanforge_ip}:5454/update_status_yt"
                        headers = {
                            'Content-Type': 'application/json',
                        }

                        data = {
                            'status': 'Completed',
                            'name': testname
                        }

                        response = requests.post(url, json=data, headers=headers)

                        if response.status_code == 200:
                            logging.info("Successfully updated STOP status to 'Completed'")
                            pass
                        else:
                            logging.error(f"Failed to update STOP status: {response.status_code} - {response.text}")

                    except Exception as e:
                        # Print an error message if an exception occurs during the request
                        logging.error(f"An error occurred while updating status: {e}")

                # Reset runtime handles so the stored object is safe to keep.
                self.zoom_test_obj.redis_client.set('login_completed', 0)
                self.zoom_test_obj.stop_signal = True
                self.zoom_test_obj.app = None
                self.zoom_test_obj.redis_client = None
                # Stash the finished test object for later report rendering:
                # one fixed slot in parallel mode, first free numbered slot in
                # series mode.
                if self.current_exec == "parallel":
                    self.zoom_obj_dict["parallel"]["zoom_test"]["obj"] =self.zoom_test_obj
                else:
                    for i in range(len(self.zoom_obj_dict["series"])):
                        if self.zoom_obj_dict["series"][f"zoom_test_{i+1}"]["obj"] is None:
                            self.zoom_obj_dict["series"][f"zoom_test_{i+1}"]["obj"] = self.zoom_test_obj
                            break
                logging.info("Waiting for Browser Cleanup in Laptops")
                self.zoom_test_obj.generic_endps_profile.cleanup()
                # self.zoom_test_obj.generic_endps_profile.cleanup()
                time.sleep(10)

        return True
+
+
+ def run_rb_test1(self,args):
+ try:
+ logger_config = lf_logger_config.lf_logger_config()
+
+ if args.log_level:
+ logger_config.set_level(level=args.log_level)
+
+ if args.lf_logger_config_json:
+ logger_config.lf_logger_config_json = args.lf_logger_config_json
+ logger_config.load_lf_logger_config()
+ if args.url.lower().startswith("www."):
+ args.url = "https://" + args.url
+ if args.url.lower().startswith("http://"):
+ args.url = "https://" + args.url.removeprefix("http://")
+ # ce = self.current_exec #seires
+ # if ce == "parallel":
+ # obj_name = "rb_test"
+ # else:
+ # obj_no = 1
+ # while f"rb_test_{obj_no}" in self.rb_obj_dict[ce]:
+ # obj_no+=1
+ # obj_name = f"rb_test_{obj_no}"
+ # self.rb_obj_dict[ce][obj_name] = {"obj":None,"data":None}
+ # Initialize an instance of RealBrowserTest with various parameters
+ self.rb_test = RealBrowserTest(host=args.host,
+ ssid=args.ssid,
+ passwd=args.passwd,
+ encryp=args.encryp,
+ suporrted_release=["7.0", "10", "11", "12"],
+ max_speed=args.max_speed,
+ url=args.url, count=args.count,
+ duration=args.duration,
+ resource_ids=args.device_list,
+ dowebgui=args.dowebgui,
+ result_dir=args.result_dir,
+ test_name=args.test_name,
+ incremental=args.incremental,
+ no_postcleanup=args.no_postcleanup,
+ no_precleanup=args.no_precleanup,
+ file_name=args.file_name,
+ group_name=args.group_name,
+ profile_name=args.profile_name,
+ eap_method=args.eap_method,
+ eap_identity=args.eap_identity,
+ ieee80211=args.ieee80211,
+ ieee80211u=args.ieee80211u,
+ ieee80211w=args.ieee80211w,
+ enable_pkc=args.enable_pkc,
+ bss_transition=args.bss_transition,
+ power_save=args.power_save,
+ disable_ofdma=args.disable_ofdma,
+ roam_ft_ds=args.roam_ft_ds,
+ key_management=args.key_management,
+ pairwise=args.pairwise,
+ private_key=args.private_key,
+ ca_cert=args.ca_cert,
+ client_cert=args.client_cert,
+ pk_passwd=args.pk_passwd,
+ pac_file=args.pac_file,
+ upstream_port=args.upstream_port,
+ expected_passfail_value=args.expected_passfail_value,
+ device_csv_name=args.device_csv_name,
+ wait_time=args.wait_time,
+ config=args.config,
+ selected_groups=args.group_name,
+ selected_profiles=args.profile_name,
+ no_browser_precleanup=True,
+ no_browser_postcleanup=True
+ )
+ print('CHECKING PORT AVAILBILITY for RB TEST')
+ self.port_clean_up(5003)
+ self.rb_test.change_port_to_ip()
+ self.rb_test.validate_and_process_args()
+ self.rb_test.config_obj = DeviceConfig.DeviceConfig(lanforge_ip=self.rb_test.host, file_name=self.rb_test.file_name, wait_time=self.rb_test.wait_time)
+ # if not self.rb_test.expected_passfail_value and self.rb_test.device_csv_name is None:
+ # self.rb_test.config_self.rb_test.device_csv_file(csv_name="device.csv")
+ self.rb_test.run_flask_server()
+ if self.rb_test.group_name and self.rb_test.profile_name and self.rb_test.file_name:
+ available_resources = self.rb_test.process_group_profiles()
+ else:
+ # --- Build configuration dictionary for WiFi parameters ---
+ config_dict = {
+ 'ssid': args.ssid,
+ 'passwd': args.passwd,
+ 'enc': args.encryp,
+ 'eap_method': args.eap_method,
+ 'eap_identity': args.eap_identity,
+ 'ieee80211': args.ieee80211,
+ 'ieee80211u': args.ieee80211u,
+ 'ieee80211w': args.ieee80211w,
+ 'enable_pkc': args.enable_pkc,
+ 'bss_transition': args.bss_transition,
+ 'power_save': args.power_save,
+ 'disable_ofdma': args.disable_ofdma,
+ 'roam_ft_ds': args.roam_ft_ds,
+ 'key_management': args.key_management,
+ 'pairwise': args.pairwise,
+ 'private_key': args.private_key,
+ 'ca_cert': args.ca_cert,
+ 'client_cert': args.client_cert,
+ 'pk_passwd': args.pk_passwd,
+ 'pac_file': args.pac_file,
+ 'server_ip': self.rb_test.upstream_port,
+ }
+ available_resources = self.rb_test.process_resources(config_dict)
+ if len(available_resources) != 0:
+ available_resources = self.rb_test.filter_ios_devices(available_resources)
+ if len(available_resources) == 0:
+ logging.error("No devices available to run the test. Exiting...")
+ return False
+
+ # --- Print available resources ---
+ logging.info("Devices available: {}".format(available_resources))
+ if self.rb_test.expected_passfail_value or self.rb_test.device_csv_name:
+ self.rb_test.update_passfail_value(available_resources)
+ # --- Handle incremental values ---
+ self.rb_test.handle_incremental(args, self.rb_test, available_resources, available_resources)
+ self.rb_test.handle_duration()
+ self.rb_test.run_test(available_resources)
+
+ except Exception as e:
+ logging.error("Error occured", e)
+ # traceback.print_exc()
+ finally:
+ if '--help' not in sys.argv and '-h' not in sys.argv:
+ self.rb_test.create_report()
+ if self.rb_test.dowebgui:
+ self.rb_test.webui_stop()
+ self.rb_test.stop()
+
+ # if not args.no_postcleanup:
+ # self.rb_test_obj.postcleanup()
+ self.rb_test.app = None
+ if self.current_exec == "parallel":
+ self.rb_obj_dict["parallel"]["rb_test"]["obj"] =self.rb_test
+ else:
+ for i in range(len(self.rb_obj_dict["series"])):
+ if self.rb_obj_dict["series"][f"rb_test_{i+1}"]["obj"] is None:
+ self.rb_obj_dict["series"][f"rb_test_{i+1}"]["obj"] = self.rb_test
+ break
+
+
+
+ return True
+
+
+ def run_rb_test(
+ self,
+ ssid: str = None,
+ passwd: str = None,
+ encryp: str = None,
+ url: str = "https://google.com",
+ max_speed: int = 0,
+ count: int = 1,
+ duration: str = None,
+ test_name: str = None,
+ dowebgui: bool = False,
+ result_dir: str = '',
+ lf_logger_config_json: str = None,
+ log_level: str = None,
+ debug: bool = False,
+ device_list: str = None,
+ webgui_incremental: str = None,
+ incremental: bool = False,
+ no_laptops: bool = False,
+ no_postcleanup: bool = False,
+ no_precleanup: bool = False,
+ file_name: str = None,
+ group_name: str = None,
+ profile_name: str = None,
+ eap_method: str = 'DEFAULT',
+ eap_identity: str = 'DEFAULT',
+ ieee80211: bool = False,
+ ieee80211u: bool = False,
+ ieee80211w: int = 1,
+ enable_pkc: bool = False,
+ bss_transition: bool = False,
+ power_save: bool = False,
+ disable_ofdma: bool = False,
+ roam_ft_ds: bool = False,
+ key_management: str = 'DEFAULT',
+ pairwise: str = 'NA',
+ private_key: str = 'NA',
+ ca_cert: str = 'NA',
+ client_cert: str = 'NA',
+ pk_passwd: str = 'NA',
+ pac_file: str = 'NA',
+ upstream_port: str = 'NA',
+ help_summary: str = None,
+ expected_passfail_value: str = None,
+ device_csv_name: str = None,
+ wait_time: int = 60,
+ config: bool = False,
+ exec_type: str = None
+ ):
+ args = SimpleNamespace(**locals())
+ args.host = self.lanforge_ip
+ return self.run_rb_test1(args)
+
    def browser_cleanup(self,rb_test=False,yt_test=False):
        """Kill leftover browser processes on the laptops used by the Real
        Browser and/or YouTube tests.

        For each laptop generic endpoint an OS-appropriate kill command is
        set (taskkill on Windows, pkill on Linux/macOS) and the endpoints
        are started so the command actually runs on the device.

        :param rb_test: clean up the Real Browser test's endpoints
            (uses ``self.rb_test_obj`` -- NOTE(review): elsewhere in this
            class the RB test object is stored as ``self.rb_test``; confirm
            which attribute is populated before this is called).
        :param yt_test: clean up the YouTube test's endpoints
            (``self.yt_test_obj``).
        """
        # count = 0
        # series_tests = args.series_tests.split(',') if args.series_tests else None
        # parallel_tests = args.parallel_tests.split(',') if args.parallel_tests else None
        # zoom_test = False
        # yt_test = False
        # rb_test = False
        # if 'zoom_test' in parallel_tests:
        #     count += 1
        # if 'yt_test' in parallel_tests:
        #     count += 1
        # if 'rb_test' in parallel_tests:
        #     count += 1
        # if count <=1:
        #     self.browser_kill = True
        # if args.series_test and not parallel_tests:
        #     self.browser_kill = True
        #     return True
        # if rb_test:
        #     cnt = 0
        #     flag = False
        #     while not self.rb_build_done:
        #         time.sleep(1)
        #         cnt+=1
        #         if cnt >= 30:
        #             flag = True
        #             break
        #     if flag:
        #         return False
        print('calledddddd')  # NOTE(review): leftover debug print
        # time.sleep(20)
        if rb_test:
            print('inn000')
            print('laptop_os_types',self.rb_test_obj.laptop_os_types)
            print('endpsss',self.rb_test_obj.generic_endps_profile.created_endp)
            # Assign a per-OS kill command to each laptop's endpoint.
            # Endpoint i is assumed to correspond to laptop_os_types[i].
            for i in range(0, len(self.rb_test_obj.laptop_os_types)):
                print('inn1111')
                if self.rb_test_obj.laptop_os_types[i] == 'windows':
                    cmd = "echo Performing POST cleanup of browser processes... & taskkill /F /IM chrome.exe /T >nul 2>&1 & taskkill /F /IM chromedriver.exe /T >nul 2>&1 & echo Browser processes terminated."
                    self.rb_test_obj.generic_endps_profile.set_cmd(self.rb_test_obj.generic_endps_profile.created_endp[i], cmd)
                elif self.rb_test_obj.laptop_os_types[i] == 'linux':
                    # cmd = "su -l lanforge ctrb.bash %s %s %s %s" % (self.rb_test_obj.new_port_list[i], self.rb_test_obj.url, self.rb_test_obj.upstream_port, self.rb_test_obj.duration)
                    cmd = "pkill -f chrome; pkill -f chromedriver"
                    self.rb_test_obj.generic_endps_profile.set_cmd(self.rb_test_obj.generic_endps_profile.created_endp[i], cmd)
                elif self.rb_test_obj.laptop_os_types[i] == 'macos':
                    # NOTE(review): the pattern is unquoted, so pkill gets
                    # pattern "Google" plus a stray "Chrome" argument;
                    # probably meant: pkill -f 'Google Chrome'
                    cmd = "pkill -f Google Chrome; pkill -f chromedriver;"
                    self.rb_test_obj.generic_endps_profile.set_cmd(self.rb_test_obj.generic_endps_profile.created_endp[i], cmd)
                # NOTE(review): these suffixes are appended AFTER set_cmd()
                # has already been called above, so they never reach the
                # endpoint and are overwritten on the next iteration --
                # dead code or a misplaced block; confirm intent.
                if self.rb_test_obj.browser_precleanup:
                    cmd+=" precleanup"
                if self.rb_test_obj.browser_postcleanup:
                    cmd+=" postcleanup"

            # Start the endpoints batch by batch (same order the RB test
            # used), so the kill commands execute on the devices.
            for i, cx_batch in enumerate(self.rb_test_obj.cx_order_list):
                self.rb_test_obj.start_specific(cx_batch)
                logging.info(f"browser cleanup on {cx_batch}")
            print('realbrowser test laptop cleaing.....')
            # Give the remote kill commands time to finish.
            time.sleep(20)


        if yt_test:
            # Same per-OS cleanup for the YouTube test's laptops.
            for i in range(0, len(self.yt_test_obj.real_sta_os_types)):
                if self.yt_test_obj.real_sta_os_types[i] == 'windows':
                    cmd = "echo Performing POST cleanup of browser processes... & taskkill /F /IM chrome.exe /T >nul 2>&1 & taskkill /F /IM chromedriver.exe /T >nul 2>&1 & echo Browser processes terminated."
                    self.yt_test_obj.generic_endps_profile.set_cmd(self.yt_test_obj.generic_endps_profile.created_endp[i], cmd)
                elif self.yt_test_obj.real_sta_os_types[i] == 'linux':
                    cmd = "pkill -f chrome; pkill -f chromedriver"
                    self.yt_test_obj.generic_endps_profile.set_cmd(self.yt_test_obj.generic_endps_profile.created_endp[i], cmd)

                elif self.yt_test_obj.real_sta_os_types[i] == 'macos':
                    # NOTE(review): same unquoted 'Google Chrome' pattern
                    # issue as the rb_test branch above.
                    cmd = "pkill -f Google Chrome; pkill -f chromedriver;"
                    self.yt_test_obj.generic_endps_profile.set_cmd(self.yt_test_obj.generic_endps_profile.created_endp[i], cmd)

            # YT endpoints are started all at once (no batching here).
            self.yt_test_obj.generic_endps_profile.start_cx()
            print('youtube test laptop cleaing.....')
            time.sleep(20)

        # if zoom_test:
        #     for i in range(len(self.zoom_test_obj.real_sta_os_type)):
        #         if self.zoom_test_obj.real_sta_os_type[i] == "windows":
        #             cmd = f"py zoom_client.py --ip {self.zoom_test_obj.upstream_port}"
        #             self.zoom_test_obj.generic_endps_profile.set_cmd(self.zoom_test_obj.generic_endps_profile.created_endp[i], cmd)
        #         elif self.zoom_test_obj.real_sta_os_type[i] == 'linux':
        #             cmd = "su -l lanforge ctzoom.bash %s %s %s" % (self.zoom_test_obj.new_port_list[i], self.zoom_test_obj.upstream_port, "client")
        #             self.zoom_test_obj.generic_endps_profile.set_cmd(self.zoom_test_obj.generic_endps_profile.created_endp[i], cmd)
        #         elif self.zoom_test_obj.real_sta_os_type[i] == 'macos':
        #             cmd = "sudo bash ctzoom.bash %s %s" % (self.zoom_test_obj.upstream_port, "client")
        #             self.zoom_test_obj.generic_endps_profile.set_cmd(self.zoom_test_obj.generic_endps_profile.created_endp[i], cmd)

        #     self.zoom_test_obj.generic_endps_profile.start_cx()
+ def render_each_test(self,ce):
+ # ce = "series"
+ unq_tests = []
+ test_map = {}
+ if ce == "series":
+ series_tests = self.series_tests.copy()
+ for test in series_tests:
+ if test not in test_map:
+ test_map[test] = 1
+ unq_tests.append(test)
+ else:
+ test_map[test] += 1
+ else:
+ unq_tests = self.parallel_tests.copy()
+ print('self.series_tests',self.series_tests)
+ print('test_map',test_map)
+ print('unq_tests',unq_tests)
+ for test_name in unq_tests:
+ try:
+ if test_name == "http_test":
+ # obj = []
+ obj_no = 1
+ obj_name = 'http_test'
+ if ce == "series":
+ obj_name += "_1"
+ while obj_name in self.http_obj_dict[ce]:
+ if ce == "parallel":
+ obj_no = ''
+ # report_path = self.result_path
+ # print("Current working directory:", os.getcwd())
+ http_data = self.http_obj_dict[ce][obj_name]["data"]
+ if http_data["bands"] == "Both":
+ num_stations = num_stations * 2
+
+ # report.set_title("HTTP DOWNLOAD TEST")
+ # report.set_date(date)
+ # if 'http_test' not in self.test_count_dict:
+ # self.test_count_dict['http_test']=0
+ # self.test_count_dict['http_test']+=1
+ self.overall_report.set_obj_html(_obj_title=f'HTTP Test {obj_no}', _obj="")
+ self.overall_report.build_objective()
+ self.overall_report.set_table_title("Test Setup Information")
+ self.overall_report.build_table_title()
+ self.overall_report.test_setup_table(value="Test Setup Information", test_setup_data=http_data["test_setup_info"])
+
+ graph2 = self.http_obj_dict[ce][obj_name]["obj"].graph_2(http_data["dataset2"], lis=http_data["lis"], bands=http_data["bands"],graph_no=obj_no)
+ print("graph name {}".format(graph2))
+ self.overall_report.set_graph_image(graph2)
+ self.overall_report.set_csv_filename(graph2)
+ self.overall_report.move_csv_file()
+ self.overall_report.move_graph_image()
+ self.overall_report.build_graph()
+
+ self.overall_report.set_obj_html(
+ "Average time taken to download file ",
+ "The below graph represents average time taken to download for each client "
+ ". X- axis shows “Average time taken to download a file ” and Y-axis shows "
+ "Client names."
+ )
+ self.overall_report.build_objective()
+
+ graph = self.http_obj_dict[ce][obj_name]["obj"].generate_graph(dataset=http_data["dataset"], lis=http_data["lis"], bands=http_data["bands"],graph_no=obj_no)
+ self.overall_report.set_graph_image(graph)
+ self.overall_report.set_csv_filename(graph)
+ self.overall_report.move_csv_file()
+ self.overall_report.move_graph_image()
+ self.overall_report.build_graph()
+
+ self.overall_report.set_obj_html(
+ "Download Time Table Description",
+ "This Table will provide you information of the "
+ "minimum, maximum and the average time taken by clients to download a webpage in seconds"
+ )
+ self.overall_report.build_objective()
+
+ self.http_obj_dict[ce][obj_name]["obj"].response_port = self.http_obj_dict[ce][obj_name]["obj"].local_realm.json_get("/port/all")
+ self.http_obj_dict[ce][obj_name]["obj"].channel_list, self.http_obj_dict[ce][obj_name]["obj"].mode_list, self.http_obj_dict[ce][obj_name]["obj"].ssid_list = [], [], []
+
+ if self.http_obj_dict[ce][obj_name]["obj"].client_type == "Real":
+ self.http_obj_dict[ce][obj_name]["obj"].devices = self.http_obj_dict[ce][obj_name]["obj"].devices_list
+ for interface in self.http_obj_dict[ce][obj_name]["obj"].response_port['interfaces']:
+ for port, port_data in interface.items():
+ if port in self.http_obj_dict[ce][obj_name]["obj"].port_list:
+ self.http_obj_dict[ce][obj_name]["obj"].channel_list.append(str(port_data['channel']))
+ self.http_obj_dict[ce][obj_name]["obj"].mode_list.append(str(port_data['mode']))
+ self.http_obj_dict[ce][obj_name]["obj"].ssid_list.append(str(port_data['ssid']))
+ elif self.http_obj_dict[ce][obj_name]["obj"].client_type == "Virtual":
+ self.http_obj_dict[ce][obj_name]["obj"].devices = self.http_obj_dict[ce][obj_name]["obj"].station_list[0]
+ for interface in self.http_obj_dict[ce][obj_name]["obj"].response_port['interfaces']:
+ for port, port_data in interface.items():
+ if port in self.http_obj_dict[ce][obj_name]["obj"].station_list[0]:
+ self.http_obj_dict[ce][obj_name]["obj"].channel_list.append(str(port_data['channel']))
+ self.http_obj_dict[ce][obj_name]["obj"].mode_list.append(str(port_data['mode']))
+ self.http_obj_dict[ce][obj_name]["obj"].macid_list.append(str(port_data['mac']))
+ self.http_obj_dict[ce][obj_name]["obj"].ssid_list.append(str(port_data['ssid']))
+
+ # Processing result_data
+ z, z1, z2 = [], [], []
+ for fcc in list(http_data["result_data"].keys()):
+ z.extend([str(round(i / 1000, 1)) for i in http_data["result_data"][fcc]["min"]])
+ z1.extend([str(round(i / 1000, 1)) for i in http_data["result_data"][fcc]["max"]])
+ z2.extend([str(round(i / 1000, 1)) for i in http_data["result_data"][fcc]["avg"]])
+
+ download_table_value_dup = {"Minimum": z, "Maximum": z1, "Average": z2}
+ download_table_value = {"Band": http_data["bands"], "Minimum": z, "Maximum": z1, "Average": z2}
+
+ # KPI reporting
+ kpi_path = self.overall_report.get_report_path()
+ print("kpi_path :{kpi_path}".format(kpi_path=kpi_path))
+
+ kpi_csv = lf_kpi_csv.lf_kpi_csv(
+ _kpi_path=kpi_path,
+ _kpi_test_rig=http_data["test_rig"],
+ _kpi_test_tag=http_data["test_tag"],
+ _kpi_dut_hw_version=http_data["dut_hw_version"],
+ _kpi_dut_sw_version=http_data["dut_sw_version"],
+ _kpi_dut_model_num=http_data["dut_model_num"],
+ _kpi_dut_serial_num=http_data["dut_serial_num"],
+ _kpi_test_id=http_data["test_id"]
+ )
+ kpi_csv.kpi_dict['Units'] = "Mbps"
+ for band in range(len(download_table_value["Band"])):
+ kpi_csv.kpi_csv_get_dict_update_time()
+ kpi_csv.kpi_dict['Graph-Group'] = "Webpage Download {band}".format(
+ band=download_table_value['Band'][band])
+ kpi_csv.kpi_dict['short-description'] = "Webpage download {band} Minimum".format(
+ band=download_table_value['Band'][band])
+ kpi_csv.kpi_dict['numeric-score'] = "{min}".format(min=download_table_value['Minimum'][band])
+ kpi_csv.kpi_csv_write_dict(kpi_csv.kpi_dict)
+
+ kpi_csv.kpi_dict['short-description'] = "Webpage download {band} Maximum".format(
+ band=download_table_value['Band'][band])
+ kpi_csv.kpi_dict['numeric-score'] = "{max}".format(max=download_table_value['Maximum'][band])
+ kpi_csv.kpi_csv_write_dict(kpi_csv.kpi_dict)
+
+ kpi_csv.kpi_dict['short-description'] = "Webpage download {band} Average".format(
+ band=download_table_value['Band'][band])
+ kpi_csv.kpi_dict['numeric-score'] = "{avg}".format(avg=download_table_value['Average'][band])
+ kpi_csv.kpi_csv_write_dict(kpi_csv.kpi_dict)
+
+ if http_data["csv_outfile"] is not None:
+ current_time = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())
+ http_data["csv_outfile"] = "{}_{}-test_l3_longevity.csv".format(http_data["csv_outfile"], current_time)
+ http_data["csv_outfile"] = self.overall_report.file_add_path(http_data["csv_outfile"])
+ print("csv output file : {}".format(http_data["csv_outfile"]))
+
+ test_setup = pd.DataFrame(download_table_value_dup)
+ self.overall_report.set_table_dataframe(test_setup)
+ self.overall_report.build_table()
+
+ if self.http_obj_dict[ce][obj_name]["obj"].group_name:
+ self.overall_report.set_table_title("Overall Results for Groups")
+ else:
+ self.overall_report.set_table_title("Overall Results")
+ self.overall_report.build_table_title()
+
+ if self.http_obj_dict[ce][obj_name]["obj"].client_type == "Real":
+ if self.http_obj_dict[ce][obj_name]["obj"].expected_passfail_value or self.http_obj_dict[ce][obj_name]["obj"].device_csv_name:
+ test_input_list, pass_fail_list = self.http_obj_dict[ce][obj_name]["obj"].get_pass_fail_list(http_data["dataset2"])
+
+ if self.http_obj_dict[ce][obj_name]["obj"].group_name:
+ for key, val in self.http_obj_dict[ce][obj_name]["obj"].group_device_map.items():
+ if self.http_obj_dict[ce][obj_name]["obj"].expected_passfail_value or self.http_obj_dict[ce][obj_name]["obj"].device_csv_name:
+ dataframe = self.http_obj_dict[ce][obj_name]["obj"].generate_dataframe(
+ val, self.http_obj_dict[ce][obj_name]["obj"].devices, self.http_obj_dict[ce][obj_name]["obj"].macid_list, self.http_obj_dict[ce][obj_name]["obj"].channel_list,
+ self.http_obj_dict[ce][obj_name]["obj"].ssid_list, self.http_obj_dict[ce][obj_name]["obj"].mode_list, http_data["dataset2"], test_input_list,
+ http_data["dataset"], http_data["dataset1"], http_data["rx_rate"], pass_fail_list
+ )
+ else:
+ dataframe = self.http_obj_dict[ce][obj_name]["obj"].generate_dataframe(
+ val, self.http_obj_dict[ce][obj_name]["obj"].devices, self.http_obj_dict[ce][obj_name]["obj"].macid_list, self.http_obj_dict[ce][obj_name]["obj"].channel_list,
+ self.http_obj_dict[ce][obj_name]["obj"].ssid_list, self.http_obj_dict[ce][obj_name]["obj"].mode_list, http_data["dataset2"], [], http_data["dataset"],
+ http_data["dataset1"], http_data["rx_rate"], []
+ )
+ if dataframe:
+ self.overall_report.set_obj_html("", "Group: {}".format(key))
+ self.overall_report.build_objective()
+ dataframe1 = pd.DataFrame(dataframe)
+ self.overall_report.set_table_dataframe(dataframe1)
+ self.overall_report.build_table()
+ else:
+ dataframe = {
+ " Clients": self.http_obj_dict[ce][obj_name]["obj"].devices,
+ " MAC ": self.http_obj_dict[ce][obj_name]["obj"].macid_list,
+ " Channel": self.http_obj_dict[ce][obj_name]["obj"].channel_list,
+ " SSID ": self.http_obj_dict[ce][obj_name]["obj"].ssid_list,
+ " Mode": self.http_obj_dict[ce][obj_name]["obj"].mode_list,
+ " No of times File downloaded ": http_data["dataset2"],
+ " Average time taken to Download file (ms)": http_data["dataset"],
+ " Bytes-rd (Mega Bytes) ": http_data["dataset1"],
+ "Rx Rate (Mbps)": http_data["rx_rate"],
+ "Failed url's": self.http_obj_dict[ce][obj_name]["obj"].data["total_err"]
+ }
+ if self.http_obj_dict[ce][obj_name]["obj"].expected_passfail_value or self.http_obj_dict[ce][obj_name]["obj"].device_csv_name:
+ dataframe[" Expected value of no of times file downloaded"] = test_input_list
+ dataframe["Status"] = pass_fail_list
+ dataframe1 = pd.DataFrame(dataframe)
+ self.overall_report.set_table_dataframe(dataframe1)
+ self.overall_report.build_table()
+ else:
+ dataframe = {
+ " Clients": self.http_obj_dict[ce][obj_name]["obj"].devices,
+ " MAC ": self.http_obj_dict[ce][obj_name]["obj"].macid_list,
+ " Channel": self.http_obj_dict[ce][obj_name]["obj"].channel_list,
+ " SSID ": self.http_obj_dict[ce][obj_name]["obj"].ssid_list,
+ " Mode": self.http_obj_dict[ce][obj_name]["obj"].mode_list,
+ " No of times File downloaded ": http_data["dataset2"],
+ " Average time taken to Download file (ms)": http_data["dataset"],
+ " Bytes-rd (Mega Bytes) ": http_data["dataset1"]
+ }
+ dataframe1 = pd.DataFrame(dataframe)
+ self.overall_report.set_table_dataframe(dataframe1)
+ self.overall_report.build_table()
+
+ # self.http_obj_dict[ce]
+ if ce == "series":
+ obj_no += 1
+ obj_name = f"http_test_{obj_no}"
+ else:
+ break
+
+ elif test_name == "ftp_test":
+ obj_no=1
+ obj_name = "ftp_test"
+ if ce == "series":
+ obj_name += "_1"
+ while obj_name in self.ftp_obj_dict[ce]:
+ # obj_name = f"ftp_test_{obj_no}"
+ if ce == "parallel":
+ obj_no = ''
+ params = self.ftp_obj_dict[ce][obj_name]["data"].copy()
+ ftp_data = params["ftp_data"].copy() if isinstance(params["ftp_data"], (list, dict, set)) else params["ftp_data"]
+ date = params["date"].copy() if isinstance(params["date"], (list, dict, set)) else params["date"]
+ input_setup_info = params["input_setup_info"].copy() if isinstance(params["input_setup_info"], (list, dict, set)) else params["input_setup_info"]
+ test_rig = params["test_rig"].copy() if isinstance(params["test_rig"], (list, dict, set)) else params["test_rig"]
+ test_tag = params["test_tag"].copy() if isinstance(params["test_tag"], (list, dict, set)) else params["test_tag"]
+ dut_hw_version = params["dut_hw_version"].copy() if isinstance(params["dut_hw_version"], (list, dict, set)) else params["dut_hw_version"]
+ dut_sw_version = params["dut_sw_version"].copy() if isinstance(params["dut_sw_version"], (list, dict, set)) else params["dut_sw_version"]
+ dut_model_num = params["dut_model_num"].copy() if isinstance(params["dut_model_num"], (list, dict, set)) else params["dut_model_num"]
+ dut_serial_num = params["dut_serial_num"].copy() if isinstance(params["dut_serial_num"], (list, dict, set)) else params["dut_serial_num"]
+ test_id = params["test_id"].copy() if isinstance(params["test_id"], (list, dict, set)) else params["test_id"]
+ bands = params["bands"].copy() if isinstance(params["bands"], (list, dict, set)) else params["bands"]
+ csv_outfile = params["csv_outfile"].copy() if isinstance(params["csv_outfile"], (list, dict, set)) else params["csv_outfile"]
+ local_lf_report_dir = params["local_lf_report_dir"].copy() if isinstance(params["local_lf_report_dir"], (list, dict, set)) else params["local_lf_report_dir"]
+ report_path = params["report_path"].copy() if isinstance(params["report_path"], (list, dict, set)) else params["report_path"]
+
+ # Optional parameter
+ config_devices = ""
+ if "config_devices" in params:
+ config_devices = params["config_devices"].copy() if isinstance(params["config_devices"], (list, dict, set)) else params["config_devices"]
+
+ no_of_stations = ""
+ duration = ""
+ x_fig_size = 18
+ y_fig_size = len(self.ftp_obj_dict[ce][obj_name]["obj"].real_client_list1) * .5 + 4
+
+ if int(self.ftp_obj_dict[ce][obj_name]["obj"].traffic_duration) < 60:
+ duration = str(self.ftp_obj_dict[ce][obj_name]["obj"].traffic_duration) + "s"
+ elif int(self.ftp_obj_dict[ce][obj_name]["obj"].traffic_duration == 60) or (int(self.ftp_obj_dict[ce][obj_name]["obj"].traffic_duration) > 60 and int(self.ftp_obj_dict[ce][obj_name]["obj"].traffic_duration) < 3600):
+ duration = str(self.ftp_obj_dict[ce][obj_name]["obj"].traffic_duration / 60) + "m"
+ else:
+ if int(self.ftp_obj_dict[ce][obj_name]["obj"].traffic_duration == 3600) or (int(self.ftp_obj_dict[ce][obj_name]["obj"].traffic_duration) > 3600):
+ duration = str(self.ftp_obj_dict[ce][obj_name]["obj"].traffic_duration / 3600) + "h"
+
+ client_list = []
+ if self.ftp_obj_dict[ce][obj_name]["obj"].clients_type == "Real":
+ client_list = self.ftp_obj_dict[ce][obj_name]["obj"].real_client_list1
+ android_devices, windows_devices, linux_devices, mac_devices = 0, 0, 0, 0
+ all_devices_names = []
+ device_type = []
+ total_devices = ""
+ for i in self.ftp_obj_dict[ce][obj_name]["obj"].real_client_list:
+ split_device_name = i.split(" ")
+ if 'android' in split_device_name:
+ all_devices_names.append(split_device_name[2] + ("(Android)"))
+ device_type.append("Android")
+ android_devices += 1
+ elif 'Win' in split_device_name:
+ all_devices_names.append(split_device_name[2] + ("(Windows)"))
+ device_type.append("Windows")
+ windows_devices += 1
+ elif 'Lin' in split_device_name:
+ all_devices_names.append(split_device_name[2] + ("(Linux)"))
+ device_type.append("Linux")
+ linux_devices += 1
+ elif 'Mac' in split_device_name:
+ all_devices_names.append(split_device_name[2] + ("(Mac)"))
+ device_type.append("Mac")
+ mac_devices += 1
+
+ if android_devices > 0:
+ total_devices += f" Android({android_devices})"
+ if windows_devices > 0:
+ total_devices += f" Windows({windows_devices})"
+ if linux_devices > 0:
+ total_devices += f" Linux({linux_devices})"
+ if mac_devices > 0:
+ total_devices += f" Mac({mac_devices})"
+ else:
+ if self.ftp_obj_dict[ce][obj_name]["obj"].clients_type == "Virtual":
+ client_list = self.ftp_obj_dict[ce][obj_name]["obj"].station_list
+ if 'ftp_test' not in self.test_count_dict:
+ self.test_count_dict['ftp_test']=0
+ self.test_count_dict['ftp_test']+=1
+ self.overall_report.set_obj_html(_obj_title=f'FTP Test ', _obj="")
+ self.overall_report.build_objective()
+ self.overall_report.set_table_title("Test Setup Information")
+ self.overall_report.build_table_title()
+
+ if self.ftp_obj_dict[ce][obj_name]["obj"].clients_type == "Virtual":
+ no_of_stations = str(len(self.ftp_obj_dict[ce][obj_name]["obj"].station_list))
+ else:
+ no_of_stations = str(len(self.ftp_obj_dict[ce][obj_name]["obj"].input_devices_list))
+
+ if self.ftp_obj_dict[ce][obj_name]["obj"].clients_type == "Real":
+ if config_devices == "":
+ test_setup_info = {
+ "AP Name": self.ftp_obj_dict[ce][obj_name]["obj"].ap_name,
+ "SSID": self.ftp_obj_dict[ce][obj_name]["obj"].ssid,
+ "Security": self.ftp_obj_dict[ce][obj_name]["obj"].security,
+ "Device List": ", ".join(all_devices_names),
+ "No of Devices": "Total" + f"({no_of_stations})" + total_devices,
+ "Failed CXs": self.ftp_obj_dict[ce][obj_name]["obj"].failed_cx if self.ftp_obj_dict[ce][obj_name]["obj"].failed_cx else "NONE",
+ "File size": self.ftp_obj_dict[ce][obj_name]["obj"].file_size,
+ "File location": "/home/lanforge",
+ "Traffic Direction": self.ftp_obj_dict[ce][obj_name]["obj"].direction,
+ "Traffic Duration ": duration
+ }
+ else:
+ group_names = ', '.join(config_devices.keys())
+ profile_names = ', '.join(config_devices.values())
+ configmap = "Groups:" + group_names + " -> Profiles:" + profile_names
+ test_setup_info = {
+ "AP Name": self.ftp_obj_dict[ce][obj_name]["obj"].ap_name,
+ 'Configuration': configmap,
+ "No of Devices": "Total" + f"({no_of_stations})" + total_devices,
+ "File size": self.ftp_obj_dict[ce][obj_name]["obj"].file_size,
+ "File location": "/home/lanforge",
+ "Traffic Direction": self.ftp_obj_dict[ce][obj_name]["obj"].direction,
+ "Traffic Duration ": duration
+ }
+ else:
+ test_setup_info = {
+ "AP Name": self.ftp_obj_dict[ce][obj_name]["obj"].ap_name,
+ "SSID": self.ftp_obj_dict[ce][obj_name]["obj"].ssid,
+ "Security": self.ftp_obj_dict[ce][obj_name]["obj"].security,
+ "No of Devices": no_of_stations,
+ "File size": self.ftp_obj_dict[ce][obj_name]["obj"].file_size,
+ "File location": "/home/lanforge",
+ "Traffic Direction": self.ftp_obj_dict[ce][obj_name]["obj"].direction,
+ "Traffic Duration ": duration
+ }
+
+ self.overall_report.test_setup_table(value="Test Setup Information", test_setup_data=test_setup_info)
+
+ self.overall_report.set_obj_html(
+ _obj_title=f"No of times file {self.ftp_obj_dict[ce][obj_name]['obj'].direction}",
+ _obj=f"The below graph represents number of times a file {self.ftp_obj_dict[ce][obj_name]['obj'].direction} for each client"
+ f"(WiFi) traffic. X- axis shows “No of times file {self.ftp_obj_dict[ce][obj_name]['obj'].direction}” and Y-axis shows "
+ f"Client names.")
+
+ self.overall_report.build_objective()
+ graph = lf_bar_graph_horizontal(_data_set=[self.ftp_obj_dict[ce][obj_name]["obj"].url_data], _xaxis_name=f"No of times file {self.ftp_obj_dict[ce][obj_name]['obj'].direction}",
+ _yaxis_name="Client names",
+ _yaxis_categories=[i for i in client_list],
+ _yaxis_label=[i for i in client_list],
+ _yaxis_step=1,
+ _yticks_font=8,
+ _yticks_rotation=None,
+ _graph_title=f"No of times file {self.ftp_obj_dict[ce][obj_name]['obj'].direction} (Count)",
+ _title_size=16,
+ _figsize=(x_fig_size, y_fig_size),
+ _legend_loc="best",
+ _legend_box=(1.0, 1.0),
+ _color_name=['orange'],
+ _show_bar_value=True,
+ _enable_csv=True,
+ _graph_image_name=f"Total-url_ftp_{obj_no}", _color_edge=['black'],
+ _color=['orange'],
+ _label=[self.ftp_obj_dict[ce][obj_name]["obj"].direction])
+ graph_png = graph.build_bar_graph_horizontal()
+ print("graph name {}".format(graph_png))
+ self.overall_report.set_graph_image(graph_png)
+ # need to move the graph image to the results
+ self.overall_report.move_graph_image()
+ self.overall_report.set_csv_filename(graph_png)
+ self.overall_report.move_csv_file()
+ self.overall_report.build_graph()
+ self.overall_report.set_obj_html(
+ _obj_title=f"Average time taken to {self.ftp_obj_dict[ce][obj_name]['obj'].direction} file ",
+ _obj=f"The below graph represents average time taken to {self.ftp_obj_dict[ce][obj_name]['obj'].direction} for each client "
+ f"(WiFi) traffic. X- axis shows “Average time taken to {self.ftp_obj_dict[ce][obj_name]['obj'].direction} a file ” and Y-axis shows "
+ f"Client names.")
+
+ self.overall_report.build_objective()
+ graph = lf_bar_graph_horizontal(_data_set=[self.ftp_obj_dict[ce][obj_name]["obj"].uc_avg], _xaxis_name=f"Average time taken to {self.ftp_obj_dict[ce][obj_name]['obj'].direction} file in ms",
+ _yaxis_name="Client names",
+ _yaxis_categories=[i for i in client_list],
+ _yaxis_label=[i for i in client_list],
+ _yaxis_step=1,
+ _yticks_font=8,
+ _yticks_rotation=None,
+ _graph_title=f"Average time taken to {self.ftp_obj_dict[ce][obj_name]['obj'].direction} file",
+ _title_size=16,
+ _figsize=(x_fig_size, y_fig_size),
+ _legend_loc="best",
+ _legend_box=(1.0, 1.0),
+ _color_name=['steelblue'],
+ _show_bar_value=True,
+ _enable_csv=True,
+ _graph_image_name=f"ucg-avg_ftp_{obj_no}", _color_edge=['black'],
+ _color=['steelblue'],
+ _label=[self.ftp_obj_dict[ce][obj_name]["obj"].direction])
+ graph_png = graph.build_bar_graph_horizontal()
+ print("graph name {}".format(graph_png))
+ self.overall_report.set_graph_image(graph_png)
+ self.overall_report.move_graph_image()
+ # need to move the graph image to the results
+ self.overall_report.set_csv_filename(graph_png)
+ self.overall_report.move_csv_file()
+ self.overall_report.build_graph()
+ if(self.ftp_obj_dict[ce][obj_name]["obj"].dowebgui and self.ftp_obj_dict[ce][obj_name]["obj"].get_live_view):
+ for floor in range(0,int(self.ftp_obj_dict[ce][obj_name]["obj"].total_floors)):
+ script_dir = os.path.dirname(os.path.abspath(__file__))
+ throughput_image_path = os.path.join(script_dir, "heatmap_images", f"ftp_{self.ftp_obj_dict[ce][obj_name]['obj'].test_name}_{floor+1}.png")
+ # rssi_image_path = os.path.join(script_dir, "heatmap_images", f"{self.test_name}_rssi_{floor+1}.png")
+ timeout = 60 # seconds
+ start_time = time.time()
+
+ while not (os.path.exists(throughput_image_path)):
+ if time.time() - start_time > timeout:
+ print("Timeout: Images not found within 60 seconds.")
+ break
+ time.sleep(1)
+ while not os.path.exists(throughput_image_path):
+ if os.path.exists(throughput_image_path):
+ break
+ # time.sleep(10)
+ if os.path.exists(throughput_image_path):
+ self.overall_report.set_custom_html('')
+ self.overall_report.build_custom()
+ # self.overall_report.set_custom_html("Average Throughput Heatmap:
")
+ # self.overall_report.build_custom()
+ self.overall_report.set_custom_html(f'
')
+ self.overall_report.build_custom()
+ # os.remove(throughput_image_path)
+ self.overall_report.set_obj_html("File Download Time (sec)", "The below table will provide information of "
+ "minimum, maximum and the average time taken by clients to download a file in seconds")
+ self.overall_report.build_objective()
+ dataframe2 = {
+ "Minimum": [str(round(min(self.ftp_obj_dict[ce][obj_name]["obj"].uc_min) / 1000, 1))],
+ "Maximum": [str(round(max(self.ftp_obj_dict[ce][obj_name]["obj"].uc_max) / 1000, 1))],
+ "Average": [str(round((sum(self.ftp_obj_dict[ce][obj_name]["obj"].uc_avg) / len(client_list)) / 1000, 1))]
+ }
+ dataframe3 = pd.DataFrame(dataframe2)
+ self.overall_report.set_table_dataframe(dataframe3)
+ self.overall_report.build_table()
+ self.overall_report.set_table_title("Overall Results")
+ self.overall_report.build_table_title()
+ if self.ftp_obj_dict[ce][obj_name]["obj"].clients_type == 'Real':
+ # Calculating the pass/fail criteria when either expected_passfail_val or csv_name is provided
+ if self.ftp_obj_dict[ce][obj_name]["obj"].expected_passfail_val or self.ftp_obj_dict[ce][obj_name]["obj"].csv_name:
+ self.ftp_obj_dict[ce][obj_name]["obj"].get_pass_fail_list(client_list)
+ # When groups are provided a seperate table will be generated for each group using generate_dataframe
+ if self.ftp_obj_dict[ce][obj_name]["obj"].group_name:
+ for key, val in self.ftp_obj_dict[ce][obj_name]["obj"].group_device_map.items():
+ if self.ftp_obj_dict[ce][obj_name]["obj"].expected_passfail_val or self.ftp_obj_dict[ce][obj_name]["obj"].csv_name:
+ dataframe = self.ftp_obj_dict[ce][obj_name]["obj"].generate_dataframe(val, client_list, self.ftp_obj_dict[ce][obj_name]["obj"].mac_id_list, self.ftp_obj_dict[ce][obj_name]["obj"].channel_list, self.ftp_obj_dict[ce][obj_name]["obj"].ssid_list, self.ftp_obj_dict[ce][obj_name]["obj"].mode_list,
+ self.ftp_obj_dict[ce][obj_name]["obj"].url_data, self.ftp_obj_dict[ce][obj_name]["obj"].test_input_list, self.ftp_obj_dict[ce][obj_name]["obj"].uc_avg, self.ftp_obj_dict[ce][obj_name]["obj"].bytes_rd, self.ftp_obj_dict[ce][obj_name]["obj"].rx_rate, self.ftp_obj_dict[ce][obj_name]["obj"].pass_fail_list)
+ else:
+ dataframe = self.ftp_obj_dict[ce][obj_name]["obj"].generate_dataframe(val, client_list, self.ftp_obj_dict[ce][obj_name]["obj"].mac_id_list, self.ftp_obj_dict[ce][obj_name]["obj"].channel_list, self.ftp_obj_dict[ce][obj_name]["obj"].ssid_list,
+ self.ftp_obj_dict[ce][obj_name]["obj"].mode_list, self.ftp_obj_dict[ce][obj_name]["obj"].url_data, [], self.ftp_obj_dict[ce][obj_name]["obj"].uc_avg, self.ftp_obj_dict[ce][obj_name]["obj"].bytes_rd, self.ftp_obj_dict[ce][obj_name]["obj"].rx_rate, [])
+
+ if dataframe:
+ self.overall_report.set_obj_html("", "Group: {}".format(key))
+ self.overall_report.build_objective()
+ dataframe1 = pd.DataFrame(dataframe)
+ self.overall_report.set_table_dataframe(dataframe1)
+ self.overall_report.build_table()
+ else:
+ dataframe = {
+ " Clients": client_list,
+ " MAC ": self.ftp_obj_dict[ce][obj_name]["obj"].mac_id_list,
+ " Channel": self.ftp_obj_dict[ce][obj_name]["obj"].channel_list,
+ " SSID ": self.ftp_obj_dict[ce][obj_name]["obj"].ssid_list,
+ " Mode": self.ftp_obj_dict[ce][obj_name]["obj"].mode_list,
+ " No of times File downloaded ": self.ftp_obj_dict[ce][obj_name]["obj"].url_data,
+ " Time Taken to Download file (ms)": self.ftp_obj_dict[ce][obj_name]["obj"].uc_avg,
+ " Bytes-rd (Mega Bytes)": self.ftp_obj_dict[ce][obj_name]["obj"].bytes_rd,
+ " RX RATE (Mbps) ": self.ftp_obj_dict[ce][obj_name]["obj"].rx_rate,
+ "Failed Urls": self.ftp_obj_dict[ce][obj_name]["obj"].total_err
+ }
+ if self.ftp_obj_dict[ce][obj_name]["obj"].expected_passfail_val or self.ftp_obj_dict[ce][obj_name]["obj"].csv_name:
+ dataframe[" Expected output "] = self.ftp_obj_dict[ce][obj_name]["obj"].test_input_list
+ dataframe[" Status "] = self.ftp_obj_dict[ce][obj_name]["obj"].pass_fail_list
+
+ dataframe1 = pd.DataFrame(dataframe)
+ self.overall_report.set_table_dataframe(dataframe1)
+ self.overall_report.build_table()
+
+ else:
+ dataframe = {
+ " Clients": client_list,
+ " MAC ": self.ftp_obj_dict[ce][obj_name]["obj"].mac_id_list,
+ " Channel": self.ftp_obj_dict[ce][obj_name]["obj"].channel_list,
+ " SSID ": self.ftp_obj_dict[ce][obj_name]["obj"].ssid_list,
+ " Mode": self.ftp_obj_dict[ce][obj_name]["obj"].mode_list,
+ " No of times File downloaded ": self.ftp_obj_dict[ce][obj_name]["obj"].url_data,
+ " Time Taken to Download file (ms)": self.ftp_obj_dict[ce][obj_name]["obj"].uc_avg,
+ " Bytes-rd (Mega Bytes)": self.ftp_obj_dict[ce][obj_name]["obj"].bytes_rd,
+ }
+ dataframe1 = pd.DataFrame(dataframe)
+ self.overall_report.set_table_dataframe(dataframe1)
+ self.overall_report.build_table()
+ # self.overall_report.build_footer()
+ # html_file = self.overall_report.write_html()
+ # logger.info("returned file {}".format(html_file))
+ # logger.info(html_file)
+ # self.overall_report.write_pdf()
+
+ if csv_outfile is not None:
+ current_time = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())
+ csv_outfile = "{}_{}-test_l4_ftp.csv".format(
+ csv_outfile, current_time)
+ csv_outfile = self.overall_report.file_add_path(csv_outfile)
+ logger.info("csv output file : {}".format(csv_outfile))
+ if ce == "series":
+ obj_no+=1
+ obj_name = f"ftp_test_{obj_no}"
+ else:
+ break
+
+ elif test_name == "thput_test":
+ obj_no=1
+ obj_name = "thput_test"
+ if ce == "series":
+ obj_name += "_1"
+ while obj_name in self.thput_obj_dict[ce]:
+ if ce == "parallel":
+ obj_no = ''
+ self.overall_report.set_obj_html(_obj_title=f'THROUGHPUT Test {obj_no}', _obj="")
+ self.overall_report.build_objective()
+ # obj_name = f"thput_test_{obj_no}"
+ params = self.thput_obj_dict[ce][obj_name]["data"].copy()
+ iterations_before_test_stopped_by_user = params["iterations_before_test_stopped_by_user"].copy() if isinstance(params["iterations_before_test_stopped_by_user"], (list, dict, set)) else params["iterations_before_test_stopped_by_user"]
+ incremental_capacity_list = params["incremental_capacity_list"].copy() if isinstance(params["incremental_capacity_list"], (list, dict, set)) else params["incremental_capacity_list"]
+ data = params["data"].copy() if isinstance(params["data"], (list, dict, set)) else params["data"]
+ data1 = params["data1"].copy() if isinstance(params["data1"], (list, dict, set)) else params["data1"]
+ report_path = params["report_path"].copy() if isinstance(params["report_path"], (list, dict, set)) else params["report_path"]
+
+ self.thput_obj_dict[ce][obj_name]["obj"].ssid_list = self.thput_obj_dict[ce][obj_name]["obj"].get_ssid_list(self.thput_obj_dict[ce][obj_name]["obj"].input_devices_list)
+ self.thput_obj_dict[ce][obj_name]["obj"].signal_list, self.thput_obj_dict[ce][obj_name]["obj"].channel_list, self.thput_obj_dict[ce][obj_name]["obj"].mode_list, self.thput_obj_dict[ce][obj_name]["obj"].link_speed_list, rx_rate_list = self.thput_obj_dict[ce][obj_name]["obj"].get_signal_and_channel_data(self.thput_obj_dict[ce][obj_name]["obj"].input_devices_list)
+ selected_real_clients_names = params["selected_real_clients_names"] if "selected_real_clients_names" in params else None
+ if selected_real_clients_names is not None:
+ self.thput_obj_dict[ce][obj_name]["obj"].num_stations = selected_real_clients_names
+
+ # Initialize the report object
+ if self.thput_obj_dict[ce][obj_name]["obj"].do_interopability == False:
+ # df.to_csv(os.path.join(report_path_date_time, 'throughput_data.csv'))
+ # For groups and profiles configuration through webgui
+
+ self.overall_report.set_obj_html(_obj_title="Input Parameters",
+ _obj="The below tables provides the input parameters for the test")
+ self.overall_report.build_objective()
+
+ # Initialize counts and lists for device types
+ android_devices, windows_devices, linux_devices, mac_devices, ios_devices = 0, 0, 0, 0, 0
+ all_devices_names = []
+ device_type = []
+ packet_size_text = ''
+ total_devices = ""
+ if self.thput_obj_dict[ce][obj_name]["obj"].cx_profile.side_a_min_pdu == -1:
+ packet_size_text = 'AUTO'
+ else:
+ packet_size_text = str(self.thput_obj_dict[ce][obj_name]["obj"].cx_profile.side_a_min_pdu) + ' Bytes'
+ # Determine load type name based on self.thput_obj_dict[ce][obj_name]["obj"].load_type
+ if self.thput_obj_dict[ce][obj_name]["obj"].load_type == "wc_intended_load":
+ load_type_name = "Intended Load"
+ else:
+ load_type_name = "Per Client Load"
+ for i in self.thput_obj_dict[ce][obj_name]["obj"].real_client_list:
+ split_device_name = i.split(" ")
+ if 'android' in split_device_name:
+ all_devices_names.append(split_device_name[2] + ("(Android)"))
+ device_type.append("Android")
+ android_devices += 1
+ elif 'Win' in split_device_name:
+ all_devices_names.append(split_device_name[2] + ("(Windows)"))
+ device_type.append("Windows")
+ windows_devices += 1
+ elif 'Lin' in split_device_name:
+ all_devices_names.append(split_device_name[2] + ("(Linux)"))
+ device_type.append("Linux")
+ linux_devices += 1
+ elif 'Mac' in split_device_name:
+ all_devices_names.append(split_device_name[2] + ("(Mac)"))
+ device_type.append("Mac")
+ mac_devices += 1
+ elif 'iOS' in split_device_name:
+ all_devices_names.append(split_device_name[2] + ("(iOS)"))
+ device_type.append("iOS")
+ ios_devices += 1
+
+ # Build total_devices string based on counts
+ if android_devices > 0:
+ total_devices += f" Android({android_devices})"
+ if windows_devices > 0:
+ total_devices += f" Windows({windows_devices})"
+ if linux_devices > 0:
+ total_devices += f" Linux({linux_devices})"
+ if mac_devices > 0:
+ total_devices += f" Mac({mac_devices})"
+ if ios_devices > 0:
+ total_devices += f" iOS({ios_devices})"
+
+ # Determine incremental_capacity_data based on self.thput_obj_dict[ce][obj_name]["obj"].incremental_capacity
+ if self.thput_obj_dict[ce][obj_name]["obj"].gave_incremental:
+ incremental_capacity_data = "No Incremental values provided"
+ elif len(self.thput_obj_dict[ce][obj_name]["obj"].incremental_capacity) == 1:
+ if len(incremental_capacity_list) == 1:
+ incremental_capacity_data = str(self.thput_obj_dict[ce][obj_name]["obj"].incremental_capacity[0])
+ else:
+ incremental_capacity_data = ','.join(map(str, incremental_capacity_list))
+ elif (len(self.thput_obj_dict[ce][obj_name]["obj"].incremental_capacity) > 1):
+ self.thput_obj_dict[ce][obj_name]["obj"].incremental_capacity = self.thput_obj_dict[ce][obj_name]["obj"].incremental_capacity.split(',')
+ incremental_capacity_data = ', '.join(self.thput_obj_dict[ce][obj_name]["obj"].incremental_capacity)
+ else:
+ incremental_capacity_data = "None"
+
+ # Construct test_setup_info dictionary for test setup table
+ if self.thput_obj_dict[ce][obj_name]["obj"].group_name:
+ group_names = ', '.join(self.thput_obj_dict[ce][obj_name]["obj"].configdevices.keys())
+ profile_names = ', '.join(self.thput_obj_dict[ce][obj_name]["obj"].configdevices.values())
+ configmap = "Groups:" + group_names + " -> Profiles:" + profile_names
+ test_setup_info = {
+ "Test name": self.thput_obj_dict[ce][obj_name]["obj"].test_name,
+ "Configuration": configmap,
+ "Configured Devices": ", ".join(all_devices_names),
+ "No of Devices": "Total" + f"({str(self.thput_obj_dict[ce][obj_name]['obj'].num_stations)})" + total_devices,
+ "Increment": incremental_capacity_data,
+ "Traffic Duration in minutes": round(int(self.thput_obj_dict[ce][obj_name]["obj"].test_duration) * len(incremental_capacity_list) / 60, 2),
+ "Traffic Type": (self.thput_obj_dict[ce][obj_name]["obj"].traffic_type.strip("lf_")).upper(),
+ "Traffic Direction": self.thput_obj_dict[ce][obj_name]["obj"].direction,
+ "Upload Rate(Mbps)": str(round(int(self.thput_obj_dict[ce][obj_name]["obj"].cx_profile.side_a_min_bps) / 1000000, 2)) + "Mbps",
+ "Download Rate(Mbps)": str(round(int(self.thput_obj_dict[ce][obj_name]["obj"].cx_profile.side_b_min_bps) / 1000000, 2)) + "Mbps",
+ "Load Type": load_type_name,
+ "Packet Size": packet_size_text
+ }
+ else:
+ test_setup_info = {
+ "Test name": self.thput_obj_dict[ce][obj_name]["obj"].test_name,
+ "Device List": ", ".join(all_devices_names),
+ "No of Devices": "Total" + f"({str(self.thput_obj_dict[ce][obj_name]['obj'].num_stations)})" + total_devices,
+ "Increment": incremental_capacity_data,
+ "Traffic Duration in minutes": round(int(self.thput_obj_dict[ce][obj_name]["obj"].test_duration) * len(incremental_capacity_list) / 60, 2),
+ "Traffic Type": (self.thput_obj_dict[ce][obj_name]["obj"].traffic_type.strip("lf_")).upper(),
+ "Traffic Direction": self.thput_obj_dict[ce][obj_name]["obj"].direction,
+ "Upload Rate(Mbps)": str(round(int(self.thput_obj_dict[ce][obj_name]["obj"].cx_profile.side_a_min_bps) / 1000000, 2)) + "Mbps",
+ "Download Rate(Mbps)": str(round(int(self.thput_obj_dict[ce][obj_name]["obj"].cx_profile.side_b_min_bps) / 1000000, 2)) + "Mbps",
+ "Load Type": load_type_name,
+ "Packet Size": packet_size_text
+ }
+ self.overall_report.test_setup_table(test_setup_data=test_setup_info, value="Test Configuration")
+
+ # Loop through iterations and build graphs, tables for each iteration
+ for i in range(len(iterations_before_test_stopped_by_user)):
+ # rssi_signal_data=[]
+ devices_on_running = []
+ download_data = []
+ upload_data = []
+ upload_drop = []
+ download_drop = []
+ devices_data_to_create_bar_graph = []
+ # signal_data=[]
+ direction_in_table = []
+ packet_size_in_table = []
+ upload_list, download_list = [], []
+ rssi_data = []
+ data_iter = data[data['Iteration'] == i + 1]
+ avg_rtt_data = []
+
+ # for sig in self.thput_obj_dict[ce][obj_name]["obj"].signal_list[0:int(incremental_capacity_list[i])]:
+ # signal_data.append(int(sig)*(-1))
+ # rssi_signal_data.append(signal_data)
+
+ # Fetch devices_on_running from real_client_list
+ for j in range(data1[i][-1]):
+ devices_on_running.append(self.thput_obj_dict[ce][obj_name]["obj"].real_client_list[j].split(" ")[-1])
+
+ # Fetch download_data and upload_data based on load_type and direction
+ for k in devices_on_running:
+ # individual_device_data=[]
+
+ # Checking individual device download and upload rate by searching device name in dataframe
+ columns_with_substring = [col for col in data_iter.columns if k in col]
+ filtered_df = data_iter[columns_with_substring]
+ dl_len = len(filtered_df[[col for col in filtered_df.columns if "Download" in col][0]].values.tolist()) - 1
+ ul_len = len(filtered_df[[col for col in filtered_df.columns if "Upload" in col][0]].values.tolist()) - 1
+ if self.thput_obj_dict[ce][obj_name]["obj"].load_type == "wc_intended_load":
+ if self.thput_obj_dict[ce][obj_name]["obj"].direction == "Bi-direction":
+
+ # Append average download and upload data from filtered dataframe
+ download_data.append(round((sum(filtered_df[[col for col in filtered_df.columns if "Download" in col][0]].values.tolist()[1:dl_len]) / (dl_len - 1)), 2))
+ upload_data.append(round((sum(filtered_df[[col for col in filtered_df.columns if "Upload" in col][0]].values.tolist()[1:ul_len]) / (ul_len - 1)), 2))
+ # Append average upload and download drop from filtered dataframe
+ upload_drop.append(round((sum(filtered_df[[col for col in filtered_df.columns if "Tx % Drop" in col][0]].values.tolist()[1:ul_len]) / (ul_len - 1)), 2))
+ download_drop.append(round((sum(filtered_df[[col for col in filtered_df.columns if "Rx % Drop " in col][0]].values.tolist()[1:dl_len]) / (dl_len - 1)), 2))
+ avg_rtt_data.append(filtered_df[[col for col in filtered_df.columns if "Average RTT " in col][0]].values.tolist()[-1])
+ rssi_data.append(int(round(sum(filtered_df[[col for col in filtered_df.columns if "RSSI" in col][0]].values.tolist()) /
+ len(filtered_df[[col for col in filtered_df.columns if "RSSI" in col][0]].values.tolist()), 2)) * -1)
+ # Calculate and append upload and download throughput to lists
+ upload_list.append(str(round((int(self.thput_obj_dict[ce][obj_name]["obj"].cx_profile.side_a_min_bps) / 1000000) / int(incremental_capacity_list[i]), 2)))
+ download_list.append(str(round((int(self.thput_obj_dict[ce][obj_name]["obj"].cx_profile.side_b_min_bps) / 1000000) / int(incremental_capacity_list[i]), 2)))
+ if self.thput_obj_dict[ce][obj_name]["obj"].cx_profile.side_a_min_pdu == -1:
+ packet_size_in_table.append('AUTO')
+ else:
+ packet_size_in_table.append(self.thput_obj_dict[ce][obj_name]["obj"].cx_profile.side_a_min_pdu)
+ direction_in_table.append(self.thput_obj_dict[ce][obj_name]["obj"].direction)
+
+ elif self.thput_obj_dict[ce][obj_name]["obj"].direction == 'Download':
+
+ # Append average download data from filtered dataframe
+ download_data.append(round((sum(filtered_df[[col for col in filtered_df.columns if "Download" in col][0]].values.tolist()[1:dl_len]) / (dl_len - 1)), 2))
+
+ # Append 0 for upload data
+ upload_data.append(0)
+
+ rssi_data.append(int(round(sum(filtered_df[[col for col in filtered_df.columns if "RSSI" in col][0]].values.tolist()) /
+ len(filtered_df[[col for col in filtered_df.columns if "RSSI" in col][0]].values.tolist()), 2)) * -1)
+
+ # Calculate and append upload and download throughput to lists
+ upload_list.append(str(round((int(self.thput_obj_dict[ce][obj_name]["obj"].cx_profile.side_a_min_bps) / 1000000) / int(incremental_capacity_list[i]), 2)))
+ download_list.append(str(round((int(self.thput_obj_dict[ce][obj_name]["obj"].cx_profile.side_b_min_bps) / 1000000) / int(incremental_capacity_list[i]), 2)))
+ avg_rtt_data.append(filtered_df[[col for col in filtered_df.columns if "Average RTT " in col][0]].values.tolist()[-1])
+ # Append average download drop data from filtered dataframe
+ download_drop.append(round((sum(filtered_df[[col for col in filtered_df.columns if "Rx % Drop " in col][0]].values.tolist()[1:dl_len]) / (dl_len - 1)), 2))
+ if self.thput_obj_dict[ce][obj_name]["obj"].cx_profile.side_a_min_pdu == -1:
+ packet_size_in_table.append('AUTO')
+ else:
+ packet_size_in_table.append(self.thput_obj_dict[ce][obj_name]["obj"].cx_profile.side_a_min_pdu)
+ direction_in_table.append(self.thput_obj_dict[ce][obj_name]["obj"].direction)
+
+ elif self.thput_obj_dict[ce][obj_name]["obj"].direction == 'Upload':
+
+ # Calculate and append upload and download throughput to lists
+ upload_list.append(str(round((int(self.thput_obj_dict[ce][obj_name]["obj"].cx_profile.side_a_min_bps) / 1000000) / int(incremental_capacity_list[i]), 2)))
+ download_list.append(str(round((int(self.thput_obj_dict[ce][obj_name]["obj"].cx_profile.side_b_min_bps) / 1000000) / int(incremental_capacity_list[i]), 2)))
+
+ rssi_data.append(int(round(sum(filtered_df[[col for col in filtered_df.columns if "RSSI" in col][0]].values.tolist()) /
+ len(filtered_df[[col for col in filtered_df.columns if "RSSI" in col][0]].values.tolist()), 2)) * -1)
+
+ # Append Average upload data from filtered dataframe
+ upload_data.append(round((sum(filtered_df[[col for col in filtered_df.columns if "Upload" in col][0]].values.tolist()[1:ul_len]) / (ul_len - 1)), 2))
+ # Append 0 for download data
+ download_data.append(0)
+ # Append average upload drop data from filtered dataframe
+ upload_drop.append(round((sum(filtered_df[[col for col in filtered_df.columns if "Tx % Drop" in col][0]].values.tolist()[1:ul_len]) / (ul_len - 1)), 2))
+ avg_rtt_data.append(filtered_df[[col for col in filtered_df.columns if "Average RTT " in col][0]].values.tolist()[-1])
+ if self.thput_obj_dict[ce][obj_name]["obj"].cx_profile.side_a_min_pdu == -1:
+ packet_size_in_table.append('AUTO')
+ else:
+ packet_size_in_table.append(self.thput_obj_dict[ce][obj_name]["obj"].cx_profile.side_a_min_pdu)
+ direction_in_table.append(self.thput_obj_dict[ce][obj_name]["obj"].direction)
+
+ else:
+
+ if self.thput_obj_dict[ce][obj_name]["obj"].direction == "Bi-direction":
+ # Append average download and upload data from filtered dataframe
+ download_data.append(round((sum(filtered_df[[col for col in filtered_df.columns if "Download" in col][0]].values.tolist()[1:dl_len]) / (dl_len - 1)), 2))
+ upload_data.append(round((sum(filtered_df[[col for col in filtered_df.columns if "Upload" in col][0]].values.tolist()[1:ul_len]) / (ul_len - 1)), 2))
+ # Append average download and upload drop data from filtered dataframe
+ upload_drop.append(round((sum(filtered_df[[col for col in filtered_df.columns if "Tx % Drop" in col][0]].values.tolist()[1:ul_len]) / (ul_len - 1)), 2))
+ download_drop.append(round((sum(filtered_df[[col for col in filtered_df.columns if "Rx % Drop " in col][0]].values.tolist()[1:dl_len]) / (dl_len - 1)), 2))
+ # upload_data.append(filtered_df[[col for col in filtered_df.columns if "Upload" in col][0]].values.tolist()[-1])
+ rssi_data.append(int(round(sum(filtered_df[[col for col in filtered_df.columns if "RSSI" in col][0]].values.tolist()) /
+ len(filtered_df[[col for col in filtered_df.columns if "RSSI" in col][0]].values.tolist()), 2)) * -1)
+ avg_rtt_data.append(filtered_df[[col for col in filtered_df.columns if "Average RTT " in col][0]].values.tolist()[-1])
+ # Calculate and append upload and download throughput to lists
+ upload_list.append(str(round(int(self.thput_obj_dict[ce][obj_name]["obj"].cx_profile.side_a_min_bps) / 1000000, 2)))
+ download_list.append(str(round(int(self.thput_obj_dict[ce][obj_name]["obj"].cx_profile.side_b_min_bps) / 1000000, 2)))
+
+ if self.thput_obj_dict[ce][obj_name]["obj"].cx_profile.side_a_min_pdu == -1:
+ packet_size_in_table.append('AUTO')
+ else:
+ packet_size_in_table.append(self.thput_obj_dict[ce][obj_name]["obj"].cx_profile.side_a_min_pdu)
+ direction_in_table.append(self.thput_obj_dict[ce][obj_name]["obj"].direction)
+ elif self.thput_obj_dict[ce][obj_name]["obj"].direction == 'Download':
+
+ # Append average download data from filtered dataframe
+ download_data.append(round((sum(filtered_df[[col for col in filtered_df.columns if "Download" in col][0]].values.tolist()[1:dl_len]) / (dl_len - 1)), 2))
+ # Append 0 for upload data
+ upload_data.append(0)
+ rssi_data.append(int(round(sum(filtered_df[[col for col in filtered_df.columns if "RSSI" in col][0]].values.tolist()) /
+ len(filtered_df[[col for col in filtered_df.columns if "RSSI" in col][0]].values.tolist()), 2)) * -1)
+ avg_rtt_data.append(filtered_df[[col for col in filtered_df.columns if "Average RTT " in col][0]].values.tolist()[-1])
+ # Calculate and append upload and download throughput to lists
+ upload_list.append(str(round(int(self.thput_obj_dict[ce][obj_name]["obj"].cx_profile.side_a_min_bps) / 1000000, 2)))
+ download_list.append(str(round(int(self.thput_obj_dict[ce][obj_name]["obj"].cx_profile.side_b_min_bps) / 1000000, 2)))
+ # Append average download drop data from filtered dataframe
+ download_drop.append(round((sum(filtered_df[[col for col in filtered_df.columns if "Rx % Drop " in col][0]].values.tolist()[1:dl_len]) / (dl_len - 1)), 2))
+ if self.thput_obj_dict[ce][obj_name]["obj"].cx_profile.side_a_min_pdu == -1:
+ packet_size_in_table.append('AUTO')
+ else:
+ packet_size_in_table.append(self.thput_obj_dict[ce][obj_name]["obj"].cx_profile.side_a_min_pdu)
+ direction_in_table.append(self.thput_obj_dict[ce][obj_name]["obj"].direction)
+ elif self.thput_obj_dict[ce][obj_name]["obj"].direction == 'Upload':
+
+ # Calculate and append upload and download throughput to lists
+ upload_list.append(str(round(int(self.thput_obj_dict[ce][obj_name]["obj"].cx_profile.side_a_min_bps) / 1000000, 2)))
+ download_list.append(str(round(int(self.thput_obj_dict[ce][obj_name]["obj"].cx_profile.side_b_min_bps) / 1000000, 2)))
+ rssi_data.append(int(round(sum(filtered_df[[col for col in filtered_df.columns if "RSSI" in col][0]].values.tolist()) /
+ len(filtered_df[[col for col in filtered_df.columns if "RSSI" in col][0]].values.tolist()), 2)) * -1)
+ avg_rtt_data.append(filtered_df[[col for col in filtered_df.columns if "Average RTT " in col][0]].values.tolist()[-1])
+ # Append average upload data from filtered dataframe
+ upload_data.append(round((sum(filtered_df[[col for col in filtered_df.columns if "Upload" in col][0]].values.tolist()[1:ul_len]) / (ul_len - 1)), 2))
+ # Append average upload drop data from filtered dataframe
+ upload_drop.append(round((sum(filtered_df[[col for col in filtered_df.columns if "Tx % Drop" in col][0]].values.tolist()[1:ul_len]) / (ul_len - 1)), 2))
+
+ # Append 0 for download data
+ download_data.append(0)
+
+ if self.thput_obj_dict[ce][obj_name]["obj"].cx_profile.side_a_min_pdu == -1:
+ packet_size_in_table.append('AUTO')
+ else:
+ packet_size_in_table.append(self.thput_obj_dict[ce][obj_name]["obj"].cx_profile.side_a_min_pdu)
+ direction_in_table.append(self.thput_obj_dict[ce][obj_name]["obj"].direction)
+
+ data_set_in_graph = []
+
+ # Depending on the test direction, retrieve corresponding throughput data,
+ # organize it into datasets for graphing, and calculate real-time average throughput values accordingly.
+ if self.thput_obj_dict[ce][obj_name]["obj"].direction == "Bi-direction":
+ download_values_list = data['Overall Download'][data['Iteration'] == i + 1].values.tolist()
+ upload_values_list = data['Overall Upload'][data['Iteration'] == i + 1].values.tolist()
+ data_set_in_graph.append(download_values_list)
+ data_set_in_graph.append(upload_values_list)
+ devices_data_to_create_bar_graph.append(download_data)
+ devices_data_to_create_bar_graph.append(upload_data)
+ label_data = ['Download', 'Upload']
+ real_time_data = (
+ f"Real Time Throughput: Achieved Throughput: Download: {round(sum(download_data[0:int(incremental_capacity_list[i])]), 2)} Mbps, "
+ f"Upload: {round(sum(upload_data[0:int(incremental_capacity_list[i])]), 2)} Mbps"
+ )
+
+ elif self.thput_obj_dict[ce][obj_name]["obj"].direction == 'Download':
+ download_values_list = data['Overall Download'][data['Iteration'] == i + 1].values.tolist()
+ data_set_in_graph.append(download_values_list)
+ devices_data_to_create_bar_graph.append(download_data)
+ label_data = ['Download']
+ real_time_data = f"Real Time Throughput: Achieved Throughput: Download : {round(((sum(download_data[0:int(incremental_capacity_list[i])]))), 2)} Mbps"
+
+ elif self.thput_obj_dict[ce][obj_name]["obj"].direction == 'Upload':
+ upload_values_list = data['Overall Upload'][data['Iteration'] == i + 1].values.tolist()
+ data_set_in_graph.append(upload_values_list)
+ devices_data_to_create_bar_graph.append(upload_data)
+ label_data = ['Upload']
+ real_time_data = f"Real Time Throughput: Achieved Throughput: Upload : {round((sum(upload_data[0:int(incremental_capacity_list[i])])), 2)} Mbps"
+
+ if len(incremental_capacity_list) > 1:
+ self.overall_report.set_custom_html(f"Iteration-{i + 1}: Number of Devices Running : {len(devices_on_running)}
")
+ self.overall_report.build_custom()
+
+ self.overall_report.set_obj_html(
+ _obj_title=f"{real_time_data}",
+ _obj=" ")
+ self.overall_report.build_objective()
+ graph_png = self.thput_obj_dict[ce][obj_name]["obj"].build_line_graph(
+ data_set=data_set_in_graph,
+ xaxis_name="Time",
+ yaxis_name="Throughput (Mbps)",
+ xaxis_categories=data['TIMESTAMP'][data['Iteration'] == i + 1].values.tolist(),
+ label=label_data,
+ graph_image_name=f"line_graph{i}"
+ )
+ logger.info("graph name {}".format(graph_png))
+ self.overall_report.set_graph_image(graph_png)
+ self.overall_report.move_graph_image()
+
+ self.overall_report.build_graph()
+ x_fig_size = 15
+ y_fig_size = len(devices_on_running) * .5 + 4
+ self.overall_report.set_obj_html(
+ _obj_title="Per Client Avg-Throughput",
+ _obj=" ")
+ self.overall_report.build_objective()
+ devices_on_running_trimmed = [n[:17] if len(n) > 17 else n for n in devices_on_running]
+ graph = lf_bar_graph_horizontal(_data_set=devices_data_to_create_bar_graph,
+ _xaxis_name="Avg Throughput(Mbps)",
+ _yaxis_name="Devices",
+ _graph_image_name=f"image_name{i}_{obj_no}",
+ _label=label_data,
+ _yaxis_categories=devices_on_running_trimmed,
+ _legend_loc="best",
+ _legend_box=(1.0, 1.0),
+ _show_bar_value=True,
+ _figsize=(x_fig_size, y_fig_size)
+ )
+
+ graph_png = graph.build_bar_graph_horizontal()
+ logger.info("graph name {}".format(graph_png))
+ graph.build_bar_graph_horizontal()
+ self.overall_report.set_graph_image(graph_png)
+ self.overall_report.move_graph_image()
+ self.overall_report.build_graph()
+ self.overall_report.set_obj_html(
+ _obj_title="RSSI Of The Clients Connected",
+ _obj=" ")
+ self.overall_report.build_objective()
+ graph = lf_bar_graph_horizontal(_data_set=[rssi_data],
+ _xaxis_name="Signal(-dBm)",
+ _yaxis_name="Devices",
+ _graph_image_name=f"signal_image_name{i}_{obj_no}",
+ _label=['RSSI'],
+ _yaxis_categories=devices_on_running_trimmed,
+ _legend_loc="best",
+ _legend_box=(1.0, 1.0),
+ _show_bar_value=True,
+ _figsize=(x_fig_size, y_fig_size)
+ # _color=['lightcoral']
+ )
+ graph_png = graph.build_bar_graph_horizontal()
+ logger.info("graph name {}".format(graph_png))
+ graph.build_bar_graph_horizontal()
+ self.overall_report.set_graph_image(graph_png)
+ self.overall_report.move_graph_image()
+ self.overall_report.build_graph()
+ if(self.thput_obj_dict[ce][obj_name]["obj"].dowebgui and self.thput_obj_dict[ce][obj_name]["obj"].get_live_view):
+ self.thput_obj_dict[ce][obj_name]["obj"].add_live_view_images_to_report(self.overall_report)
+
+ if self.thput_obj_dict[ce][obj_name]["obj"].group_name:
+ self.overall_report.set_obj_html(
+ _obj_title="Detailed Result Table For Groups ",
+ _obj="The below tables provides detailed information for the throughput test on each group.")
+ else:
+
+ self.overall_report.set_obj_html(
+ _obj_title="Detailed Result Table ",
+ _obj="The below tables provides detailed information for the throughput test on each device.")
+ self.overall_report.build_objective()
+ self.thput_obj_dict[ce][obj_name]["obj"].mac_id_list = [item.split()[-1] if ' ' in item else item for item in self.thput_obj_dict[ce][obj_name]["obj"].mac_id_list]
+ if self.thput_obj_dict[ce][obj_name]["obj"].expected_passfail_value or self.thput_obj_dict[ce][obj_name]["obj"].device_csv_name:
+ test_input_list, pass_fail_list = self.thput_obj_dict[ce][obj_name]["obj"].get_pass_fail_list(device_type, incremental_capacity_list[i], devices_on_running, download_data, upload_data)
+ if self.thput_obj_dict[ce][obj_name]["obj"].group_name:
+ for key, val in self.thput_obj_dict[ce][obj_name]["obj"].group_device_map.items():
+ if self.thput_obj_dict[ce][obj_name]["obj"].expected_passfail_value or self.thput_obj_dict[ce][obj_name]["obj"].device_csv_name:
+ # Generating Dataframe when Groups with their profiles and pass_fail case is specified
+ dataframe = self.thput_obj_dict[ce][obj_name]["obj"].generate_dataframe(val,
+ device_type[0:int(incremental_capacity_list[i])],
+ devices_on_running[0:int(incremental_capacity_list[i])],
+ self.thput_obj_dict[ce][obj_name]["obj"].ssid_list[0:int(incremental_capacity_list[i])],
+ self.thput_obj_dict[ce][obj_name]["obj"].mac_id_list[0:int(incremental_capacity_list[i])],
+ self.thput_obj_dict[ce][obj_name]["obj"].channel_list[0:int(incremental_capacity_list[i])],
+ self.thput_obj_dict[ce][obj_name]["obj"].mode_list[0:int(incremental_capacity_list[i])],
+ direction_in_table[0:int(incremental_capacity_list[i])],
+ download_list[0:int(incremental_capacity_list[i])],
+ [str(n) for n in avg_rtt_data[0:int(incremental_capacity_list[i])]],
+ [str(n) + " Mbps" for n in download_data[0:int(incremental_capacity_list[i])]],
+ upload_list[0:int(incremental_capacity_list[i])],
+ [str(n) + " Mbps" for n in upload_data[0:int(incremental_capacity_list[i])]],
+ ['' if n == 0 else '-' + str(n) + " dbm" for n in rssi_data[0:int(incremental_capacity_list[i])]],
+ test_input_list,
+ self.thput_obj_dict[ce][obj_name]["obj"].link_speed_list[0:int(incremental_capacity_list[i])],
+ [str(n) for n in packet_size_in_table[0:int(incremental_capacity_list[i])]],
+ pass_fail_list,
+ upload_drop,
+ download_drop)
+ # Generating Dataframe for groups when pass_fail case is not specified
+ else:
+ dataframe = self.thput_obj_dict[ce][obj_name]["obj"].generate_dataframe(val,
+ device_type[0:int(incremental_capacity_list[i])],
+ devices_on_running[0:int(incremental_capacity_list[i])],
+ self.thput_obj_dict[ce][obj_name]["obj"].ssid_list[0:int(incremental_capacity_list[i])],
+ self.thput_obj_dict[ce][obj_name]["obj"].mac_id_list[0:int(incremental_capacity_list[i])],
+ self.thput_obj_dict[ce][obj_name]["obj"].channel_list[0:int(incremental_capacity_list[i])],
+ self.thput_obj_dict[ce][obj_name]["obj"].mode_list[0:int(incremental_capacity_list[i])],
+ direction_in_table[0:int(incremental_capacity_list[i])],
+ download_list[0:int(incremental_capacity_list[i])],
+ [str(n) for n in avg_rtt_data[0:int(incremental_capacity_list[i])]],
+ [str(n) + " Mbps" for n in download_data[0:int(incremental_capacity_list[i])]],
+ upload_list[0:int(incremental_capacity_list[i])],
+ [str(n) + " Mbps" for n in upload_data[0:int(incremental_capacity_list[i])]],
+ ['' if n == 0 else '-' + str(n) + " dbm" for n in rssi_data[0:int(incremental_capacity_list[i])]],
+ [],
+ self.thput_obj_dict[ce][obj_name]["obj"].link_speed_list[0:int(incremental_capacity_list[i])],
+ [str(n) for n in packet_size_in_table[0:int(incremental_capacity_list[i])]],
+ [],
+ upload_drop,
+ download_drop)
+ if dataframe:
+ self.overall_report.set_obj_html("", "Group: {}".format(key))
+ self.overall_report.build_objective()
+ dataframe1 = pd.DataFrame(dataframe)
+ self.overall_report.set_table_dataframe(dataframe1)
+ self.overall_report.build_table()
+ else:
+ bk_dataframe = {
+ " Device Type ": device_type[0:int(incremental_capacity_list[i])],
+ " Username": devices_on_running[0:int(incremental_capacity_list[i])],
+ " SSID ": self.thput_obj_dict[ce][obj_name]["obj"].ssid_list[0:int(incremental_capacity_list[i])],
+ " MAC ": self.thput_obj_dict[ce][obj_name]["obj"].mac_id_list[0:int(incremental_capacity_list[i])],
+ " Channel ": self.thput_obj_dict[ce][obj_name]["obj"].channel_list[0:int(incremental_capacity_list[i])],
+ " Mode": self.thput_obj_dict[ce][obj_name]["obj"].mode_list[0:int(incremental_capacity_list[i])],
+ # " Direction":direction_in_table[0:int(incremental_capacity_list[i])],
+ " Offered download rate (Mbps) ": download_list[0:int(incremental_capacity_list[i])],
+ " Observed Average download rate (Mbps) ": [str(n) for n in download_data[0:int(incremental_capacity_list[i])]],
+ " Offered upload rate (Mbps) ": upload_list[0:int(incremental_capacity_list[i])],
+ " Observed Average upload rate (Mbps) ": [str(n) for n in upload_data[0:int(incremental_capacity_list[i])]],
+ " RSSI (dBm) ": ['' if n == 0 else '-' + str(n) for n in rssi_data[0:int(incremental_capacity_list[i])]],
+ # " Link Speed ":self.thput_obj_dict[ce][obj_name]["obj"].link_speed_list[0:int(incremental_capacity_list[i])],
+ " Average RTT (ms)" : avg_rtt_data[0:int(incremental_capacity_list[i])],
+ " Packet Size(Bytes) ": [str(n) for n in packet_size_in_table[0:int(incremental_capacity_list[i])]],
+ }
+ if self.thput_obj_dict[ce][obj_name]["obj"].direction == "Bi-direction":
+ bk_dataframe[" Average Tx Drop % "] = upload_drop
+ bk_dataframe[" Average Rx Drop % "] = download_drop
+ elif self.thput_obj_dict[ce][obj_name]["obj"].direction == 'Download':
+ bk_dataframe[" Average Rx Drop % "] = download_drop
+ # adding rx drop while uploading as 0
+ bk_dataframe[" Average Tx Drop % "] = [0.0] * len(download_drop)
+
+ else:
+ bk_dataframe[" Average Tx Drop % "] = upload_drop
+ # adding rx drop while downloading as 0
+ bk_dataframe[" Average Rx Drop % "] = [0.0] * len(upload_drop)
+ if self.thput_obj_dict[ce][obj_name]["obj"].expected_passfail_value or self.thput_obj_dict[ce][obj_name]["obj"].device_csv_name:
+ bk_dataframe[" Expected " + self.thput_obj_dict[ce][obj_name]["obj"].direction + " rate "] = [str(n) + " Mbps" for n in test_input_list]
+ bk_dataframe[" Status "] = pass_fail_list
+ dataframe1 = pd.DataFrame(bk_dataframe)
+ self.overall_report.set_table_dataframe(dataframe1)
+ self.overall_report.build_table()
+
+ self.overall_report.set_custom_html('
')
+ self.overall_report.build_custom()
+
+ elif self.thput_obj_dict[ce][obj_name]["obj"].do_interopability:
+
+ self.overall_report.set_obj_html(_obj_title="Input Parameters",
+ _obj="The below tables provides the input parameters for the test")
+ self.overall_report.build_objective()
+
+ # Initialize counts and lists for device types
+ android_devices, windows_devices, linux_devices, mac_devices, ios_devices = 0, 0, 0, 0, 0
+ all_devices_names = []
+ device_type = []
+ total_devices = ""
+
+ for i in self.thput_obj_dict[ce][obj_name]["obj"].real_client_list:
+ split_device_name = i.split(" ")
+ if 'android' in split_device_name:
+ all_devices_names.append(split_device_name[2] + ("(Android)"))
+ device_type.append("Android")
+ android_devices += 1
+ elif 'Win' in split_device_name:
+ all_devices_names.append(split_device_name[2] + ("(Windows)"))
+ device_type.append("Windows")
+ windows_devices += 1
+ elif 'Lin' in split_device_name:
+ all_devices_names.append(split_device_name[2] + ("(Linux)"))
+ device_type.append("Linux")
+ linux_devices += 1
+ elif 'Mac' in split_device_name:
+ all_devices_names.append(split_device_name[2] + ("(Mac)"))
+ device_type.append("Mac")
+ mac_devices += 1
+ elif 'iOS' in split_device_name:
+ all_devices_names.append(split_device_name[2] + ("(iOS)"))
+ device_type.append("iOS")
+ ios_devices += 1
+
+ # Build total_devices string based on counts
+ if android_devices > 0:
+ total_devices += f" Android({android_devices})"
+ if windows_devices > 0:
+ total_devices += f" Windows({windows_devices})"
+ if linux_devices > 0:
+ total_devices += f" Linux({linux_devices})"
+ if mac_devices > 0:
+ total_devices += f" Mac({mac_devices})"
+ if ios_devices > 0:
+ total_devices += f" iOS({ios_devices})"
+
+ # Construct test_setup_info dictionary for test setup table
+ test_setup_info = {
+ "Test name": self.thput_obj_dict[ce][obj_name]["obj"].test_name,
+ "Device List": ", ".join(all_devices_names),
+ "No of Devices": "Total" + f"({str(self.thput_obj_dict[ce][obj_name]['obj'].num_stations)})" + total_devices,
+ "Traffic Duration in minutes": round(int(self.thput_obj_dict[ce][obj_name]["obj"].test_duration) * len(incremental_capacity_list) / 60, 2),
+ "Traffic Type": (self.thput_obj_dict[ce][obj_name]["obj"].traffic_type.strip("lf_")).upper(),
+ "Traffic Direction": self.thput_obj_dict[ce][obj_name]["obj"].direction,
+ "Upload Rate(Mbps)": str(round(int(self.thput_obj_dict[ce][obj_name]["obj"].cx_profile.side_a_min_bps) / 1000000, 2)) + "Mbps",
+ "Download Rate(Mbps)": str(round(int(self.thput_obj_dict[ce][obj_name]["obj"].cx_profile.side_b_min_bps) / 1000000, 2)) + "Mbps",
+ # "Packet Size" : str(self.thput_obj_dict[ce][obj_name]["obj"].cx_profile.side_a_min_pdu) + " Bytes"
+ }
+ self.overall_report.test_setup_table(test_setup_data=test_setup_info, value="Test Configuration")
+
+ if(not self.thput_obj_dict[ce][obj_name]["obj"].default_config):
+
+ self.overall_report.set_obj_html(_obj_title="Configuration Status of Devices",
+ _obj="The table below shows the configuration status of each device (except iOS) with respect to the SSID connection.")
+ self.overall_report.build_objective()
+
+ configured_dataframe = self.thput_obj_dict[ce][obj_name]["obj"].convert_to_table(self.thput_obj_dict[ce][obj_name]["obj"].configured_devices_check)
+ dataframe1 = pd.DataFrame(configured_dataframe)
+ self.overall_report.set_table_dataframe(dataframe1)
+ self.overall_report.build_table()
+
+ # Loop through iterations and build graphs, tables for each device
+ for i in range(len(iterations_before_test_stopped_by_user)):
+ rssi_signal_data = []
+ devices_on_running = []
+ download_data = []
+ upload_data = []
+ devices_data_to_create_bar_graph = []
+ signal_data = []
+ upload_drop = []
+ download_drop = []
+ direction_in_table = []
+ # packet_size_in_table=[]
+ upload_list, download_list = [], []
+ rssi_data = []
+ data_iter = data[data['Iteration'] == i + 1]
+ avg_rtt_data = []
+
+ # Fetch devices_on_running from real_client_list
+ devices_on_running.append(self.thput_obj_dict[ce][obj_name]["obj"].real_client_list[data1[i][-1] - 1].split(" ")[-1])
+
+ if not self.thput_obj_dict[ce][obj_name]["obj"].default_config and devices_on_running[0] in self.thput_obj_dict[ce][obj_name]["obj"].configured_devices_check and not self.thput_obj_dict[ce][obj_name]["obj"].configured_devices_check[devices_on_running[0]]:
+ continue
+
+ for k in devices_on_running:
+ # individual_device_data=[]
+
+ # Checking individual device download and upload rate by searching device name in dataframe
+ columns_with_substring = [col for col in data_iter.columns if k in col]
+ filtered_df = data_iter[columns_with_substring]
+ dl_len = len(filtered_df[[col for col in filtered_df.columns if "Download" in col][0]].values.tolist()) - 1
+ ul_len = len(filtered_df[[col for col in filtered_df.columns if "Upload" in col][0]].values.tolist()) - 1
+ if self.thput_obj_dict[ce][obj_name]["obj"].direction == "Bi-direction":
+
+ # Append download and upload data from filtered dataframe
+ download_data.append(round((sum(filtered_df[[col for col in filtered_df.columns if "Download" in col][0]].values.tolist()[1:dl_len]) / (dl_len - 1)), 2))
+ upload_data.append(round((sum(filtered_df[[col for col in filtered_df.columns if "Upload" in col][0]].values.tolist()[1:ul_len]) / (ul_len - 1)), 2))
+ upload_drop.append(round((sum(filtered_df[[col for col in filtered_df.columns if "Tx % Drop" in col][0]].values.tolist()[1:ul_len]) / (ul_len - 1)), 2))
+ download_drop.append(round((sum(filtered_df[[col for col in filtered_df.columns if "Rx % Drop " in col][0]].values.tolist()[1:dl_len]) / (dl_len - 1)), 2))
+ rssi_data.append(int(round(sum(filtered_df[[col for col in filtered_df.columns if "RSSI" in col][0]].values.tolist()) /
+ len(filtered_df[[col for col in filtered_df.columns if "RSSI" in col][0]].values.tolist()), 2)) * -1)
+ avg_rtt_data.append(filtered_df[[col for col in filtered_df.columns if "Average RTT " in col][0]].values.tolist()[-1])
+ # Calculate and append upload and download throughput to lists
+ upload_list.append(str(round(int(self.thput_obj_dict[ce][obj_name]["obj"].cx_profile.side_a_min_bps) / 1000000, 2)))
+ download_list.append(str(round(int(self.thput_obj_dict[ce][obj_name]["obj"].cx_profile.side_b_min_bps) / 1000000, 2)))
+
+ direction_in_table.append(self.thput_obj_dict[ce][obj_name]["obj"].direction)
+ elif self.thput_obj_dict[ce][obj_name]["obj"].direction == 'Download':
+
+ # Append download data from filtered dataframe
+ download_data.append(round((sum(filtered_df[[col for col in filtered_df.columns if "Download" in col][0]].values.tolist()[1:dl_len]) / (dl_len - 1)), 2))
+
+ # Append 0 for upload data
+ upload_data.append(0)
+ rssi_data.append(int(round(sum(filtered_df[[col for col in filtered_df.columns if "RSSI" in col][0]].values.tolist()) /
+ len(filtered_df[[col for col in filtered_df.columns if "RSSI" in col][0]].values.tolist()), 2)) * -1)
+ download_drop.append(round((sum(filtered_df[[col for col in filtered_df.columns if "Rx % Drop " in col][0]].values.tolist()[1:dl_len]) / (dl_len - 1)), 2))
+ avg_rtt_data.append(filtered_df[[col for col in filtered_df.columns if "Average RTT " in col][0]].values.tolist()[-1])
+ # Calculate and append upload and download throughput to lists
+ upload_list.append(str(round(int(self.thput_obj_dict[ce][obj_name]["obj"].cx_profile.side_a_min_bps) / 1000000, 2)))
+ download_list.append(str(round(int(self.thput_obj_dict[ce][obj_name]["obj"].cx_profile.side_b_min_bps) / 1000000, 2)))
+
+ direction_in_table.append(self.thput_obj_dict[ce][obj_name]["obj"].direction)
+ elif self.thput_obj_dict[ce][obj_name]["obj"].direction == 'Upload':
+
+ # Calculate and append upload and download throughput to lists
+ upload_list.append(str(round(int(self.thput_obj_dict[ce][obj_name]["obj"].cx_profile.side_a_min_bps) / 1000000, 2)))
+ download_list.append(str(round(int(self.thput_obj_dict[ce][obj_name]["obj"].cx_profile.side_b_min_bps) / 1000000, 2)))
+ rssi_data.append(int(round(sum(filtered_df[[col for col in filtered_df.columns if "RSSI" in col][0]].values.tolist()) /
+ len(filtered_df[[col for col in filtered_df.columns if "RSSI" in col][0]].values.tolist()), 2)) * -1)
+ upload_drop.append(round((sum(filtered_df[[col for col in filtered_df.columns if "Tx % Drop" in col][0]].values.tolist()[1:ul_len]) / (ul_len - 1)), 2))
+ avg_rtt_data.append(filtered_df[[col for col in filtered_df.columns if "Average RTT " in col][0]].values.tolist()[-1])
+ # Append upload data from filtered dataframe
+ upload_data.append(round((sum(filtered_df[[col for col in filtered_df.columns if "Upload" in col][0]].values.tolist()[1:ul_len]) / (ul_len - 1)), 2))
+
+ # Append 0 for download data
+ download_data.append(0)
+
+ direction_in_table.append(self.thput_obj_dict[ce][obj_name]["obj"].direction)
+
+ data_set_in_graph = []
+
+ # Depending on the test direction, retrieve corresponding throughput data,
+ # organize it into datasets for graphing, and calculate real-time average throughput values accordingly.
+ if self.thput_obj_dict[ce][obj_name]["obj"].direction == "Bi-direction":
+ download_values_list = data['Overall Download'][data['Iteration'] == i + 1].values.tolist()
+ upload_values_list = data['Overall Upload'][data['Iteration'] == i + 1].values.tolist()
+ data_set_in_graph.append(download_values_list)
+ data_set_in_graph.append(upload_values_list)
+ devices_data_to_create_bar_graph.append(download_data)
+ devices_data_to_create_bar_graph.append(upload_data)
+ label_data = ['Download', 'Upload']
+ real_time_data = (
+ f"Real Time Throughput: Achieved Throughput: Download: "
+ f"{round(sum(download_data[0:int(incremental_capacity_list[i])]) / len(download_data[0:int(incremental_capacity_list[i])]), 2)} Mbps, "
+ f"Upload: {round(sum(upload_data[0:int(incremental_capacity_list[i])]) / len(upload_data[0:int(incremental_capacity_list[i])]), 2)} Mbps"
+ )
+
+ elif self.thput_obj_dict[ce][obj_name]["obj"].direction == 'Download':
+ download_values_list = data['Overall Download'][data['Iteration'] == i + 1].values.tolist()
+ data_set_in_graph.append(download_values_list)
+ devices_data_to_create_bar_graph.append(download_data)
+ label_data = ['Download']
+ real_time_data = (
+ f"Real Time Throughput: Achieved Throughput: Download: "
+ f"{round(sum(download_data[0:int(incremental_capacity_list[i])]) / len(download_data[0:int(incremental_capacity_list[i])]), 2)} Mbps"
+ )
+
+ elif self.thput_obj_dict[ce][obj_name]["obj"].direction == 'Upload':
+ upload_values_list = data['Overall Upload'][data['Iteration'] == i + 1].values.tolist()
+ data_set_in_graph.append(upload_values_list)
+ devices_data_to_create_bar_graph.append(upload_data)
+ label_data = ['Upload']
+ real_time_data = (
+ f"Real Time Throughput: Achieved Throughput: Upload: "
+ f"{round(sum(upload_data[0:int(incremental_capacity_list[i])]) / len(upload_data[0:int(incremental_capacity_list[i])]), 2)} Mbps"
+ )
+
+ self.overall_report.set_custom_html(f"{i + 1}. Test On Device {', '.join(devices_on_running)}:
")
+ self.overall_report.build_custom()
+
+ self.overall_report.set_obj_html(
+ _obj_title=f"{real_time_data}",
+ _obj=" ")
+ self.overall_report.build_objective()
+ graph_png = self.thput_obj_dict[ce][obj_name]["obj"].build_line_graph(
+ data_set=data_set_in_graph,
+ xaxis_name="Time",
+ yaxis_name="Throughput (Mbps)",
+ xaxis_categories=data['TIMESTAMP'][data['Iteration'] == i + 1].values.tolist(),
+ label=label_data,
+ graph_image_name=f"line_graph{i}"
+ )
+ logger.info("graph name {}".format(graph_png))
+ self.overall_report.set_graph_image(graph_png)
+ self.overall_report.move_graph_image()
+
+ self.overall_report.build_graph()
+ x_fig_size = 15
+ y_fig_size = len(devices_on_running) * .5 + 4
+ self.overall_report.set_obj_html(
+ _obj_title="Per Client Avg-Throughput",
+ _obj=" ")
+ self.overall_report.build_objective()
+ devices_on_running_trimmed = [n[:17] if len(n) > 17 else n for n in devices_on_running]
+ graph = lf_bar_graph_horizontal(_data_set=devices_data_to_create_bar_graph,
+ _xaxis_name="Avg Throughput(Mbps)",
+ _yaxis_name="Devices",
+ _graph_image_name=f"image_name{i}_{obj_no}",
+ _label=label_data,
+ _yaxis_categories=devices_on_running_trimmed,
+ _legend_loc="best",
+ _legend_box=(1.0, 1.0),
+ _show_bar_value=True,
+ _figsize=(x_fig_size, y_fig_size)
+ )
+
+ graph_png = graph.build_bar_graph_horizontal()
+ logger.info("graph name {}".format(graph_png))
+ graph.build_bar_graph_horizontal()
+ self.overall_report.set_graph_image(graph_png)
+ self.overall_report.move_graph_image()
+ self.overall_report.build_graph()
+ self.overall_report.set_obj_html(
+ _obj_title="RSSI Of The Clients Connected",
+ _obj=" ")
+ self.overall_report.build_objective()
+ graph = lf_bar_graph_horizontal(_data_set=[rssi_data],
+ _xaxis_name="Signal(-dBm)",
+ _yaxis_name="Devices",
+ _graph_image_name=f"signal_image_name{i}_{obj_no}",
+ _label=['RSSI'],
+ _yaxis_categories=devices_on_running_trimmed,
+ _legend_loc="best",
+ _legend_box=(1.0, 1.0),
+ _show_bar_value=True,
+ _figsize=(x_fig_size, y_fig_size)
+ # _color=['lightcoral']
+ )
+ graph_png = graph.build_bar_graph_horizontal()
+ # --- Throughput test: overall graph + per-device detail table (tail of branch) ---
+ # NOTE(review): this region is the interior of a much larger report-building
+ # method; `graph`, `graph_png`, `device_type`, `incremental_capacity_list`,
+ # `devices_on_running`, `download_data`, `upload_data`, `download_list`,
+ # `upload_list`, `avg_rtt_data`, `rssi_data`, `upload_drop`, `download_drop`,
+ # `ce`, `obj_name` and `i` are all bound above the visible chunk — confirm
+ # against the full file.
+ logger.info("graph name {}".format(graph_png))
+ graph.build_bar_graph_horizontal()
+ self.overall_report.set_graph_image(graph_png)
+ self.overall_report.move_graph_image()
+ self.overall_report.build_graph()
+
+ self.overall_report.set_obj_html(
+ _obj_title="Detailed Result Table ",
+ _obj="The below tables provides detailed information for the throughput test on each device.")
+ self.overall_report.build_objective()
+ # Normalize entries of the form "<name> <mac>" down to just the MAC.
+ self.thput_obj_dict[ce][obj_name]["obj"].mac_id_list = [item.split()[-1] if ' ' in item else item for item in self.thput_obj_dict[ce][obj_name]["obj"].mac_id_list]
+ # Pass/fail columns are only computed when a pass/fail criterion was supplied
+ # (either an explicit expected value or a per-device CSV).
+ if self.thput_obj_dict[ce][obj_name]["obj"].expected_passfail_value or self.thput_obj_dict[ce][obj_name]["obj"].device_csv_name:
+ test_input_list, pass_fail_list = self.thput_obj_dict[ce][obj_name]["obj"].get_pass_fail_list(device_type, incremental_capacity_list[i], devices_on_running, download_data, upload_data)
+ bk_dataframe = {}
+
+ # Dataframe changes with respect to groups and profiles in case of interopability
+ if self.thput_obj_dict[ce][obj_name]["obj"].group_name:
+ # Map the currently running device back to its group: for Android devices
+ # the adb device list is consulted to translate user-name -> port name.
+ interop_tab_data = self.thput_obj_dict[ce][obj_name]["obj"].json_get('/adb/')["devices"]
+ res_list = []
+ grp_name = []
+ if device_type[int(incremental_capacity_list[i]) - 1] != 'Android':
+ res_list.append(devices_on_running[-1])
+ else:
+ for dev in interop_tab_data:
+ for item in dev.values():
+ if item['user-name'] == devices_on_running[-1]:
+ res_list.append(item['name'].split('.')[2])
+ break
+ for key, value in self.thput_obj_dict[ce][obj_name]["obj"].group_device_map.items():
+ if res_list[-1] in value:
+ grp_name.append(key)
+ break
+ bk_dataframe["Group Name"] = grp_name[-1]
+
+ # One row per report pass; list indices are 1-based via incremental_capacity_list.
+ bk_dataframe[" Device Type "] = device_type[int(incremental_capacity_list[i]) - 1]
+ bk_dataframe[" Username"] = devices_on_running[-1]
+ bk_dataframe[" SSID "] = self.thput_obj_dict[ce][obj_name]["obj"].ssid_list[int(incremental_capacity_list[i]) - 1]
+ bk_dataframe[" MAC "] = self.thput_obj_dict[ce][obj_name]["obj"].mac_id_list[int(incremental_capacity_list[i]) - 1]
+ bk_dataframe[" Channel "] = self.thput_obj_dict[ce][obj_name]["obj"].channel_list[int(incremental_capacity_list[i]) - 1]
+ bk_dataframe[" Mode"] = self.thput_obj_dict[ce][obj_name]["obj"].mode_list[int(incremental_capacity_list[i]) - 1]
+ bk_dataframe[" Offered download rate (Mbps)"] = download_list[-1]
+ bk_dataframe[" Observed Average download rate (Mbps)"] = [str(download_data[-1])]
+ bk_dataframe[" Offered upload rate (Mbps)"] = upload_list[-1]
+ bk_dataframe[" Observed Average upload rate (Mbps)"] = [str(upload_data[-1])]
+ bk_dataframe[" Average RTT (ms) "] = avg_rtt_data[-1]
+ # RSSI of 0 is treated as "no reading"; otherwise render as a negative dBm value.
+ bk_dataframe[" RSSI (dBm)"] = ['' if rssi_data[-1] == 0 else '-' + str(rssi_data[-1])]
+ # Fill Tx/Rx drop columns according to traffic direction; the unused
+ # direction is zero-filled so the table keeps a uniform shape.
+ if self.thput_obj_dict[ce][obj_name]["obj"].direction == "Bi-direction":
+ bk_dataframe[" Average Tx Drop % "] = upload_drop
+ bk_dataframe[" Average Rx Drop % "] = download_drop
+ elif self.thput_obj_dict[ce][obj_name]["obj"].direction == 'Download':
+ bk_dataframe[" Average Rx Drop % "] = download_drop
+ bk_dataframe[" Average Tx Drop % "] = [0.0] * len(download_drop)
+ else:
+ bk_dataframe[" Average Tx Drop % "] = upload_drop
+ bk_dataframe[" Average Rx Drop % "] = [0.0] * len(upload_drop)
+ # When pass fail criteria is specified
+ if self.thput_obj_dict[ce][obj_name]["obj"].expected_passfail_value or self.thput_obj_dict[ce][obj_name]["obj"].device_csv_name:
+ bk_dataframe[" Expected " + self.thput_obj_dict[ce][obj_name]["obj"].direction + " rate "] = test_input_list
+ bk_dataframe[" Status "] = pass_fail_list
+ dataframe1 = pd.DataFrame(bk_dataframe)
+ self.overall_report.set_table_dataframe(dataframe1)
+ self.overall_report.build_table()
+
+ # NOTE(review): the next statement is an unterminated string literal split
+ # across two lines (the patch also drops the '+' marker on the second line)
+ # — this is a SyntaxError as written. Most likely the original argument to
+ # set_custom_html() was mangled in the diff; recover it from the source
+ # file before this can run.
+ self.overall_report.set_custom_html('
')
+ self.overall_report.build_custom()
+
+ # Attach live-view screenshots only for webGUI-driven interoperability runs.
+ if(self.thput_obj_dict[ce][obj_name]["obj"].dowebgui and self.thput_obj_dict[ce][obj_name]["obj"].get_live_view and self.thput_obj_dict[ce][obj_name]["obj"].do_interopability):
+ self.thput_obj_dict[ce][obj_name]["obj"].add_live_view_images_to_report(self.overall_report)
+ if ce == "series":
+ obj_no += 1
+ # NOTE(review): this branch iterates self.thput_obj_dict, but the next
+ # series key is built as "ftp_test_<n>" — looks like a copy-paste from
+ # the FTP branch and would terminate/derail series chaining; confirm
+ # the intended key prefix against the keys stored in thput_obj_dict.
+ obj_name = f"ftp_test_{obj_no}"
+ else:
+ break
+
+ # --- Ping test report section ---
+ # Walks every stored ping-test object ("ping_test_1", "ping_test_2", ... in
+ # series mode; the single key "ping_test" in parallel mode) and appends its
+ # setup table, sent/received/dropped graph, latency graph and notes to
+ # self.overall_report.
+ elif test_name == "ping_test":
+ obj_no = 1
+ obj_name = 'ping_test'
+ if ce == "series":
+ obj_name += "_1"
+ while obj_name in self.ping_obj_dict[ce]:
+ if ce == "parallel":
+ obj_no = ''
+ # Shallow-copy the stored parameters so local unpacking cannot mutate
+ # the dict kept in self.ping_obj_dict.
+ params = self.ping_obj_dict[ce][obj_name]["data"].copy()
+ result_json = params["result_json"]
+ result_dir = params["result_dir"]
+ report_path = params["report_path"]
+ config_devices = params["config_devices"]
+ group_device_map = params["group_device_map"]
+ if result_json is not None:
+ self.ping_obj_dict[ce][obj_name]["obj"].result_json = result_json
+ self.overall_report.set_obj_html(_obj_title=f'PING Test {obj_no}', _obj="")
+ self.overall_report.build_objective()
+ # Test setup information table for devices in device list
+ if config_devices == '':
+ test_setup_info = {
+ 'SSID': self.ping_obj_dict[ce][obj_name]["obj"].ssid,
+ 'Security': self.ping_obj_dict[ce][obj_name]["obj"].security,
+ 'Website / IP': self.ping_obj_dict[ce][obj_name]["obj"].target,
+ # V = virtual stations (total minus real), A/W/L/M = per-OS counts.
+ 'No of Devices': '{} (V:{}, A:{}, W:{}, L:{}, M:{})'.format(len(self.ping_obj_dict[ce][obj_name]["obj"].sta_list), len(self.ping_obj_dict[ce][obj_name]["obj"].sta_list) - len(self.ping_obj_dict[ce][obj_name]["obj"].real_sta_list), self.ping_obj_dict[ce][obj_name]["obj"].android, self.ping_obj_dict[ce][obj_name]["obj"].windows, self.ping_obj_dict[ce][obj_name]["obj"].linux, self.ping_obj_dict[ce][obj_name]["obj"].mac),
+ 'Duration (in minutes)': self.ping_obj_dict[ce][obj_name]["obj"].duration
+ }
+ # Test setup information table for devices in groups
+ else:
+ group_names = ', '.join(config_devices.keys())
+ profile_names = ', '.join(config_devices.values())
+ configmap = "Groups:" + group_names + " -> Profiles:" + profile_names
+ test_setup_info = {
+ 'Configuration': configmap,
+ 'Website / IP': self.ping_obj_dict[ce][obj_name]["obj"].target,
+ 'No of Devices': '{} (V:{}, A:{}, W:{}, L:{}, M:{})'.format(len(self.ping_obj_dict[ce][obj_name]["obj"].sta_list), len(self.ping_obj_dict[ce][obj_name]["obj"].sta_list) - len(self.ping_obj_dict[ce][obj_name]["obj"].real_sta_list), self.ping_obj_dict[ce][obj_name]["obj"].android, self.ping_obj_dict[ce][obj_name]["obj"].windows, self.ping_obj_dict[ce][obj_name]["obj"].linux, self.ping_obj_dict[ce][obj_name]["obj"].mac),
+ 'Duration (in minutes)': self.ping_obj_dict[ce][obj_name]["obj"].duration
+ }
+ self.overall_report.test_setup_table(
+ test_setup_data=test_setup_info, value='Test Setup Information')
+
+ # packets sent vs received vs dropped
+ self.overall_report.set_table_title(
+ 'Packets sent vs packets received vs packets dropped')
+ self.overall_report.build_table_title()
+ # graph for the above
+ # Reset the per-device accumulators before re-populating them from
+ # result_json (this section may run once per stored ping object).
+ self.ping_obj_dict[ce][obj_name]["obj"].packets_sent = []
+ self.ping_obj_dict[ce][obj_name]["obj"].packets_received = []
+ self.ping_obj_dict[ce][obj_name]["obj"].packets_dropped = []
+ self.ping_obj_dict[ce][obj_name]["obj"].device_names = []
+ self.ping_obj_dict[ce][obj_name]["obj"].device_modes = []
+ self.ping_obj_dict[ce][obj_name]["obj"].device_channels = []
+ self.ping_obj_dict[ce][obj_name]["obj"].device_min = []
+ self.ping_obj_dict[ce][obj_name]["obj"].device_max = []
+ self.ping_obj_dict[ce][obj_name]["obj"].device_avg = []
+ self.ping_obj_dict[ce][obj_name]["obj"].device_mac = []
+ self.ping_obj_dict[ce][obj_name]["obj"].device_names_with_errors = []
+ self.ping_obj_dict[ce][obj_name]["obj"].devices_with_errors = []
+ self.ping_obj_dict[ce][obj_name]["obj"].report_names = []
+ self.ping_obj_dict[ce][obj_name]["obj"].remarks = []
+ self.ping_obj_dict[ce][obj_name]["obj"].device_ssid = []
+ # packet_count_data = {}
+ os_type = []
+ # Flatten result_json (keyed by device/port) into the parallel lists above.
+ # assumes each device_data dict carries 'os', 'sent', 'recv', 'dropped',
+ # 'name', 'mode', 'channel', 'mac', 'ssid', '*_rtt' and 'remarks' keys —
+ # TODO confirm against the ping test's result writer.
+ for device, device_data in self.ping_obj_dict[ce][obj_name]["obj"].result_json.items():
+ logging.info('Device data: {} {}'.format(device, device_data))
+ os_type.append(device_data['os'])
+ self.ping_obj_dict[ce][obj_name]["obj"].packets_sent.append(int(device_data['sent']))
+ self.ping_obj_dict[ce][obj_name]["obj"].packets_received.append(int(device_data['recv']))
+ self.ping_obj_dict[ce][obj_name]["obj"].packets_dropped.append(int(device_data['dropped']))
+ self.ping_obj_dict[ce][obj_name]["obj"].device_names.append(device_data['name'] + ' ' + device_data['os'])
+ self.ping_obj_dict[ce][obj_name]["obj"].device_modes.append(device_data['mode'])
+ self.ping_obj_dict[ce][obj_name]["obj"].device_channels.append(device_data['channel'])
+ self.ping_obj_dict[ce][obj_name]["obj"].device_mac.append(device_data['mac'])
+ self.ping_obj_dict[ce][obj_name]["obj"].device_ssid.append(device_data['ssid'])
+ # RTT values arrive as strings possibly containing thousands separators.
+ self.ping_obj_dict[ce][obj_name]["obj"].device_min.append(float(device_data['min_rtt'].replace(',', '')))
+ self.ping_obj_dict[ce][obj_name]["obj"].device_max.append(float(device_data['max_rtt'].replace(',', '')))
+ self.ping_obj_dict[ce][obj_name]["obj"].device_avg.append(float(device_data['avg_rtt'].replace(',', '')))
+ # Virtual station labels are truncated to 25 chars for graph axes.
+ if (device_data['os'] == 'Virtual'):
+ self.ping_obj_dict[ce][obj_name]["obj"].report_names.append('{} {}'.format(device, device_data['os'])[0:25])
+ else:
+ self.ping_obj_dict[ce][obj_name]["obj"].report_names.append('{} {} {}'.format(device, device_data['os'], device_data['name']))
+ if (device_data['remarks'] != []):
+ self.ping_obj_dict[ce][obj_name]["obj"].device_names_with_errors.append(device_data['name'])
+ self.ping_obj_dict[ce][obj_name]["obj"].devices_with_errors.append(device)
+ self.ping_obj_dict[ce][obj_name]["obj"].remarks.append(','.join(device_data['remarks']))
+ # Scale graph height with the number of clients so labels stay readable.
+ x_fig_size = 15
+ y_fig_size = len(self.ping_obj_dict[ce][obj_name]["obj"].device_names) * .5 + 4
+ graph = lf_bar_graph_horizontal(_data_set=[self.ping_obj_dict[ce][obj_name]["obj"].packets_dropped, self.ping_obj_dict[ce][obj_name]["obj"].packets_received, self.ping_obj_dict[ce][obj_name]["obj"].packets_sent],
+ _xaxis_name='Packets Count',
+ _yaxis_name='Wireless Clients',
+ _label=[
+ 'Packets Loss', 'Packets Received', 'Packets Sent'],
+ _graph_image_name=f'Packets sent vs received vs dropped {obj_no}',
+ _yaxis_label=self.ping_obj_dict[ce][obj_name]["obj"].report_names,
+ _yaxis_categories=self.ping_obj_dict[ce][obj_name]["obj"].report_names,
+ _yaxis_step=1,
+ _yticks_font=8,
+ _graph_title='Packets sent vs received vs dropped',
+ _title_size=16,
+ _color=['lightgrey',
+ 'orange', 'steelblue'],
+ _color_edge=['black'],
+ _bar_height=0.15,
+ _figsize=(x_fig_size, y_fig_size),
+ _legend_loc="best",
+ _legend_box=(1.0, 1.0),
+ _dpi=96,
+ _show_bar_value=False,
+ _enable_csv=True,
+ _color_name=['lightgrey', 'orange', 'steelblue'])
+
+ graph_png = graph.build_bar_graph_horizontal()
+ logging.info('graph name {}'.format(graph_png))
+ self.overall_report.set_graph_image(graph_png)
+ # need to move the graph image to the results directory
+ self.overall_report.move_graph_image()
+ self.overall_report.set_csv_filename(graph_png)
+ self.overall_report.move_csv_file()
+ self.overall_report.build_graph()
+ if self.ping_obj_dict[ce][obj_name]["obj"].real:
+ # Calculating the pass/fail criteria when either expected_passfail_val or csv_name is provided
+ if self.ping_obj_dict[ce][obj_name]["obj"].expected_passfail_val or self.ping_obj_dict[ce][obj_name]["obj"].csv_name:
+ self.ping_obj_dict[ce][obj_name]["obj"].get_pass_fail_list(os_type)
+ # When groups are provided a seperate table will be generated for each group using generate_dataframe
+ if self.ping_obj_dict[ce][obj_name]["obj"].group_name:
+ for key, val in group_device_map.items():
+ # With pass/fail criteria the extra loss/expected/status columns
+ # are passed through; without them empty lists are supplied.
+ if self.ping_obj_dict[ce][obj_name]["obj"].expected_passfail_val or self.ping_obj_dict[ce][obj_name]["obj"].csv_name:
+ dataframe = self.ping_obj_dict[ce][obj_name]["obj"].generate_dataframe(
+ val,
+ self.ping_obj_dict[ce][obj_name]["obj"].device_names,
+ self.ping_obj_dict[ce][obj_name]["obj"].device_mac,
+ self.ping_obj_dict[ce][obj_name]["obj"].device_channels,
+ self.ping_obj_dict[ce][obj_name]["obj"].device_ssid,
+ self.ping_obj_dict[ce][obj_name]["obj"].device_modes,
+ self.ping_obj_dict[ce][obj_name]["obj"].packets_sent,
+ self.ping_obj_dict[ce][obj_name]["obj"].packets_received,
+ self.ping_obj_dict[ce][obj_name]["obj"].packets_dropped,
+ self.ping_obj_dict[ce][obj_name]["obj"].percent_pac_loss,
+ self.ping_obj_dict[ce][obj_name]["obj"].test_input_list,
+ self.ping_obj_dict[ce][obj_name]["obj"].pass_fail_list)
+ else:
+ dataframe = self.ping_obj_dict[ce][obj_name]["obj"].generate_dataframe(val, self.ping_obj_dict[ce][obj_name]["obj"].device_names, self.ping_obj_dict[ce][obj_name]["obj"].device_mac, self.ping_obj_dict[ce][obj_name]["obj"].device_channels, self.ping_obj_dict[ce][obj_name]["obj"].device_ssid,
+ self.ping_obj_dict[ce][obj_name]["obj"].device_modes, self.ping_obj_dict[ce][obj_name]["obj"].packets_sent, self.ping_obj_dict[ce][obj_name]["obj"].packets_received, self.ping_obj_dict[ce][obj_name]["obj"].packets_dropped, [], [], [])
+ # generate_dataframe may return an empty/falsy result for groups
+ # with no matching devices; skip those.
+ if dataframe:
+ self.overall_report.set_obj_html("", "Group: {}".format(key))
+ self.overall_report.build_objective()
+ dataframe1 = pd.DataFrame(dataframe)
+ self.overall_report.set_table_dataframe(dataframe1)
+ self.overall_report.build_table()
+
+ else:
+ # Real devices, no groups: one flat table for all clients.
+ dataframe1 = pd.DataFrame({
+ 'Wireless Client': self.ping_obj_dict[ce][obj_name]["obj"].device_names,
+ 'MAC': self.ping_obj_dict[ce][obj_name]["obj"].device_mac,
+ 'Channel': self.ping_obj_dict[ce][obj_name]["obj"].device_channels,
+ 'SSID ': self.ping_obj_dict[ce][obj_name]["obj"].device_ssid,
+ 'Mode': self.ping_obj_dict[ce][obj_name]["obj"].device_modes,
+ 'Packets Sent': self.ping_obj_dict[ce][obj_name]["obj"].packets_sent,
+ 'Packets Received': self.ping_obj_dict[ce][obj_name]["obj"].packets_received,
+ 'Packets Loss': self.ping_obj_dict[ce][obj_name]["obj"].packets_dropped,
+ })
+ if self.ping_obj_dict[ce][obj_name]["obj"].expected_passfail_val or self.ping_obj_dict[ce][obj_name]["obj"].csv_name:
+ dataframe1[" Percentage of Packet loss %"] = self.ping_obj_dict[ce][obj_name]["obj"].percent_pac_loss
+ dataframe1['Expected Packet loss %'] = self.ping_obj_dict[ce][obj_name]["obj"].test_input_list
+ dataframe1['Status'] = self.ping_obj_dict[ce][obj_name]["obj"].pass_fail_list
+ self.overall_report.set_table_dataframe(dataframe1)
+ self.overall_report.build_table()
+ else:
+ # Virtual-only run: same flat table, never with pass/fail columns.
+ dataframe1 = pd.DataFrame({
+ 'Wireless Client': self.ping_obj_dict[ce][obj_name]["obj"].device_names,
+ 'MAC': self.ping_obj_dict[ce][obj_name]["obj"].device_mac,
+ 'Channel': self.ping_obj_dict[ce][obj_name]["obj"].device_channels,
+ 'SSID ': self.ping_obj_dict[ce][obj_name]["obj"].device_ssid,
+ 'Mode': self.ping_obj_dict[ce][obj_name]["obj"].device_modes,
+ 'Packets Sent': self.ping_obj_dict[ce][obj_name]["obj"].packets_sent,
+ 'Packets Received': self.ping_obj_dict[ce][obj_name]["obj"].packets_received,
+ 'Packets Loss': self.ping_obj_dict[ce][obj_name]["obj"].packets_dropped,
+ })
+ self.overall_report.set_table_dataframe(dataframe1)
+ self.overall_report.build_table()
+
+ # packets latency graph
+ self.overall_report.set_table_title('Ping Latency Graph')
+ self.overall_report.build_table_title()
+
+ graph = lf_bar_graph_horizontal(_data_set=[self.ping_obj_dict[ce][obj_name]["obj"].device_min, self.ping_obj_dict[ce][obj_name]["obj"].device_avg, self.ping_obj_dict[ce][obj_name]["obj"].device_max],
+ _xaxis_name='Time (ms)',
+ _yaxis_name='Wireless Clients',
+ _label=[
+ 'Min Latency (ms)', 'Average Latency (ms)', 'Max Latency (ms)'],
+ _graph_image_name=f'Ping Latency per client {obj_no}',
+ _yaxis_label=self.ping_obj_dict[ce][obj_name]["obj"].report_names,
+ _yaxis_categories=self.ping_obj_dict[ce][obj_name]["obj"].report_names,
+ _yaxis_step=1,
+ _yticks_font=8,
+ _graph_title='Ping Latency per client',
+ _title_size=16,
+ _color=['lightgrey',
+ 'orange', 'steelblue'],
+ _color_edge='black',
+ _bar_height=0.15,
+ _figsize=(x_fig_size, y_fig_size),
+ _legend_loc="best",
+ _legend_box=(1.0, 1.0),
+ _dpi=96,
+ _show_bar_value=False,
+ _enable_csv=True,
+ _color_name=['lightgrey', 'orange', 'steelblue'])
+
+ graph_png = graph.build_bar_graph_horizontal()
+ logging.info('graph name {}'.format(graph_png))
+ self.overall_report.set_graph_image(graph_png)
+ # need to move the graph image to the results directory
+ self.overall_report.move_graph_image()
+ self.overall_report.set_csv_filename(graph_png)
+ self.overall_report.move_csv_file()
+ self.overall_report.build_graph()
+
+ # Latency summary table (min/avg/max per client).
+ dataframe2 = pd.DataFrame({
+ 'Wireless Client': self.ping_obj_dict[ce][obj_name]["obj"].device_names,
+ 'MAC': self.ping_obj_dict[ce][obj_name]["obj"].device_mac,
+ 'Channel': self.ping_obj_dict[ce][obj_name]["obj"].device_channels,
+ 'SSID ': self.ping_obj_dict[ce][obj_name]["obj"].device_ssid,
+ 'Mode': self.ping_obj_dict[ce][obj_name]["obj"].device_modes,
+ 'Min Latency (ms)': self.ping_obj_dict[ce][obj_name]["obj"].device_min,
+ 'Average Latency (ms)': self.ping_obj_dict[ce][obj_name]["obj"].device_avg,
+ 'Max Latency (ms)': self.ping_obj_dict[ce][obj_name]["obj"].device_max
+ })
+ self.overall_report.set_table_dataframe(dataframe2)
+ self.overall_report.build_table()
+
+ # check if there are remarks for any device. If there are remarks, build table else don't
+ if (self.ping_obj_dict[ce][obj_name]["obj"].remarks != []):
+ self.overall_report.set_table_title('Notes')
+ self.overall_report.build_table_title()
+ dataframe3 = pd.DataFrame({
+ 'Wireless Client': self.ping_obj_dict[ce][obj_name]["obj"].device_names_with_errors,
+ 'Port': self.ping_obj_dict[ce][obj_name]["obj"].devices_with_errors,
+ 'Remarks': self.ping_obj_dict[ce][obj_name]["obj"].remarks
+ })
+ self.overall_report.set_table_dataframe(dataframe3)
+ self.overall_report.build_table()
+
+ # closing
+ self.overall_report.build_custom()
+ # Advance to the next stored ping object in series mode; in parallel
+ # mode there is exactly one object, so stop after the first pass.
+ if ce == "series":
+ obj_no += 1
+ obj_name = f"ping_test_{obj_no}"
+ else:
+ break
+ # --- QoS test report section ---
+ # Same series/parallel iteration pattern as the other branches: appends the
+ # QoS setup table, per-TOS throughput table/graphs and the individual-client
+ # graphs to self.overall_report for each stored QoS object.
+ elif test_name == "qos_test":
+ obj_no = 1
+ obj_name = 'qos_test'
+ if ce == "series":
+ obj_name += "_1"
+ while obj_name in self.qos_obj_dict[ce]:
+ if ce == "parallel":
+ obj_no = ''
+ params = self.qos_obj_dict[ce][obj_name]["data"]
+ # Defensively shallow-copy only container-typed params so later local
+ # mutation cannot leak back into the stored dict; scalars pass through.
+ data = params["data"].copy() if isinstance(params["data"], (list, dict, set)) else params["data"]
+ input_setup_info = params["input_setup_info"].copy() if isinstance(params["input_setup_info"], (list, dict, set)) else params["input_setup_info"]
+ connections_download_avg = params["connections_download_avg"].copy() if isinstance(params["connections_download_avg"], (list, dict, set)) else params["connections_download_avg"]
+ connections_upload_avg = params["connections_upload_avg"].copy() if isinstance(params["connections_upload_avg"], (list, dict, set)) else params["connections_upload_avg"]
+ avg_drop_a = params["avg_drop_a"].copy() if isinstance(params["avg_drop_a"], (list, dict, set)) else params["avg_drop_a"]
+ avg_drop_b = params["avg_drop_b"].copy() if isinstance(params["avg_drop_b"], (list, dict, set)) else params["avg_drop_b"]
+ report_path = params["report_path"].copy() if isinstance(params["report_path"], (list, dict, set)) else params["report_path"]
+ result_dir_name = params["result_dir_name"].copy() if isinstance(params["result_dir_name"], (list, dict, set)) else params["result_dir_name"]
+ selected_real_clients_names = params["selected_real_clients_names"].copy() if isinstance(params["selected_real_clients_names"], (list, dict, set)) else params["selected_real_clients_names"]
+ config_devices = params["config_devices"].copy() if isinstance(params["config_devices"], (list, dict, set)) else params["config_devices"]
+ self.qos_obj_dict[ce][obj_name]["obj"].ssid_list = self.qos_obj_dict[ce][obj_name]["obj"].get_ssid_list(self.qos_obj_dict[ce][obj_name]["obj"].input_devices_list)
+ if selected_real_clients_names is not None:
+ self.qos_obj_dict[ce][obj_name]["obj"].num_stations = selected_real_clients_names
+ data_set, load, res = self.qos_obj_dict[ce][obj_name]["obj"].generate_graph_data_set(data)
+ # Initialize counts and lists for device types
+ android_devices, windows_devices, linux_devices, ios_devices, ios_mob_devices = 0, 0, 0, 0, 0
+ all_devices_names = []
+ device_type = []
+ total_devices = ""
+ # Classify each real client by the OS token embedded in its display name.
+ # assumes real_client_list entries are space-separated with the device
+ # name at index 2 — TODO confirm against the QoS test's client naming.
+ for i in self.qos_obj_dict[ce][obj_name]["obj"].real_client_list:
+ split_device_name = i.split(" ")
+ if 'android' in split_device_name:
+ all_devices_names.append(split_device_name[2] + ("(Android)"))
+ device_type.append("Android")
+ android_devices += 1
+ elif 'Win' in split_device_name:
+ all_devices_names.append(split_device_name[2] + ("(Windows)"))
+ device_type.append("Windows")
+ windows_devices += 1
+ elif 'Lin' in split_device_name:
+ all_devices_names.append(split_device_name[2] + ("(Linux)"))
+ device_type.append("Linux")
+ linux_devices += 1
+ elif 'Mac' in split_device_name:
+ all_devices_names.append(split_device_name[2] + ("(Mac)"))
+ device_type.append("Mac")
+ ios_devices += 1
+ elif 'iOS' in split_device_name:
+ all_devices_names.append(split_device_name[2] + ("(iOS)"))
+ device_type.append("iOS")
+ ios_mob_devices += 1
+
+ # Build total_devices string based on counts
+ if android_devices > 0:
+ total_devices += f" Android({android_devices})"
+ if windows_devices > 0:
+ total_devices += f" Windows({windows_devices})"
+ if linux_devices > 0:
+ total_devices += f" Linux({linux_devices})"
+ if ios_devices > 0:
+ total_devices += f" Mac({ios_devices})"
+ if ios_mob_devices > 0:
+ total_devices += f" iOS({ios_mob_devices})"
+
+ # Test setup information table for devices in device list
+ if config_devices == "":
+ test_setup_info = {
+ "Device List": ", ".join(all_devices_names),
+ "Number of Stations": "Total" + f"({self.qos_obj_dict[ce][obj_name]['obj'].num_stations})" + total_devices,
+ "AP Model": self.qos_obj_dict[ce][obj_name]["obj"].ap_name,
+ "SSID": self.qos_obj_dict[ce][obj_name]["obj"].ssid,
+ "Traffic Duration in hours": round(int(self.qos_obj_dict[ce][obj_name]["obj"].test_duration) / 3600, 2),
+ "Security": self.qos_obj_dict[ce][obj_name]["obj"].security,
+ # NOTE(review): str.strip("lf_") strips *characters* l/f/_ from both
+ # ends, not the "lf_" prefix — e.g. "lf_udp" -> "UDP" works, but a
+ # protocol ending in l/f would be truncated; verify intent.
+ "Protocol": (self.qos_obj_dict[ce][obj_name]["obj"].traffic_type.strip("lf_")).upper(),
+ "Traffic Direction": self.qos_obj_dict[ce][obj_name]["obj"].direction,
+ "TOS": self.qos_obj_dict[ce][obj_name]["obj"].tos,
+ "Per TOS Load in Mbps": load
+ }
+ # Test setup information table for devices in groups
+ else:
+ group_names = ', '.join(config_devices.keys())
+ profile_names = ', '.join(config_devices.values())
+ configmap = "Groups:" + group_names + " -> Profiles:" + profile_names
+ test_setup_info = {
+ "AP Model": self.qos_obj_dict[ce][obj_name]["obj"].ap_name,
+ 'Configuration': configmap,
+ "Traffic Duration in hours": round(int(self.qos_obj_dict[ce][obj_name]["obj"].test_duration) / 3600, 2),
+ "Security": self.qos_obj_dict[ce][obj_name]["obj"].security,
+ "Protocol": (self.qos_obj_dict[ce][obj_name]["obj"].traffic_type.strip("lf_")).upper(),
+ "Traffic Direction": self.qos_obj_dict[ce][obj_name]["obj"].direction,
+ "TOS": self.qos_obj_dict[ce][obj_name]["obj"].tos,
+ "Per TOS Load in Mbps": load
+ }
+ # NOTE(review): debug print left in; prefer logger at debug level.
+ print(res["throughput_table_df"])
+ self.overall_report.set_obj_html(_obj_title=f'QOS Test {obj_no}', _obj="")
+ self.overall_report.build_objective()
+ self.overall_report.test_setup_table(test_setup_data=test_setup_info, value="Test Configuration")
+ self.overall_report.set_table_title(
+ f"Overall {self.qos_obj_dict[ce][obj_name]['obj'].direction} Throughput for all TOS i.e BK | BE | Video (VI) | Voice (VO)")
+ self.overall_report.build_table_title()
+ df_throughput = pd.DataFrame(res["throughput_table_df"])
+ self.overall_report.set_table_dataframe(df_throughput)
+ self.overall_report.build_table()
+ # One overall per-TOS bar graph per entry in res["graph_df"].
+ for key in res["graph_df"]:
+ self.overall_report.set_obj_html(
+ _obj_title=f"Overall {self.qos_obj_dict[ce][obj_name]['obj'].direction} throughput for {len(self.qos_obj_dict[ce][obj_name]['obj'].input_devices_list)} clients with different TOS.",
+ _obj=f"The below graph represents overall {self.qos_obj_dict[ce][obj_name]['obj'].direction} throughput for all "
+ "connected stations running BK, BE, VO, VI traffic with different "
+ f"intended loads{load} per tos")
+ self.overall_report.build_objective()
+ graph = lf_bar_graph(_data_set=data_set,
+ _xaxis_name="Load per Type of Service",
+ _yaxis_name="Throughput (Mbps)",
+ _xaxis_categories=["BK,BE,VI,VO"],
+ _xaxis_label=['1 Mbps', '2 Mbps', '3 Mbps', '4 Mbps', '5 Mbps'],
+ _graph_image_name=f"tos_download_{key}Hz {obj_no}",
+ _label=["BK", "BE", "VI", "VO"],
+ _xaxis_step=1,
+ _graph_title=f"Overall {self.qos_obj_dict[ce][obj_name]['obj'].direction} throughput – BK,BE,VO,VI traffic streams",
+ _title_size=16,
+ _color=['orange', 'lightcoral', 'steelblue', 'lightgrey'],
+ _color_edge='black',
+ _bar_width=0.15,
+ _figsize=(18, 6),
+ _legend_loc="best",
+ _legend_box=(1.0, 1.0),
+ _dpi=96,
+ _show_bar_value=True,
+ _enable_csv=True,
+ _color_name=['orange', 'lightcoral', 'steelblue', 'lightgrey'])
+ graph_png = graph.build_bar_graph()
+ print("graph name {}".format(graph_png))
+ self.overall_report.set_graph_image(graph_png)
+ # need to move the graph image to the results directory
+ self.overall_report.move_graph_image()
+ self.overall_report.set_csv_filename(graph_png)
+ self.overall_report.move_csv_file()
+ self.overall_report.build_graph()
+ self.qos_obj_dict[ce][obj_name]["obj"].generate_individual_graph(res, self.overall_report, connections_download_avg, connections_upload_avg, avg_drop_a, avg_drop_b,obj_no)
+ self.overall_report.test_setup_table(test_setup_data=input_setup_info, value="Information")
+ if ce == "series":
+ obj_no += 1
+ obj_name = f"qos_test_{obj_no}"
+ else:
+ break
+
+ # --- Multicast test report section ---
+ # Appends DUT info, test/radio configuration tables and per-TOS throughput
+ # graphs for each stored multicast object. The visible chunk ends mid-loop;
+ # the remainder of this branch continues past the end of this view.
+ elif test_name == "mcast_test":
+ obj_no=1
+ obj_name = "mcast_test"
+ if ce == "series":
+ obj_name += "_1"
+ while obj_name in self.mcast_obj_dict[ce]:
+ if ce == "parallel":
+ obj_no = ''
+ # NOTE(review): leftover debug print; prefer logger.debug.
+ print('is error',self.mcast_obj_dict)
+ params = self.mcast_obj_dict[ce][obj_name]["data"].copy()
+ config_devices = params["config_devices"].copy() if isinstance(params["config_devices"], (list, dict, set)) else params["config_devices"]
+ group_device_map = params["group_device_map"].copy() if isinstance(params["group_device_map"], (list, dict, set)) else params["group_device_map"]
+
+ # self.mcast_obj_dict[ce][obj_name]["obj"].update_a()
+ # self.mcast_obj_dict[ce][obj_name]["obj"].update_b()
+ test_setup_info = {
+ "DUT Name": self.mcast_obj_dict[ce][obj_name]["obj"].dut_model_num,
+ "DUT Hardware Version": self.mcast_obj_dict[ce][obj_name]["obj"].dut_hw_version,
+ "DUT Software Version": self.mcast_obj_dict[ce][obj_name]["obj"].dut_sw_version,
+ "DUT Serial Number": self.mcast_obj_dict[ce][obj_name]["obj"].dut_serial_num,
+ }
+ self.overall_report.set_obj_html(_obj_title=f'MULTICAST Test {obj_no}', _obj="")
+ self.overall_report.build_objective()
+ self.overall_report.set_table_title("Device Under Test Information")
+ self.overall_report.build_table_title()
+ self.overall_report.test_setup_table(value="Device Under Test",
+ test_setup_data=test_setup_info)
+ # For real devices when groups specified for configuration
+ if self.mcast_obj_dict[ce][obj_name]["obj"].real and self.mcast_obj_dict[ce][obj_name]["obj"].group_name:
+ group_names = ', '.join(config_devices.keys())
+ profile_names = ', '.join(config_devices.values())
+ configmap = "Groups:" + group_names + " -> Profiles:" + profile_names
+ test_input_info = {
+ "LANforge ip": self.mcast_obj_dict[ce][obj_name]["obj"].lfmgr,
+ "LANforge port": self.mcast_obj_dict[ce][obj_name]["obj"].lfmgr_port,
+ "Upstream": self.mcast_obj_dict[ce][obj_name]["obj"].upstream_port,
+ "Test Duration": self.mcast_obj_dict[ce][obj_name]["obj"].test_duration,
+ "Test Configuration": configmap,
+ "Polling Interval": self.mcast_obj_dict[ce][obj_name]["obj"].polling_interval,
+ "Total No. of Devices": self.mcast_obj_dict[ce][obj_name]["obj"].station_count,
+ }
+ else:
+ # Same table minus the group->profile configuration row.
+ test_input_info = {
+ "LANforge ip": self.mcast_obj_dict[ce][obj_name]["obj"].lfmgr,
+ "LANforge port": self.mcast_obj_dict[ce][obj_name]["obj"].lfmgr_port,
+ "Upstream": self.mcast_obj_dict[ce][obj_name]["obj"].upstream_port,
+ "Test Duration": self.mcast_obj_dict[ce][obj_name]["obj"].test_duration,
+ "Polling Interval": self.mcast_obj_dict[ce][obj_name]["obj"].polling_interval,
+ "Total No. of Devices": self.mcast_obj_dict[ce][obj_name]["obj"].station_count,
+ }
+
+ self.overall_report.set_table_title("Test Configuration")
+ self.overall_report.build_table_title()
+ self.overall_report.test_setup_table(value="Test Configuration",
+ test_setup_data=test_input_info)
+
+ self.overall_report.set_table_title("Radio Configuration")
+ self.overall_report.build_table_title()
+
+ # Numeric LANforge wifi-mode code -> human-readable label for the report.
+ wifi_mode_dict = {
+ 0: 'AUTO', # 802.11g
+ 1: '802.11a', # 802.11a
+ 2: '802.11b', # 802.11b
+ 3: '802.11g', # 802.11g
+ 4: '802.11abg', # 802.11abg
+ 5: '802.11abgn', # 802.11abgn
+ 6: '802.11bgn', # 802.11bgn
+ 7: '802.11bg', # 802.11bg
+ 8: '802.11abgnAC', # 802.11abgn-AC
+ 9: '802.11anAC', # 802.11an-AC
+ 10: '802.11an', # 802.11an
+ 11: '802.11bgnAC', # 802.11bgn-AC
+ 12: '802.11abgnAX', # 802.11abgn-A+
+ # a/b/g/n/AC/AX (dual-band AX) support
+ 13: '802.11bgnAX', # 802.11bgn-AX
+ 14: '802.11anAX', # 802.11an-AX
+ 15: '802.11aAX', # 802.11a-AX (6E disables /n and /ac)
+ 16: '802.11abgnEHT', # 802.11abgn-EHT a/b/g/n/AC/AX/EHT (dual-band AX) support
+ 17: '802.11bgnEHT', # 802.11bgn-EHT
+ 18: '802.11anEHT', # 802.11an-ETH
+ 19: '802.11aBE', # 802.11a-EHT (6E disables /n and /ac)
+ }
+
+ # One radio-configuration table per configured radio; underscore-prefixed
+ # tuple members (password, reset-port settings) are intentionally unused.
+ for (
+ radio_,
+ ssid_,
+ _ssid_password_, # do not print password
+ ssid_security_,
+ mode_,
+ wifi_enable_flags_list_,
+ _reset_port_enable_,
+ _reset_port_time_min_,
+ _reset_port_time_max_) in zip(
+ self.mcast_obj_dict[ce][obj_name]["obj"].radio_name_list,
+ self.mcast_obj_dict[ce][obj_name]["obj"].ssid_list,
+ self.mcast_obj_dict[ce][obj_name]["obj"].ssid_password_list,
+ self.mcast_obj_dict[ce][obj_name]["obj"].ssid_security_list,
+ self.mcast_obj_dict[ce][obj_name]["obj"].wifi_mode_list,
+ self.mcast_obj_dict[ce][obj_name]["obj"].enable_flags_list,
+ self.mcast_obj_dict[ce][obj_name]["obj"].reset_port_enable_list,
+ self.mcast_obj_dict[ce][obj_name]["obj"].reset_port_time_min_list,
+ self.mcast_obj_dict[ce][obj_name]["obj"].reset_port_time_max_list):
+
+ # KeyError here would mean an unmapped mode code; dict covers 0-19.
+ mode_value = wifi_mode_dict[int(mode_)]
+
+ radio_info = {
+ "SSID": ssid_,
+ "Security": ssid_security_,
+ "Wifi mode set": mode_value,
+ 'Wifi Enable Flags': wifi_enable_flags_list_
+ }
+ self.overall_report.test_setup_table(value=radio_, test_setup_data=radio_info)
+
+ # TODO move the graphing to the class so it may be called as a service
+
+ # Graph TOS data
+ # Once the data is stopped can collect the data for the cx's both multi cast and uni cast
+ # if the traffic is still running will gather the running traffic
+ # self.mcast_obj_dict[ce][obj_name]["obj"].evaluate_qos()
+
+ # graph BK A
+ # try to do as a loop
+ logger.info(f"BEFORE REAL A {self.mcast_obj_dict[ce][obj_name]['obj'].client_dict_A}")
+ tos_list = ['BK', 'BE', 'VI', 'VO']
+ # For real devices, filter both client dicts down to multicast ("Mcast")
+ # connections only, per TOS, on both the A and B sides.
+ if self.mcast_obj_dict[ce][obj_name]["obj"].real:
+ tos_types = ['BE', 'BK', 'VI', 'VO']
+ # NOTE(review): debug print comparing dict identity; if client_dict_B
+ # *is* client_dict_A, the second filtering loop below re-filters the
+ # same (already filtered) data — confirm and remove the print.
+ print("BOOLLLLL",self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_B is self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A)
+ for tos_key in tos_types:
+ if tos_key in self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A:
+ tos_data = self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos_key]
+
+ # Filter A side
+ traffic_proto_A = tos_data.get("traffic_protocol_A", [])
+ indices_to_keep_A = [i for i, proto in enumerate(traffic_proto_A) if proto == "Mcast"]
+
+ # Filter B side
+ traffic_proto_B = tos_data.get("traffic_protocol_B", [])
+ indices_to_keep_B = [i for i, proto in enumerate(traffic_proto_B) if proto == "Mcast"]
+
+ # Filter every parallel list in place, keyed by its _A/_B suffix.
+ for key in list(tos_data.keys()):
+ if key in ["colors", "labels"]:
+ continue # Keep as-is
+
+ if key.endswith('_A'):
+ filtered_list = [tos_data[key][i] for i in indices_to_keep_A if i < len(tos_data[key])]
+ tos_data[key] = filtered_list
+
+ elif key.endswith('_B'):
+ filtered_list = [tos_data[key][i] for i in indices_to_keep_B if i < len(tos_data[key])]
+ tos_data[key] = filtered_list
+ # Identical filtering pass over client_dict_B.
+ for tos_key in tos_types:
+ if tos_key in self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_B:
+ tos_data = self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_B[tos_key]
+
+ # Filter A side
+ traffic_proto_A = tos_data.get("traffic_protocol_A", [])
+ indices_to_keep_A = [i for i, proto in enumerate(traffic_proto_A) if proto == "Mcast"]
+
+ # Filter B side
+ traffic_proto_B = tos_data.get("traffic_protocol_B", [])
+ indices_to_keep_B = [i for i, proto in enumerate(traffic_proto_B) if proto == "Mcast"]
+
+ for key in list(tos_data.keys()):
+ if key in ["colors", "labels"]:
+ continue # Keep as-is
+
+ if key.endswith('_A'):
+ filtered_list = [tos_data[key][i] for i in indices_to_keep_A if i < len(tos_data[key])]
+ tos_data[key] = filtered_list
+
+ elif key.endswith('_B'):
+ filtered_list = [tos_data[key][i] for i in indices_to_keep_B if i < len(tos_data[key])]
+ tos_data[key] = filtered_list
+ # logger.info(f"AFTER REAL A {self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A}")
+ # Per-TOS side-A throughput graph, only for TOS values the test ran.
+ for tos in tos_list:
+ print(self.mcast_obj_dict[ce][obj_name]["obj"].tos)
+ if tos not in self.mcast_obj_dict[ce][obj_name]["obj"].tos:
+ continue
+ if (self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]["ul_A"] and self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]["dl_A"]):
+ min_bps_a = self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A["min_bps_a"]
+ min_bps_b = self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A["min_bps_b"]
+
+ dataset_list = [self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]["ul_A"], self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]["dl_A"]]
+ # TODO possibly explain the wording for upload and download
+ dataset_length = len(self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]["ul_A"])
+ x_fig_size = 20
+ y_fig_size = len(self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]["clients_A"]) * .4 + 5
+ logger.debug("length of clients_A {clients} resource_alias_A {alias_A}".format(
+ clients=len(self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]["clients_A"]), alias_A=len(self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]["resource_alias_A"])))
+ logger.debug("clients_A {clients}".format(clients=self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]["clients_A"]))
+ logger.debug("resource_alias_A {alias_A}".format(alias_A=self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]["resource_alias_A"]))
+
+ # Title wording depends on whether an upload rate was requested.
+ if int(min_bps_a) != 0:
+ self.overall_report.set_obj_html(
+ _obj_title=f"Individual throughput measured upload tcp or udp bps: {min_bps_a}, download tcp, udp, or mcast bps: {min_bps_b} station for traffic {tos} (WiFi).",
+ _obj=f"The below graph represents individual throughput for {dataset_length} clients running {tos} "
+ f"(WiFi) traffic. Y- axis shows “Client names“ and X-axis shows “"
+ f"Throughput in Mbps”.")
+ else:
+ self.overall_report.set_obj_html(
+ _obj_title=f"Individual throughput mcast download bps: {min_bps_b} traffic {tos} (WiFi).",
+ _obj=f"The below graph represents individual throughput for {dataset_length} clients running {tos} "
+ f"(WiFi) traffic. Y- axis shows “Client names“ and X-axis shows “"
+ f"Throughput in Mbps”.")
+
+ self.overall_report.build_objective()
+
+ graph = lf_bar_graph_horizontal(_data_set=dataset_list,
+ _xaxis_name="Throughput in bps",
+ _yaxis_name="Client names",
+ # _yaxis_categories=self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]["clients_A"],
+ _yaxis_categories=self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]["resource_alias_A"],
+ _graph_image_name=f"{tos}_A{obj_no}",
+ _label=self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['labels'],
+ _color_name=self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['colors'],
+ _color_edge=['black'],
+ # traditional station side -A
+ _graph_title=f"Individual {tos} client side traffic measurement - side a (downstream)",
+ _title_size=10,
+ _figsize=(x_fig_size, y_fig_size),
+ _show_bar_value=True,
+ _enable_csv=True,
+ _text_font=8,
+ _legend_loc="best",
+ _legend_box=(1.0, 1.0)
+ )
+ graph_png = graph.build_bar_graph_horizontal()
+ self.overall_report.set_graph_image(graph_png)
+ self.overall_report.move_graph_image()
+ self.overall_report.build_graph()
+ self.overall_report.set_csv_filename(graph_png)
+ self.overall_report.move_csv_file()
+ if(self.mcast_obj_dict[ce][obj_name]["obj"].dowebgui and self.mcast_obj_dict[ce][obj_name]["obj"].get_live_view):
+ for floor in range(0,int(self.mcast_obj_dict[ce][obj_name]["obj"].total_floors)):
+ script_dir = os.path.dirname(os.path.abspath(__file__))
+ throughput_image_path = os.path.join(script_dir, "heatmap_images", f"{self.mcast_obj_dict[ce][obj_name]['obj'].test_name}_throughput_{floor+1}.png")
+ rssi_image_path = os.path.join(script_dir, "heatmap_images", f"{self.mcast_obj_dict[ce][obj_name]['obj'].test_name}_rssi_{floor+1}.png")
+ timeout = 60 # seconds
+ start_time = time.time()
+
+ while not (os.path.exists(throughput_image_path) and os.path.exists(rssi_image_path)):
+ if time.time() - start_time > timeout:
+ print("Timeout: Images not found within 60 seconds.")
+ break
+ time.sleep(1)
+ while not os.path.exists(throughput_image_path) and not os.path.exists(rssi_image_path):
+ if os.path.exists(throughput_image_path) and os.path.exists(rssi_image_path):
+ break
+ # time.sleep(10)
+ if os.path.exists(throughput_image_path):
+ self.overall_report.set_custom_html('')
+ self.overall_report.build_custom()
+                                # self.overall_report.set_custom_html("<h3>Average Throughput Heatmap:</h3>")
+                                # self.overall_report.build_custom()
+                                self.overall_report.set_custom_html(f'<img src="{throughput_image_path}" style="max-width:100%;">')
+ self.overall_report.build_custom()
+ # os.remove(throughput_image_path)
+
+ if os.path.exists(rssi_image_path):
+ self.overall_report.set_custom_html('')
+ self.overall_report.build_custom()
+                                # self.overall_report.set_custom_html("<h3>Average RSSI Heatmap:</h3>")
+                                # self.overall_report.build_custom()
+                                self.overall_report.set_custom_html(f'<img src="{rssi_image_path}" style="max-width:100%;">')
+ self.overall_report.build_custom()
+ # os.remove(rssi_image_path)
+
+ # For real devices appending the required data for pass fail criteria
+ if self.mcast_obj_dict[ce][obj_name]["obj"].real:
+ up, down, off_up, off_down = [], [], [], []
+ for i in self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['ul_A']:
+ up.append(int(i) / 1000000)
+ for i in self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['dl_A']:
+ down.append(int(i) / 1000000)
+ for i in self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['offered_upload_rate_A']:
+ off_up.append(int(i) / 1_000_000)
+ for i in self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['offered_download_rate_A']:
+ off_down.append(int(i) / 1000000)
+ # if either 'expected_passfail_value' or 'device_csv_name' is provided for pass/fail evaluation
+ if self.mcast_obj_dict[ce][obj_name]["obj"].expected_passfail_value or self.mcast_obj_dict[ce][obj_name]["obj"].device_csv_name:
+ test_input_list, pass_fail_list = self.mcast_obj_dict[ce][obj_name]["obj"].get_pass_fail_list(tos, up, down)
+
+ if self.mcast_obj_dict[ce][obj_name]["obj"].real:
+ # When groups and profiles specifed for configuration
+ if self.mcast_obj_dict[ce][obj_name]["obj"].group_name:
+ for key, val in group_device_map.items():
+ # Generating Dataframe when Groups with their profiles and pass_fail case is specified
+ if self.mcast_obj_dict[ce][obj_name]["obj"].expected_passfail_value or self.mcast_obj_dict[ce][obj_name]["obj"].device_csv_name:
+ dataframe = self.mcast_obj_dict[ce][obj_name]["obj"].generate_dataframe(
+ val,
+ self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['resource_alias_A'],
+ self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['resource_eid_A'],
+ self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['resource_host_A'],
+ self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['resource_hw_ver_A'],
+ self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]["clients_A"],
+ self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['port_A'],
+ self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['mode_A'],
+ self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['mac_A'],
+ self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['ssid_A'],
+ self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['channel_A'],
+ self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['traffic_type_A'],
+ self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['traffic_protocol_A'],
+ off_up,
+ off_down,
+ up,
+ down,
+ test_input_list,
+ self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['download_rx_drop_percent_A'],
+ pass_fail_list)
+ # Generating Dataframe for groups when pass_fail case is not specified
+ else:
+ dataframe = self.mcast_obj_dict[ce][obj_name]["obj"].generate_dataframe(
+ val,
+ self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['resource_alias_A'],
+ self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['resource_eid_A'],
+ self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['resource_host_A'],
+ self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['resource_hw_ver_A'],
+ self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]["clients_A"],
+ self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['port_A'],
+ self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['mode_A'],
+ self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['mac_A'],
+ self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['ssid_A'],
+ self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['channel_A'],
+ self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['traffic_type_A'],
+ self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['traffic_protocol_A'],
+ off_up,
+ off_down,
+ up,
+ down,
+ [],
+ self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['download_rx_drop_percent_A'],
+ [],)
+ # When the client exists in either group.
+ if dataframe:
+ self.overall_report.set_obj_html("", "Group: {}".format(key))
+ self.overall_report.build_objective()
+ dataframe1 = pd.DataFrame(dataframe)
+ self.overall_report.set_table_dataframe(dataframe1)
+ self.overall_report.build_table()
+ else:
+ tos_dataframe_A = {
+ " Client Alias ": self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['resource_alias_A'],
+ " Host eid ": self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['resource_eid_A'],
+ " Host Name ": self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['resource_host_A'],
+ " Device Type / Hw Ver ": self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['resource_hw_ver_A'],
+ " Endp Name": self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]["clients_A"],
+ # TODO : port A being set to many times
+ " Port Name ": self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['port_A'],
+ " Mode ": self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['mode_A'],
+ " Mac ": self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['mac_A'],
+ " SSID ": self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['ssid_A'],
+ " Channel ": self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['channel_A'],
+ " Type of traffic ": self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['traffic_type_A'],
+ " Traffic Protocol ": self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['traffic_protocol_A'],
+ " Offered Upload Rate Per Client": self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['offered_upload_rate_A'],
+ " Offered Download Rate Per Client": self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['offered_download_rate_A'],
+ " Upload Rate Per Client": self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['ul_A'],
+ " Download Rate Per Client": self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['dl_A'],
+ " Drop Percentage (%)": self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['download_rx_drop_percent_A'],
+ }
+ # When pass_Fail criteria specified
+ if self.mcast_obj_dict[ce][obj_name]["obj"].expected_passfail_value or self.mcast_obj_dict[ce][obj_name]["obj"].device_csv_name:
+ tos_dataframe_A[" Expected " + 'Download' + " Rate"] = [float(x) * 10**6 for x in test_input_list]
+ tos_dataframe_A[" Status "] = pass_fail_list
+
+ dataframe3 = pd.DataFrame(tos_dataframe_A)
+ self.overall_report.set_table_dataframe(dataframe3)
+ self.overall_report.build_table()
+
+ # For virtual clients
+ else:
+ tos_dataframe_A = {
+ " Client Alias ": self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['resource_alias_A'],
+ " Host eid ": self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['resource_eid_A'],
+ " Host Name ": self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['resource_host_A'],
+ " Device Type / Hw Ver ": self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['resource_hw_ver_A'],
+ " Endp Name": self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]["clients_A"],
+ " Port Name ": self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['port_A'],
+ " Mode ": self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['mode_A'],
+ " Mac ": self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['mac_A'],
+ " SSID ": self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['ssid_A'],
+ " Channel ": self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['channel_A'],
+ " Type of traffic ": self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['traffic_type_A'],
+ " Traffic Protocol ": self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['traffic_protocol_A'],
+ " Offered Upload Rate Per Client": self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['offered_upload_rate_A'],
+ " Offered Download Rate Per Client": self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['offered_download_rate_A'],
+ " Upload Rate Per Client": self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['ul_A'],
+ " Download Rate Per Client": self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['dl_A'],
+ " Drop Percentage (%)": self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_A[tos]['download_rx_drop_percent_A'],
+ }
+ dataframe3 = pd.DataFrame(tos_dataframe_A)
+ self.overall_report.set_table_dataframe(dataframe3)
+ self.overall_report.build_table()
+
+ # TODO both client_dict_A and client_dict_B contains the same information
+ for tos in tos_list:
+ if (self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_B[tos]["ul_B"] and self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_B[tos]["dl_B"]):
+ min_bps_a = self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_B["min_bps_a"]
+ min_bps_b = self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_B["min_bps_b"]
+
+ dataset_list = [self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_B[tos]["ul_B"], self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_B[tos]["dl_B"]]
+ dataset_length = len(self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_B[tos]["ul_B"])
+
+ x_fig_size = 20
+ y_fig_size = len(self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_B[tos]["clients_B"]) * .4 + 5
+
+ self.overall_report.set_obj_html(
+ _obj_title=f"Individual throughput upstream endp, offered upload bps: {min_bps_a} offered download bps: {min_bps_b} /station for traffic {tos} (WiFi).",
+ _obj=f"The below graph represents individual throughput for {dataset_length} clients running {tos} "
+ f"(WiFi) traffic. Y- axis shows “Client names“ and X-axis shows “"
+ f"Throughput in Mbps”.")
+ self.overall_report.build_objective()
+
+ graph = lf_bar_graph_horizontal(_data_set=dataset_list,
+ _xaxis_name="Throughput in bps",
+ _yaxis_name="Client names",
+ # _yaxis_categories=self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_B[tos]["clients_B"],
+ _yaxis_categories=self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_B[tos]["resource_alias_B"],
+ _graph_image_name=f"{tos}_B{obj_no}",
+ _label=self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_B[tos]['labels'],
+ _color_name=self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_B[tos]['colors'],
+ _color_edge=['black'],
+ _graph_title=f"Individual {tos} upstream side traffic measurement - side b (WIFI) traffic",
+ _title_size=10,
+ _figsize=(x_fig_size, y_fig_size),
+ _show_bar_value=True,
+ _enable_csv=True,
+ _text_font=8,
+ _legend_loc="best",
+ _legend_box=(1.0, 1.0)
+ )
+ graph_png = graph.build_bar_graph_horizontal()
+ self.overall_report.set_graph_image(graph_png)
+ self.overall_report.move_graph_image()
+ self.overall_report.build_graph()
+ self.overall_report.set_csv_filename(graph_png)
+ self.overall_report.move_csv_file()
+
+ tos_dataframe_B = {
+ " Client Alias ": self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_B[tos]['resource_alias_B'],
+ " Host eid ": self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_B[tos]['resource_eid_B'],
+ " Host Name ": self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_B[tos]['resource_host_B'],
+ " Device Type / HW Ver ": self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_B[tos]['resource_hw_ver_B'],
+ " Endp Name": self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_B[tos]["clients_B"],
+ # TODO get correct size
+ " Port Name ": self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_B[tos]['port_B'],
+ " Mode ": self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_B[tos]['mode_B'],
+ " Mac ": self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_B[tos]['mac_B'],
+ " SSID ": self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_B[tos]['ssid_B'],
+ " Channel ": self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_B[tos]['channel_B'],
+ " Type of traffic ": self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_B[tos]['traffic_type_B'],
+ " Traffic Protocol ": self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_B[tos]['traffic_protocol_B'],
+ " Offered Upload Rate Per Client": self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_B[tos]['offered_upload_rate_B'],
+ " Offered Download Rate Per Client": self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_B[tos]['offered_download_rate_B'],
+ " Upload Rate Per Client": self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_B[tos]['ul_B'],
+ " Download Rate Per Client": self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_B[tos]['dl_B'],
+ " Drop Percentage (%)": self.mcast_obj_dict[ce][obj_name]["obj"].client_dict_B[tos]['download_rx_drop_percent_B']
+ }
+
+ dataframe3 = pd.DataFrame(tos_dataframe_B)
+ self.overall_report.set_table_dataframe(dataframe3)
+ self.overall_report.build_table()
+
+ # L3 total traffic # TODO csv_results_file present yet not readable
+ # self.overall_report.set_table_title("Total Layer 3 Cross-Connect Traffic across all Stations")
+ # self.overall_report.build_table_title()
+ # self.overall_report.set_table_dataframe_from_csv(self.mcast_obj_dict[ce][obj_name]["obj"].csv_results_file)
+ # self.overall_report.build_table()
+
+ # empty dictionarys evaluate to false , placing tables in output
+ if bool(self.mcast_obj_dict[ce][obj_name]["obj"].dl_port_csv_files):
+ for key, value in self.mcast_obj_dict[ce][obj_name]["obj"].dl_port_csv_files.items():
+ if self.mcast_obj_dict[ce][obj_name]["obj"].csv_data_to_report:
+ # read the csv file
+ self.overall_report.set_table_title("Layer 3 Cx Traffic {key}".format(key=key))
+ self.overall_report.build_table_title()
+ self.overall_report.set_table_dataframe_from_csv(value.name)
+ self.overall_report.build_table()
+
+ # read in column heading and last line
+ df = pd.read_csv(value.name)
+ last_row = df.tail(1)
+ self.overall_report.set_table_title(
+ "Layer 3 Cx Traffic Last Reporting Interval {key}".format(key=key))
+ self.overall_report.build_table_title()
+ self.overall_report.set_table_dataframe(last_row)
+ self.overall_report.build_table()
+ if ce == "series":
+ obj_no += 1
+ obj_name = f"mcast_test_{obj_no}"
+ else:
+ break
+
+ elif test_name == "vs_test":
+ obj_no=1
+ obj_name = "vs_test"
+ if ce == "series":
+ obj_name += "_1"
+ while obj_name in self.vs_obj_dict[ce]:
+ if ce == "parallel":
+ obj_no = ''
+ params = self.vs_obj_dict[ce][obj_name]["data"].copy()
+ date = params["date"]
+
+ iterations_before_test_stopped_by_user = (
+ params["iterations_before_test_stopped_by_user"].copy()
+ if isinstance(params["iterations_before_test_stopped_by_user"], (list, dict, set))
+ else params["iterations_before_test_stopped_by_user"]
+ )
+
+ test_setup_info = (
+ params["test_setup_info"].copy()
+ if isinstance(params["test_setup_info"], (list, dict, set))
+ else params["test_setup_info"]
+ )
+
+ realtime_dataset = (
+ params["realtime_dataset"].copy()
+ if isinstance(params["realtime_dataset"], (list, dict, set))
+ else params["realtime_dataset"]
+ )
+
+ report_path = (
+ params["report_path"].copy()
+ if isinstance(params["report_path"], (list, dict, set))
+ else params["report_path"]
+ )
+
+ cx_order_list = (
+ params["cx_order_list"].copy()
+ if isinstance(params["cx_order_list"], (list, dict, set))
+ else params["cx_order_list"]
+ )
+ self.overall_report.set_obj_html(_obj_title=f'Video Streaming Test {obj_no}', _obj="")
+ self.overall_report.build_objective()
+ created_incremental_values = self.vs_obj_dict[ce][obj_name]["obj"].get_incremental_capacity_list()
+ keys = list(self.vs_obj_dict[ce][obj_name]["obj"].http_profile.created_cx.keys())
+
+ self.overall_report.set_table_title("Input Parameters")
+ self.overall_report.build_table_title()
+ if self.vs_obj_dict[ce][obj_name]["obj"].config:
+ test_setup_info["SSID"] = self.vs_obj_dict[ce][obj_name]["obj"].ssid
+ test_setup_info["Password"] = self.vs_obj_dict[ce][obj_name]["obj"].passwd
+ test_setup_info["ENCRYPTION"] = self.vs_obj_dict[ce][obj_name]["obj"].encryp
+ elif len(self.vs_obj_dict[ce][obj_name]["obj"].selected_groups) > 0 and len(self.vs_obj_dict[ce][obj_name]["obj"].selected_profiles) > 0:
+ # Map each group with a profile
+ gp_pairs = zip(self.vs_obj_dict[ce][obj_name]["obj"].selected_groups, self.vs_obj_dict[ce][obj_name]["obj"].selected_profiles)
+ # Create a string by joining the mapped pairs
+ gp_map = ", ".join(f"{group} -> {profile}" for group, profile in gp_pairs)
+ test_setup_info["Configuration"] = gp_map
+
+ self.overall_report.test_setup_table(value="Test Setup Information", test_setup_data=test_setup_info)
+
+ device_type = []
+ username = []
+ ssid = []
+ mac = []
+ channel = []
+ mode = []
+ rssi = []
+ channel = []
+ tx_rate = []
+ resource_ids = list(map(int, self.vs_obj_dict[ce][obj_name]["obj"].resource_ids.split(',')))
+ try:
+ eid_data = self.vs_obj_dict[ce][obj_name]["obj"].json_get("ports?fields=alias,mac,mode,Parent Dev,rx-rate,tx-rate,ssid,signal,channel")
+ except KeyError:
+ logger.error("Error: 'interfaces' key not found in port data")
+ exit(1)
+
+ # Loop through interfaces
+ for alias in eid_data["interfaces"]:
+ for i in alias:
+ # Check interface index and alias
+ if int(i.split(".")[1]) > 1 and alias[i]["alias"] == 'wlan0':
+
+ # Get resource data for specific interface
+ resource_hw_data = self.vs_obj_dict[ce][obj_name]["obj"].json_get("/resource/" + i.split(".")[0] + "/" + i.split(".")[1])
+ hw_version = resource_hw_data['resource']['hw version']
+
+ # Filter based on OS and resource ID
+ if not hw_version.startswith(('Win', 'Linux', 'Apple')) and int(resource_hw_data['resource']['eid'].split('.')[1]) in resource_ids:
+ device_type.append('Android')
+ username.append(resource_hw_data['resource']['user'])
+ ssid.append(alias[i]['ssid'])
+ mac.append(alias[i]['mac'])
+ mode.append(alias[i]['mode'])
+ rssi.append(alias[i]['signal'])
+ channel.append(alias[i]['channel'])
+ tx_rate.append(alias[i]['tx-rate'])
+ total_urls = self.vs_obj_dict[ce][obj_name]["obj"].data["total_urls"]
+ total_err = self.vs_obj_dict[ce][obj_name]["obj"].data["total_err"]
+ total_buffer = self.vs_obj_dict[ce][obj_name]["obj"].data["total_buffer"]
+ max_bytes_rd_list = []
+ avg_rx_rate_list = []
+ # Iterate through the length of cx_order_list
+ for iter in range(len(iterations_before_test_stopped_by_user)):
+ data_set_in_graph, wait_time_data, devices_on_running_state, device_names_on_running = [], [], [], []
+ devices_data_to_create_wait_time_bar_graph = []
+ max_video_rate, min_video_rate, avg_video_rate = [], [], []
+ total_url_data, rssi_data = [], []
+ trimmed_data_set_in_graph = []
+ max_bytes_rd_list = []
+ avg_rx_rate_list = []
+ # Retrieve data for the previous iteration, if it's not the first iteration
+ if iter != 0:
+ before_data_iter = realtime_dataset[realtime_dataset['iteration'] == iter]
+ # Retrieve data for the current iteration
+ data_iter = realtime_dataset[realtime_dataset['iteration'] == iter + 1]
+
+ # Populate the list of devices on running state and their corresponding usernames
+ for j in range(created_incremental_values[iter]):
+ devices_on_running_state.append(keys[j])
+ device_names_on_running.append(username[j])
+
+ # Iterate through each device currently running
+ for k in devices_on_running_state:
+ # Filter columns related to the current device
+ columns_with_substring = [col for col in data_iter.columns if k in col]
+ filtered_df = data_iter[columns_with_substring]
+ min_val = self.vs_obj_dict[ce][obj_name]["obj"].process_list(filtered_df[[col for col in filtered_df.columns if "video_format_bitrate" in col][0]].values.tolist())
+ if iter != 0:
+ # Filter columns related to the current device from the previous iteration
+ before_iter_columns_with_substring = [col for col in before_data_iter.columns if k in col]
+ before_filtered_df = before_data_iter[before_iter_columns_with_substring]
+
+ # Extract and compute max, min, and average video rates
+ max_video_rate.append(max(filtered_df[[col for col in filtered_df.columns if "video_format_bitrate" in col][0]].values.tolist()))
+ min_video_rate.append(min_val)
+ avg_video_rate.append(round(sum(filtered_df[[col for col in filtered_df.columns if "video_format_bitrate" in col][0]].values.tolist()) /
+ len(filtered_df[[col for col in filtered_df.columns if "video_format_bitrate" in col][0]].values.tolist()), 2))
+ wait_time_data.append(filtered_df[[col for col in filtered_df.columns if "total_wait_time" in col][0]].values.tolist()[-1])
+ rssi_data.append(int(round(sum(filtered_df[[col for col in filtered_df.columns if "RSSI" in col][0]].values.tolist()) /
+ len(filtered_df[[col for col in filtered_df.columns if "RSSI" in col][0]].values.tolist()), 2)) * -1)
+ # Extract maximum bytes read for the device
+ max_bytes_rd = max(filtered_df[[col for col in filtered_df.columns if "bytes_rd" in col][0]].values.tolist())
+ max_bytes_rd_list.append(max_bytes_rd)
+
+ # Calculate and append the average RX rate in Mbps
+ rx_rate_values = filtered_df[[col for col in filtered_df.columns if "rx rate" in col][0]].values.tolist()
+ avg_rx_rate_list.append(round((sum(rx_rate_values) / len(rx_rate_values)) / 1_000_000, 2)) # Convert bps to Mbps
+
+ if iter != 0:
+ # Calculate the difference in total URLs between the current and previous iterations
+ total_url_data.append(abs(filtered_df[[col for col in filtered_df.columns if "total_urls" in col][0]].values.tolist()[-1] -
+ before_filtered_df[[col for col in before_filtered_df.columns if "total_urls" in col][0]].values.tolist()[-1]))
+ else:
+ # Append the total URLs for the first iteration
+ total_url_data.append(filtered_df[[col for col in filtered_df.columns if "total_urls" in col][0]].values.tolist()[-1])
+
+ # Append the wait time data to the list for creating the wait time bar graph
+ devices_data_to_create_wait_time_bar_graph.append(wait_time_data)
+
+ # Extract overall video format bitrate values for the current iteration and append to data_set_in_graph
+ video_streaming_values_list = realtime_dataset['overall_video_format_bitrate'][realtime_dataset['iteration'] == iter + 1].values.tolist()
+ data_set_in_graph.append(video_streaming_values_list)
+
+ # Trim the data in data_set_in_graph and append to trimmed_data_set_in_graph
+ for _ in range(len(data_set_in_graph)):
+ trimmed_data_set_in_graph.append(self.vs_obj_dict[ce][obj_name]["obj"].trim_data(len(data_set_in_graph[_]), data_set_in_graph[_]))
+
+ # If there are multiple incremental values, add custom HTML content to the report for the current iteration
+ if len(created_incremental_values) > 1:
+                            self.overall_report.set_custom_html(f"<h3>Iteration-{iter + 1}</h3>")
+ self.overall_report.build_custom()
+
+ self.overall_report.set_obj_html(
+ _obj_title=f"Realtime Video Rate: Number of devices running: {len(device_names_on_running)}",
+ _obj="")
+ self.overall_report.build_objective()
+
+ # Create a line graph for video rate over time
+ graph = lf_line_graph(_data_set=trimmed_data_set_in_graph,
+ _xaxis_name="Time",
+ _yaxis_name="Video Rate (Mbps)",
+ _xaxis_categories=self.vs_obj_dict[ce][obj_name]["obj"].trim_data(len(realtime_dataset['timestamp'][realtime_dataset['iteration'] == iter + 1].values.tolist()),
+ realtime_dataset['timestamp'][realtime_dataset['iteration'] == iter + 1].values.tolist()),
+ _label=['Rate'],
+ _graph_image_name=f"vs_line_graph{iter}{obj_no}"
+ )
+ graph_png = graph.build_line_graph()
+ logger.info("graph name {}".format(graph_png))
+ self.overall_report.set_graph_image(graph_png)
+ self.overall_report.move_graph_image()
+
+ self.overall_report.build_graph()
+
+ # Define figure size for horizontal bar graphs
+ x_fig_size = 15
+ y_fig_size = len(devices_on_running_state) * .5 + 4
+
+ self.overall_report.set_obj_html(
+ _obj_title="Total Urls Per Device",
+ _obj="")
+ self.overall_report.build_objective()
+ # Create a horizontal bar graph for total URLs per device
+ graph = lf_bar_graph_horizontal(_data_set=[total_urls[:created_incremental_values[iter]]],
+ _xaxis_name="Total Urls",
+ _yaxis_name="Devices",
+ _graph_image_name=f"total_urls_image_name{iter}{obj_no}",
+ _label=["Total Urls"],
+ _yaxis_categories=device_names_on_running,
+ _legend_loc="best",
+ _legend_box=(1.0, 1.0),
+ _show_bar_value=True,
+ _figsize=(x_fig_size, y_fig_size)
+ # _color=['lightcoral']
+ )
+ graph_png = graph.build_bar_graph_horizontal()
+ logger.info("wait time graph name {}".format(graph_png))
+ graph.build_bar_graph_horizontal()
+ self.overall_report.set_graph_image(graph_png)
+ self.overall_report.move_graph_image()
+ self.overall_report.build_graph()
+
+ self.overall_report.set_obj_html(
+ _obj_title="Max/Min Video Rate Per Device",
+ _obj="")
+ self.overall_report.build_objective()
+
+ # Create a horizontal bar graph for max and min video rates per device
+ graph = lf_bar_graph_horizontal(_data_set=[max_video_rate, min_video_rate],
+ _xaxis_name="Max/Min Video Rate(Mbps)",
+ _yaxis_name="Devices",
+ _graph_image_name=f"max-min-video-rate_image_name{iter}{obj_no}",
+ _label=['Max Video Rate', 'Min Video Rate'],
+ _yaxis_categories=device_names_on_running,
+ _legend_loc="best",
+ _legend_box=(1.0, 1.0),
+ _show_bar_value=True,
+ _figsize=(x_fig_size, y_fig_size)
+ # _color=['lightcoral']
+ )
+ graph_png = graph.build_bar_graph_horizontal()
+ logger.info("max/min graph name {}".format(graph_png))
+ graph.build_bar_graph_horizontal()
+ self.overall_report.set_graph_image(graph_png)
+ self.overall_report.move_graph_image()
+ self.overall_report.build_graph()
+
+ self.overall_report.set_obj_html(
+ _obj_title="Wait Time Per Device",
+ _obj="")
+ self.overall_report.build_objective()
+
+ # Create a horizontal bar graph for wait time per device
+ graph = lf_bar_graph_horizontal(_data_set=devices_data_to_create_wait_time_bar_graph,
+ _xaxis_name="Wait Time(seconds)",
+ _yaxis_name="Devices",
+ _graph_image_name=f"wait_time_image_name{iter}{obj_no}",
+ _label=['Wait Time'],
+ _yaxis_categories=device_names_on_running,
+ _legend_loc="best",
+ _legend_box=(1.0, 1.0),
+ _show_bar_value=True,
+ _figsize=(x_fig_size, y_fig_size)
+ # _color=['lightcoral']
+ )
+ graph_png = graph.build_bar_graph_horizontal()
+ logger.info("wait time graph name {}".format(graph_png))
+ graph.build_bar_graph_horizontal()
+ self.overall_report.set_graph_image(graph_png)
+ self.overall_report.move_graph_image()
+ self.overall_report.build_graph()
+
+ if self.vs_obj_dict[ce][obj_name]["obj"].dowebgui and self.vs_obj_dict[ce][obj_name]["obj"].get_live_view:
+ script_dir = os.path.dirname(os.path.abspath(__file__))
+
+                                self.overall_report.set_custom_html("<h3>No of Buffers and Wait Time %</h3>")
+ self.overall_report.build_custom()
+
+ for floor in range(int(self.vs_obj_dict[ce][obj_name]["obj"].floors)):
+ # Construct expected image paths
+ vs_buffer_image = os.path.join(script_dir, "heatmap_images", f"{self.vs_obj_dict[ce][obj_name]['obj'].test_name}_vs_buffer_{floor+1}.png")
+ vs_wait_time_image = os.path.join(script_dir, "heatmap_images", f"{self.vs_obj_dict[ce][obj_name]['obj'].test_name}_vs_wait_time_{floor+1}.png")
+
+
+ # Wait for all required images to be generated (up to timeout)
+ timeout = 60 # seconds
+ start_time = time.time()
+
+ while not (os.path.exists(vs_buffer_image) and os.path.exists(vs_wait_time_image)):
+ if time.time() - start_time > timeout:
+ print(f"Timeout: Heatmap images for floor {floor + 1} not found within {timeout} seconds.")
+ break
+ time.sleep(1)
+
+ # Generate report sections for each image if it exists
+ for image_path in [vs_buffer_image, vs_wait_time_image,]:
+ if os.path.exists(image_path):
+                                        self.overall_report.set_custom_html(f'<img src="{image_path}" style="max-width:100%;">')
+ self.overall_report.build_custom()
+
+ # Table 1
+ self.overall_report.set_obj_html("Overall - Detailed Result Table", "The below tables provides detailed information for the Video Streaming test.")
+ self.overall_report.build_objective()
+ test_data = {
+ "iter": iter,
+ "created_incremental_values": created_incremental_values,
+ "device_type": device_type,
+ "username": username,
+ "ssid": ssid,
+ "mac": mac,
+ "channel": channel,
+ "mode": mode,
+ "total_buffer": total_buffer,
+ "wait_time_data": wait_time_data,
+ "min_video_rate": min_video_rate,
+ "avg_video_rate": avg_video_rate,
+ "max_video_rate": max_video_rate,
+ "total_urls": total_urls,
+ "total_err": total_err,
+ "rssi_data": rssi_data,
+ "tx_rate": tx_rate,
+ "max_bytes_rd_list": max_bytes_rd_list,
+ "avg_rx_rate_list": avg_rx_rate_list
+ }
+
+ dataframe = self.vs_obj_dict[ce][obj_name]["obj"].handle_passfail_criteria(test_data)
+
+ dataframe1 = pd.DataFrame(dataframe)
+ self.overall_report.set_table_dataframe(dataframe1)
+ self.overall_report.build_table()
+
+ # Set and build title for the overall results table
+ self.overall_report.set_obj_html("Detailed Total Errors Table", "The below tables provides detailed information of total errors for the web browsing test.")
+ self.overall_report.build_objective()
+ dataframe2 = {
+ " DEVICE": username[:created_incremental_values[iter]],
+ " TOTAL ERRORS ": total_err[:created_incremental_values[iter]],
+ }
+ dataframe3 = pd.DataFrame(dataframe2)
+ self.overall_report.set_table_dataframe(dataframe3)
+ self.overall_report.build_table()
+ if ce == "series":
+ obj_no += 1
+ obj_name = f"vs_test_{obj_no}"
+ else:
+ break
+
+ elif test_name =="rb_test":
+ obj_no=1
+ obj_name = "rb_test"
+ if ce == "series":
+ obj_name += "_1"
+ while obj_name in self.rb_obj_dict[ce]:
+ if ce == "parallel":
+ obj_no = ''
+ self.overall_report.set_obj_html(_obj_title=f'Real Browser Test {obj_no}', _obj="")
+ self.overall_report.build_objective()
+ self.overall_report.set_table_title("Test Parameters:")
+ self.overall_report.build_table_title()
+
+ final_eid_data = []
+ mac_data = []
+ channel_data = []
+ signal_data = []
+ ssid_data = []
+ tx_rate_data = []
+ device_type_data = []
+ device_names = []
+ total_urls = []
+ time_to_target_urls = []
+ uc_min_data = []
+ uc_max_data = []
+ uc_avg_data = []
+ total_err_data = []
+
+ final_eid_data, mac_data, channel_data, signal_data, ssid_data, tx_rate_data, device_names, device_type_data = self.rb_obj_dict[ce][obj_name]["obj"].extract_device_data('{}/real_time_data.csv'.format(self.rb_obj_dict[ce][obj_name]["obj"].report_path_date_time))
+
+ test_setup_info = self.rb_obj_dict[ce][obj_name]["obj"].generate_test_setup_info()
+ self.overall_report.test_setup_table(
+ test_setup_data=test_setup_info, value='Test Parameters')
+ self.rb_obj_dict[ce][obj_name]["obj"].csv_file_names
+ for i in range(0, len(self.rb_obj_dict[ce][obj_name]["obj"].csv_file_names)):
+ if self.rb_obj_dict[ce][obj_name]["obj"].csv_file_names[i].startswith("real_time_data.csv"):
+ continue
+
+ final_eid_data, mac_data, channel_data, signal_data, ssid_data, tx_rate_data, device_names, device_type_data = self.rb_obj_dict[ce][obj_name]["obj"].extract_device_data("{}/{}".format(self.rb_obj_dict[ce][obj_name]["obj"].report_path_date_time,self.rb_obj_dict[ce][obj_name]["obj"].csv_file_names[i]))
+ self.overall_report.set_graph_title("Successful URL's per Device")
+ self.overall_report.build_graph_title()
+
+ data = pd.read_csv("{}/{}".format(self.rb_obj_dict[ce][obj_name]["obj"].report_path_date_time,self.rb_obj_dict[ce][obj_name]["obj"].csv_file_names[i]))
+
+ # Extract device names from CSV
+ if 'total_urls' in data.columns:
+ total_urls = data['total_urls'].tolist()
+ else:
+ raise ValueError("The 'total_urls' column was not found in the CSV file.")
+
+ x_fig_size = 18
+ y_fig_size = len(device_type_data) * 1 + 4
+ print('DEVICE NAMES',device_names)
+ bar_graph_horizontal = lf_bar_graph_horizontal(
+ _data_set=[total_urls],
+ _xaxis_name="URL",
+ _yaxis_name="Devices",
+ _yaxis_label=device_names,
+ _yaxis_categories=device_names,
+ _yaxis_step=1,
+ _yticks_font=8,
+ _bar_height=.20,
+ _show_bar_value=True,
+ _figsize=(x_fig_size, y_fig_size),
+ _graph_title="URLs",
+ _graph_image_name=f"{self.rb_obj_dict[ce][obj_name]['obj'].csv_file_names[i]}_urls_per_device{obj_no}",
+ _label=["URLs"]
+ )
+ # print('yaxssss)
+ graph_image = bar_graph_horizontal.build_bar_graph_horizontal()
+ self.overall_report.set_graph_image(graph_image)
+ self.overall_report.move_graph_image()
+ self.overall_report.build_graph()
+
+ self.overall_report.set_graph_title(f"Time Taken Vs Device For Completing {self.rb_obj_dict[ce][obj_name]['obj'].count} RealTime URLs")
+ self.overall_report.build_graph_title()
+
+ # Extract device names from CSV
+ if 'time_to_target_urls' in data.columns:
+ time_to_target_urls = data['time_to_target_urls'].tolist()
+ else:
+ raise ValueError("The 'time_to_target_urls' column was not found in the CSV file.")
+
+ x_fig_size = 18
+ y_fig_size = len(device_type_data) * 1 + 4
+ bar_graph_horizontal = lf_bar_graph_horizontal(
+ _data_set=[time_to_target_urls],
+ _xaxis_name="Time (in Seconds)",
+ _yaxis_name="Devices",
+ _yaxis_label=device_names,
+ _yaxis_categories=device_names,
+ _yaxis_step=1,
+ _yticks_font=8,
+ _bar_height=.20,
+ _show_bar_value=True,
+ _figsize=(x_fig_size, y_fig_size),
+ _graph_title="Time Taken",
+ _graph_image_name=f"{self.rb_obj_dict[ce][obj_name]['obj'].csv_file_names[i]}_time_taken_for_urls{obj_no}",
+ _label=["Time (in sec)"]
+ )
+ graph_image = bar_graph_horizontal.build_bar_graph_horizontal()
+ self.overall_report.set_graph_image(graph_image)
+ self.overall_report.move_graph_image()
+ self.overall_report.build_graph()
+
+ if 'uc_min' in data.columns:
+ uc_min_data = data['uc_min'].tolist()
+ else:
+ raise ValueError("The 'uc_min' column was not found in the CSV file.")
+
+ if 'uc_max' in data.columns:
+ uc_max_data = data['uc_max'].tolist()
+ else:
+ raise ValueError("The 'uc_max' column was not found in the CSV file.")
+
+ if 'uc_avg' in data.columns:
+ uc_avg_data = data['uc_avg'].tolist()
+ else:
+ raise ValueError("The 'uc_avg' column was not found in the CSV file.")
+
+ if 'total_err' in data.columns:
+ total_err_data = data['total_err'].tolist()
+ else:
+ raise ValueError("The 'total_err' column was not found in the CSV file.")
+
+ self.overall_report.set_table_title("Final Test Results")
+ self.overall_report.build_table_title()
+ if self.rb_obj_dict[ce][obj_name]["obj"].expected_passfail_value or self.rb_obj_dict[ce][obj_name]["obj"].device_csv_name:
+ pass_fail_list, test_input_list = self.rb_obj_dict[ce][obj_name]["obj"].generate_pass_fail_list(device_type_data, device_names, total_urls)
+
+ final_test_results = {
+
+ "Device Type": device_type_data,
+ "Hostname": device_names,
+ "SSID": ssid_data,
+ "MAC": mac_data,
+ "Channel": channel_data,
+ "UC-MIN (ms)": uc_min_data,
+ "UC-MAX (ms)": uc_max_data,
+ "UC-AVG (ms)": uc_avg_data,
+ "Total Successful URLs": total_urls,
+ "Expected URLS": test_input_list,
+ "Total Erros": total_err_data,
+ "RSSI": signal_data,
+ "Link Speed": tx_rate_data,
+ "Status ": pass_fail_list
+
+ }
+ else:
+ final_test_results = {
+
+ "Device Type": device_type_data,
+ "Hostname": device_names,
+ "SSID": ssid_data,
+ "MAC": mac_data,
+ "Channel": channel_data,
+ "UC-MIN (ms)": uc_min_data,
+ "UC-MAX (ms)": uc_max_data,
+ "UC-AVG (ms)": uc_avg_data,
+ "Total Successful URLs": total_urls,
+ "Total Erros": total_err_data,
+ "RSSI": signal_data,
+ "Link Speed": tx_rate_data,
+
+ }
+ logger.info(f"dataframe realbrowser {final_test_results}")
+ test_results_df = pd.DataFrame(final_test_results)
+ self.overall_report.set_table_dataframe(test_results_df)
+ self.overall_report.build_table()
+
+ if self.rb_obj_dict[ce][obj_name]["obj"].dowebgui:
+
+ os.chdir(self.rb_obj_dict[ce][obj_name]["obj"].original_dir)
+
+ self.overall_report.build_custom()
+ if ce == "series":
+ obj_no += 1
+ obj_name = f"rb_test_{obj_no}"
+ else:
+ break
+
+ elif test_name == "yt_test":
+ obj_no=1
+ obj_name = "yt_test"
+ if ce == "series":
+ obj_name += "_1"
+ while obj_name in self.yt_obj_dict[ce]:
+ if ce == "parallel":
+ obj_no = ''
+ result_data = self.yt_obj_dict[ce][obj_name]["obj"].stats_api_response
+ for device, stats in result_data.items():
+ self.yt_obj_dict[ce][obj_name]["obj"].mydatajson.setdefault(device, {}).update({
+ "Viewport": stats.get("Viewport", ""),
+ "DroppedFrames": stats.get("DroppedFrames", "0"),
+ "TotalFrames": stats.get("TotalFrames", "0"),
+ "CurrentRes": stats.get("CurrentRes", ""),
+ "OptimalRes": stats.get("OptimalRes", ""),
+ "BufferHealth": stats.get("BufferHealth", "0.0"),
+ "Timestamp": stats.get("Timestamp", ""),
+ })
+
+ if self.yt_obj_dict[ce][obj_name]["obj"].config:
+
+ # Test setup info
+ test_setup_info = {
+ 'Test Name': 'YouTube Streaming Test',
+ 'Duration (in Minutes)': self.yt_obj_dict[ce][obj_name]["obj"].duration,
+ 'Resolution': self.yt_obj_dict[ce][obj_name]["obj"].resolution,
+ 'Configured Devices': self.yt_obj_dict[ce][obj_name]["obj"].hostname_os_combination,
+ 'No of Devices :': f' Total({len(self.yt_obj_dict[ce][obj_name]["obj"].real_sta_os_types)}) : W({self.yt_obj_dict[ce][obj_name]["obj"].windows}),L({self.yt_obj_dict[ce][obj_name]["obj"].linux}),M({self.yt_obj_dict[ce][obj_name]["obj"].mac})',
+ "Video URL": self.yt_obj_dict[ce][obj_name]["obj"].url,
+ "SSID": self.yt_obj_dict[ce][obj_name]["obj"].ssid,
+ "Security": self.yt_obj_dict[ce][obj_name]["obj"].security,
+
+ }
+
+ elif len(self.yt_obj_dict[ce][obj_name]["obj"].selected_groups) > 0 and len(self.yt_obj_dict[ce][obj_name]["obj"].selected_profiles) > 0:
+ gp_pairs = zip(self.yt_obj_dict[ce][obj_name]["obj"].selected_groups, self.yt_obj_dict[ce][obj_name]["obj"].selected_profiles)
+ gp_map = ", ".join(f"{group} -> {profile}" for group, profile in gp_pairs)
+
+ # Test setup info
+ test_setup_info = {
+ 'Test Name': 'YouTube Streaming Test',
+ 'Duration (in Minutes)': self.yt_obj_dict[ce][obj_name]["obj"].duration,
+ 'Resolution': self.yt_obj_dict[ce][obj_name]["obj"].resolution,
+ "Configuration": gp_map,
+ 'Configured Devices': self.yt_obj_dict[ce][obj_name]["obj"].hostname_os_combination,
+ 'No of Devices :': f' Total({len(self.yt_obj_dict[ce][obj_name]["obj"].real_sta_os_types)}) : W({self.yt_obj_dict[ce][obj_name]["obj"].windows}),L({self.yt_obj_dict[ce][obj_name]["obj"].linux}),M({self.yt_obj_dict[ce][obj_name]["obj"].mac})',
+ "Video URL": self.yt_obj_dict[ce][obj_name]["obj"].url,
+
+ }
+ else:
+ # Test setup info
+ test_setup_info = {
+ 'Test Name': 'YouTube Streaming Test',
+ 'Duration (in Minutes)': self.yt_obj_dict[ce][obj_name]["obj"].duration,
+ 'Resolution': self.yt_obj_dict[ce][obj_name]["obj"].resolution,
+ 'Configured Devices': self.yt_obj_dict[ce][obj_name]["obj"].hostname_os_combination,
+ 'No of Devices :': f' Total({len(self.yt_obj_dict[ce][obj_name]["obj"].real_sta_os_types)}) : W({self.yt_obj_dict[ce][obj_name]["obj"].windows}),L({self.yt_obj_dict[ce][obj_name]["obj"].linux}),M({self.yt_obj_dict[ce][obj_name]["obj"].mac})',
+ "Video URL": self.yt_obj_dict[ce][obj_name]["obj"].url,
+
+ }
+ self.overall_report.set_obj_html(_obj_title=f'Youtube Streaming Test {obj_no}', _obj="")
+ self.overall_report.build_objective()
+ self.overall_report.test_setup_table(
+ test_setup_data=test_setup_info, value='Test Parameters')
+
+ viewport_list = []
+ current_res_list = []
+ optimal_res_list = []
+
+ dropped_frames_list = []
+ total_frames_list = []
+ max_buffer_health_list = []
+ min_buffer_health_list = []
+
+ for hostname in self.yt_obj_dict[ce][obj_name]["obj"].real_sta_hostname:
+ if hostname in self.yt_obj_dict[ce][obj_name]["obj"].mydatajson:
+ stats = self.yt_obj_dict[ce][obj_name]["obj"].mydatajson[hostname]
+ viewport_list.append(stats.get("Viewport", ""))
+ current_res_list.append(stats.get("CurrentRes", ""))
+ optimal_res_list.append(stats.get("OptimalRes", ""))
+
+ dropped_frames = stats.get("DroppedFrames", "0")
+ total_frames = stats.get("TotalFrames", "0")
+ max_buffer_health = stats.get("maxbufferhealth", "0,0")
+ min_buffer_health = stats.get("minbufferhealth", "0.0")
+ try:
+ dropped_frames_list.append(int(dropped_frames))
+ except ValueError:
+ dropped_frames_list.append(0)
+
+ try:
+ total_frames_list.append(int(total_frames))
+ except ValueError:
+ total_frames_list.append(0)
+ try:
+ max_buffer_health_list.append(float(max_buffer_health))
+ except ValueError:
+ max_buffer_health_list.append(0.0)
+
+ try:
+ min_buffer_health_list.append(float(min_buffer_health))
+ except ValueError:
+ min_buffer_health_list.append(0.0)
+
+ else:
+ viewport_list.append("NA")
+ current_res_list.append("NA")
+ optimal_res_list.append("NA")
+ dropped_frames_list.append(0)
+ total_frames_list.append(0)
+ max_buffer_health_list.append(0.0)
+ min_buffer_health_list.append(0.0)
+
+ # graph of frames dropped
+ self.overall_report.set_graph_title("Total Frames vs Frames dropped")
+ self.overall_report.build_graph_title()
+ x_fig_size = 25
+ y_fig_size = len(self.yt_obj_dict[ce][obj_name]["obj"].device_names) * .5 + 4
+
+ graph = lf_bar_graph_horizontal(_data_set=[dropped_frames_list, total_frames_list],
+ _xaxis_name="No of Frames",
+ _yaxis_name="Devices",
+ _yaxis_categories=self.yt_obj_dict[ce][obj_name]["obj"].real_sta_hostname,
+ _graph_image_name=f"Dropped Frames vs Total Frames{obj_no}",
+ _label=["dropped Frames", "Total Frames"],
+ _color=None,
+ _color_edge='red',
+ _figsize=(x_fig_size, y_fig_size),
+ _show_bar_value=True,
+ _text_font=6,
+ _text_rotation=True,
+ _enable_csv=True,
+ _legend_loc="upper right",
+ _legend_box=(1.1, 1),
+ )
+ graph_image = graph.build_bar_graph_horizontal()
+ self.overall_report.set_graph_image(graph_image)
+ self.overall_report.move_graph_image()
+ self.overall_report.build_graph()
+
+ self.overall_report.set_table_title('Test Results')
+ self.overall_report.build_table_title()
+
+ test_results = {
+ "Hostname": self.yt_obj_dict[ce][obj_name]["obj"].real_sta_hostname,
+ "OS Type": self.yt_obj_dict[ce][obj_name]["obj"].real_sta_os_types,
+ "MAC": self.yt_obj_dict[ce][obj_name]["obj"].mac_list,
+ "RSSI": self.yt_obj_dict[ce][obj_name]["obj"].rssi_list,
+ "Link Rate": self.yt_obj_dict[ce][obj_name]["obj"].link_rate_list,
+ "ViewPort": viewport_list,
+ "SSID": self.yt_obj_dict[ce][obj_name]["obj"].ssid_list,
+ "Video Resoultion": current_res_list,
+ "Max Buffer Health (Seconds)": max_buffer_health_list,
+ "Min Buffer health (Seconds)": min_buffer_health_list,
+ "Total Frames": total_frames_list,
+ "Dropped Frames": dropped_frames_list,
+
+
+ }
+
+ test_results_df = pd.DataFrame(test_results)
+ self.overall_report.set_table_dataframe(test_results_df)
+ self.overall_report.build_table()
+
+ # for file_path in self.yt_obj_dict[ce][obj_name]["obj"].devices_list:
+ # self.yt_obj_dict[ce][obj_name]["obj"].move_files(file_path, self.yt_obj_dict[ce][obj_name]["obj"].report_path_date_time)
+
+ original_dir = os.getcwd()
+
+ if self.yt_obj_dict[ce][obj_name]["obj"].do_webUI:
+ csv_files = [f for f in os.listdir(self.yt_obj_dict[ce][obj_name]["obj"].report_path_date_time) if f.endswith('.csv')]
+ os.chdir(self.yt_obj_dict[ce][obj_name]["obj"].report_path_date_time)
+ else:
+ csv_files = [f for f in os.listdir(self.yt_obj_dict[ce][obj_name]["obj"].report_path_date_time) if f.endswith('.csv')]
+ os.chdir(self.yt_obj_dict[ce][obj_name]["obj"].report_path_date_time)
+ print("CSV FILES",csv_files)
+ print("Script Directory:", os.path.dirname(os.path.abspath(__file__)))
+ scp_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),self.report_path_date_time)
+ for file_name in csv_files:
+ data = pd.read_csv(file_name)
+ print('dataaaaaaaaaaaaa',data)
+ self.overall_report.set_graph_title('Buffer Health vs Time Graph for {}'.format(file_name.split('_')[0]))
+ self.overall_report.build_graph_title()
+
+ try:
+ data['TimeStamp'] = pd.to_datetime(data['TimeStamp'], format="%H:%M:%S").dt.time
+ except Exception as e:
+ logging.error(f"Error in timestamp conversion for {file_name}: {e}")
+ continue
+
+ data = data.drop_duplicates(subset='TimeStamp', keep='first')
+
+ data = data.sort_values(by='TimeStamp')
+
+ timestamps = data['TimeStamp'].apply(lambda t: t.strftime('%H:%M:%S'))
+ buffer_health = data['BufferHealth']
+
+ fig, ax = plt.subplots(figsize=(20, 10))
+ plt.plot(timestamps, buffer_health, color='blue', linewidth=2)
+
+ # Customize the plot
+ plt.xlabel('Time', fontweight='bold', fontsize=15)
+ plt.ylabel('Buffer Health', fontweight='bold', fontsize=15)
+ plt.title('Buffer Health vs Time Graph for {}'.format(file_name.split('_')[0]), fontsize=18)
+
+ if len(timestamps) > 30:
+ tick_interval = len(timestamps) // 30
+ selected_ticks = timestamps[::tick_interval]
+ ax.set_xticks(selected_ticks)
+ else:
+ ax.set_xticks(timestamps)
+
+ plt.xticks(rotation=45, ha='right')
+
+ # output_file = '{}'.format(file_name.split('_')[0]) + 'buffer_health_vs_time.png'
+ output_file = os.path.join(scp_path,f"{file_name.split('_')[0]}buffer_health_vs_time.png{obj_no}")
+ plt.tight_layout()
+ plt.savefig(output_file, dpi=96)
+ plt.close()
+ abs_path = os.path.abspath(output_file)
+ logging.info(f"Graph saved PATH {file_name}: {abs_path}")
+
+ logging.info(f"Graph saved for {file_name}: {output_file}")
+
+ self.overall_report.set_graph_image(output_file)
+
+ self.overall_report.build_graph()
+
+ os.chdir(original_dir)
+ if ce == "series":
+ obj_no += 1
+ obj_name = f"yt_test_{obj_no}"
+ else:
+ break
+
+ elif test_name == "zoom_test":
+ obj_no=1
+ obj_name = "zoom_test"
+ if ce == "series":
+ obj_name += "_1"
+ while obj_name in self.zoom_obj_dict[ce]:
+ if ce == "parallel":
+ obj_no = ''
+ self.overall_report.set_obj_html(_obj_title=f'ZOOM Test {obj_no}', _obj="")
+ self.overall_report.build_objective()
+ self.overall_report.set_table_title("Test Parameters:")
+ self.overall_report.build_table_title()
+ testtype = ""
+ if self.zoom_obj_dict[ce][obj_name]["obj"].audio and self.zoom_obj_dict[ce][obj_name]["obj"].video:
+ testtype = "AUDIO & VIDEO"
+ elif self.zoom_obj_dict[ce][obj_name]["obj"].audio:
+ testtype = "AUDIO"
+ elif self.zoom_obj_dict[ce][obj_name]["obj"].video:
+ testtype = "VIDEO"
+
+ if self.zoom_obj_dict[ce][obj_name]["obj"].config:
+ test_parameters = pd.DataFrame([{
+ "Configured Devices": self.zoom_obj_dict[ce][obj_name]["obj"].hostname_os_combination,
+ 'No of Clients': f'W({self.zoom_obj_dict[ce][obj_name]["obj"].windows}),L({self.zoom_obj_dict[ce][obj_name]["obj"].linux}),M({self.zoom_obj_dict[ce][obj_name]["obj"].mac})',
+ 'Test Duration(min)': self.zoom_obj_dict[ce][obj_name]["obj"].duration,
+ 'EMAIL ID': self.zoom_obj_dict[ce][obj_name]["obj"].signin_email,
+ "PASSWORD": self.zoom_obj_dict[ce][obj_name]["obj"].signin_passwd,
+ "HOST": self.zoom_obj_dict[ce][obj_name]["obj"].real_sta_list[0],
+ "TEST TYPE": testtype,
+ "SSID": self.zoom_obj_dict[ce][obj_name]["obj"].ssid,
+ "Security": self.zoom_obj_dict[ce][obj_name]["obj"].security
+
+ }])
+ elif len(self.zoom_obj_dict[ce][obj_name]["obj"].selected_groups) > 0 and len(self.zoom_obj_dict[ce][obj_name]["obj"].selected_profiles) > 0:
+ # Map each group with a profile
+ gp_pairs = zip(self.zoom_obj_dict[ce][obj_name]["obj"].selected_groups, self.zoom_obj_dict[ce][obj_name]["obj"].selected_profiles)
+
+ # Create a string by joining the mapped pairs
+ gp_map = ", ".join(f"{group} -> {profile}" for group, profile in gp_pairs)
+
+ test_parameters = pd.DataFrame([{
+ "Configuration": gp_map,
+ "Configured Devices": self.zoom_obj_dict[ce][obj_name]["obj"].hostname_os_combination,
+ 'No of Clients': f'W({self.zoom_obj_dict[ce][obj_name]["obj"].windows}),L({self.zoom_obj_dict[ce][obj_name]["obj"].linux}),M({self.zoom_obj_dict[ce][obj_name]["obj"].mac})',
+ 'Test Duration(min)': self.zoom_obj_dict[ce][obj_name]["obj"].duration,
+ 'EMAIL ID': self.zoom_obj_dict[ce][obj_name]["obj"].signin_email,
+ "PASSWORD": self.zoom_obj_dict[ce][obj_name]["obj"].signin_passwd,
+ "HOST": self.zoom_obj_dict[ce][obj_name]["obj"].real_sta_list[0],
+ "TEST TYPE": testtype,
+
+ }])
+ else:
+
+ test_parameters = pd.DataFrame([{
+ "Configured Devices": self.zoom_obj_dict[ce][obj_name]["obj"].hostname_os_combination,
+ 'No of Clients': f'W({self.zoom_obj_dict[ce][obj_name]["obj"].windows}),L({self.zoom_obj_dict[ce][obj_name]["obj"].linux}),M({self.zoom_obj_dict[ce][obj_name]["obj"].mac})',
+ 'Test Duration(min)': self.zoom_obj_dict[ce][obj_name]["obj"].duration,
+ 'EMAIL ID': self.zoom_obj_dict[ce][obj_name]["obj"].signin_email,
+ "PASSWORD": self.zoom_obj_dict[ce][obj_name]["obj"].signin_passwd,
+ "HOST": self.zoom_obj_dict[ce][obj_name]["obj"].real_sta_list[0],
+ "TEST TYPE": testtype,
+
+ }])
+
+ test_parameters = pd.DataFrame([{
+
+ 'No of Clients': f'W({self.zoom_obj_dict[ce][obj_name]["obj"].windows}),L({self.zoom_obj_dict[ce][obj_name]["obj"].linux}),M({self.zoom_obj_dict[ce][obj_name]["obj"].mac})',
+ 'Test Duration(min)': self.zoom_obj_dict[ce][obj_name]["obj"].duration,
+ 'EMAIL ID': self.zoom_obj_dict[ce][obj_name]["obj"].signin_email,
+ "PASSWORD": self.zoom_obj_dict[ce][obj_name]["obj"].signin_passwd,
+ "HOST": self.zoom_obj_dict[ce][obj_name]["obj"].real_sta_list[0],
+ "TEST TYPE": testtype
+
+ }])
+ self.overall_report.set_table_dataframe(test_parameters)
+ self.overall_report.build_table()
+
+ client_array = []
+ accepted_clients = []
+ no_csv_client = []
+ rejected_clients = []
+ final_dataset = []
+ accepted_ostypes = []
+ max_audio_jitter_s, min_audio_jitter_s = [], []
+ max_audio_jitter_r, min_audio_jitter_r = [], []
+ max_audio_latency_s, min_audio_latency_s = [], []
+ max_audio_latency_r, min_audio_latency_r = [], []
+ max_audio_pktloss_s, min_audio_pktloss_s = [], []
+ max_audio_pktloss_r, min_audio_pktloss_r = [], []
+
+ max_video_jitter_s, min_video_jitter_s = [], []
+ max_video_jitter_r, min_video_jitter_r = [], []
+ max_video_latency_s, min_video_latency_s = [], []
+ max_video_latency_r, min_video_latency_r = [], []
+ max_video_pktloss_s, min_video_pktloss_s = [], []
+ max_video_pktloss_r, min_video_pktloss_r = [], []
+ for i in range(0, len(self.zoom_obj_dict[ce][obj_name]["obj"].device_names)):
+ temp_max_audio_jitter_s, temp_min_audio_jitter_s = 0.0, 0.0
+ temp_max_audio_jitter_r, temp_min_audio_jitter_r = 0.0, 0.0
+ temp_max_audio_latency_s, temp_min_audio_latency_s = 0.0, 0.0
+ temp_max_audio_latency_r, temp_min_audio_latency_r = 0.0, 0.0
+ temp_max_audio_pktloss_s, temp_min_audio_pktloss_s = 0.0, 0.0
+ temp_max_audio_pktloss_r, temp_min_audio_pktloss_r = 0.0, 0.0
+
+ temp_max_video_jitter_s, temp_min_video_jitter_s = 0.0, 0.0
+ temp_max_video_jitter_r, temp_min_video_jitter_r = 0.0, 0.0
+ temp_max_video_latency_s, temp_min_video_latency_s = 0.0, 0.0
+ temp_max_video_latency_r, temp_min_video_latency_r = 0.0, 0.0
+ temp_max_video_pktloss_s, temp_min_video_pktloss_s = 0.0, 0.0
+ temp_max_video_pktloss_r, temp_min_video_pktloss_r = 0.0, 0.0
+ per_client_data = {
+ "audio_jitter_s": [],
+ "audio_jitter_r": [],
+ "audio_latency_s": [],
+ "audio_latency_r": [],
+ "audio_pktloss_s": [],
+ "audio_pktloss_r": [],
+ "video_jitter_s": [],
+ "video_jitter_r": [],
+ "video_latency_s": [],
+ "video_latency_r": [],
+ "video_pktloss_s": [],
+ "video_pktloss_r": [],
+ }
+ try:
+ file_path = os.path.join(self.zoom_obj_dict[ce][obj_name]["obj"].report_path_date_time, f'{self.zoom_obj_dict[ce][obj_name]["obj"].device_names[i]}.csv')
+ with open(file_path, mode='r', encoding='utf-8', errors='ignore') as file:
+ csv_reader = csv.DictReader(file)
+ for row in csv_reader:
+
+ per_client_data["audio_jitter_s"].append(float(row["Sent Audio Jitter (ms)"]))
+ per_client_data["audio_jitter_r"].append(float(row["Receive Audio Jitter (ms)"]))
+ per_client_data["audio_latency_s"].append(float(row["Sent Audio Latency (ms)"]))
+ per_client_data["audio_latency_r"].append(float(row["Receive Audio Latency (ms)"]))
+ per_client_data["audio_pktloss_s"].append(float((row["Sent Audio Packet loss (%)"]).split(" ")[0].replace("%", "")))
+ per_client_data["audio_pktloss_r"].append(float((row["Receive Audio Packet loss (%)"]).split(" ")[0].replace("%", "")))
+ per_client_data["video_jitter_s"].append(float(row["Sent Video Jitter (ms)"]))
+ per_client_data["video_jitter_r"].append(float(row["Receive Video Jitter (ms)"]))
+ per_client_data["video_latency_s"].append(float(row["Sent Video Latency (ms)"]))
+ per_client_data["video_latency_r"].append(float(row["Receive Video Latency (ms)"]))
+ per_client_data["video_pktloss_s"].append(float((row["Sent Video Packet loss (%)"]).split(" ")[0].replace("%", "")))
+ per_client_data["video_pktloss_r"].append(float((row["Receive Video Packet loss (%)"]).split(" ")[0].replace("%", "")))
+
+ temp_max_audio_jitter_s = max(temp_max_audio_jitter_s, float(row["Sent Audio Jitter (ms)"]))
+ temp_max_audio_jitter_r = max(temp_max_audio_jitter_r, float(row["Receive Audio Jitter (ms)"]))
+ temp_max_audio_latency_s = max(temp_max_audio_latency_s, float(row["Sent Audio Latency (ms)"]))
+ temp_max_audio_latency_r = max(temp_max_audio_latency_r, float(row["Receive Audio Latency (ms)"]))
+ temp_max_audio_pktloss_s = max(temp_max_audio_pktloss_s, float((row["Sent Audio Packet loss (%)"]).split(" ")[0].replace("%", "")))
+ temp_max_audio_pktloss_r = max(temp_max_audio_pktloss_r, float((row["Receive Audio Packet loss (%)"]).split(" ")[0].replace("%", "")))
+
+ temp_max_video_jitter_s = max(temp_max_video_jitter_s, float(row["Sent Video Jitter (ms)"]))
+ temp_max_video_jitter_r = max(temp_max_video_jitter_r, float(row["Receive Video Jitter (ms)"]))
+ temp_max_video_latency_s = max(temp_max_video_latency_s, float(row["Sent Video Latency (ms)"]))
+ temp_max_video_latency_r = max(temp_max_video_latency_r, float(row["Receive Video Latency (ms)"]))
+ temp_max_video_pktloss_s = max(temp_max_video_pktloss_s, float((row["Sent Video Packet loss (%)"]).split(" ")[0].replace("%", "")))
+ temp_max_video_pktloss_r = max(temp_max_video_pktloss_r, float((row["Receive Video Packet loss (%)"]).split(" ")[0].replace("%", "")))
+
+ temp_min_audio_jitter_s = min(
+ temp_min_audio_jitter_s,
+ float(
+ row["Sent Audio Jitter (ms)"])) if temp_min_audio_jitter_s > 0 and float(
+ row["Sent Audio Jitter (ms)"]) > 0 else (
+ float(
+ row["Sent Audio Jitter (ms)"]) if float(
+ row["Sent Audio Jitter (ms)"]) > 0 else temp_min_audio_jitter_s)
+ temp_min_audio_jitter_r = min(
+ temp_min_audio_jitter_r, float(
+ row["Receive Audio Jitter (ms)"])) if temp_min_audio_jitter_r > 0 and float(
+ row["Receive Audio Jitter (ms)"]) > 0 else (
+ float(
+ row["Receive Audio Jitter (ms)"]) if float(
+ row["Receive Audio Jitter (ms)"]) > 0 else temp_min_audio_jitter_r)
+ temp_min_audio_latency_s = min(
+ temp_min_audio_latency_s, float(
+ row["Sent Audio Latency (ms)"])) if temp_min_audio_latency_s > 0 and float(
+ row["Sent Audio Latency (ms)"]) > 0 else (
+ float(
+ row["Sent Audio Latency (ms)"]) if float(
+ row["Sent Audio Latency (ms)"]) > 0 else temp_min_audio_jitter_s)
+ temp_min_audio_latency_r = min(
+ temp_min_audio_latency_r, float(
+ row["Receive Audio Latency (ms)"])) if temp_min_audio_latency_r > 0 and float(
+ row["Receive Audio Latency (ms)"]) > 0 else (
+ float(
+ row["Receive Audio Latency (ms)"]) if float(
+ row["Receive Audio Latency (ms)"]) > 0 else temp_min_audio_jitter_r)
+
+ temp_min_audio_pktloss_s = min(
+ temp_min_audio_pktloss_s, float(
+ (row["Sent Audio Packet loss (%)"]).split(" ")[0].replace(
+ "%", ""))) if temp_min_audio_pktloss_s > 0 and float(
+ (row["Sent Audio Packet loss (%)"]).split(" ")[0].replace(
+ "%", "")) > 0 else (
+ float(
+ (row["Sent Audio Packet loss (%)"]).split(" ")[0].replace(
+ "%", "")) if float(
+ (row["Sent Audio Packet loss (%)"]).split(" ")[0].replace(
+ "%", "")) > 0 else temp_min_audio_pktloss_s)
+ temp_min_audio_pktloss_r = min(
+ temp_min_audio_pktloss_r, float(
+ (row["Sent Audio Packet loss (%)"]).split(" ")[0].replace(
+ "%", ""))) if temp_min_audio_pktloss_r > 0 and float(
+ (row["Sent Audio Packet loss (%)"]).split(" ")[0].replace(
+ "%", "")) > 0 else (
+ float(
+ (row["Sent Audio Packet loss (%)"]).split(" ")[0].replace(
+ "%", "")) if float(
+ (row["Sent Audio Packet loss (%)"]).split(" ")[0].replace(
+ "%", "")) > 0 else temp_min_audio_pktloss_r)
+
+ temp_min_video_jitter_s = min(
+ temp_min_video_jitter_s,
+ float(
+ row["Sent Video Jitter (ms)"])) if temp_min_video_jitter_s > 0 and float(
+ row["Sent Video Jitter (ms)"]) > 0 else (
+ float(
+ row["Sent Video Jitter (ms)"]) if float(
+ row["Sent Video Jitter (ms)"]) > 0 else temp_min_video_jitter_s)
+ temp_min_video_jitter_r = min(
+ temp_min_video_jitter_r, float(
+ row["Receive Video Jitter (ms)"])) if temp_min_video_jitter_r > 0 and float(
+ row["Receive Video Jitter (ms)"]) > 0 else (
+ float(
+ row["Receive Video Jitter (ms)"]) if float(
+ row["Receive Video Jitter (ms)"]) > 0 else temp_min_video_jitter_r)
+ temp_min_video_latency_s = min(
+ temp_min_video_latency_s, float(
+ row["Sent Video Latency (ms)"])) if temp_min_video_latency_s > 0 and float(
+ row["Sent Video Latency (ms)"]) > 0 else (
+ float(
+ row["Sent Video Latency (ms)"]) if float(
+ row["Sent Video Latency (ms)"]) > 0 else temp_min_video_latency_s)
+ temp_min_video_latency_r = min(
+ temp_min_video_latency_r, float(
+ row["Receive Video Latency (ms)"])) if temp_min_video_latency_r > 0 and float(
+ row["Receive Video Latency (ms)"]) > 0 else (
+ float(
+ row["Receive Video Latency (ms)"]) if float(
+ row["Receive Video Latency (ms)"]) > 0 else temp_min_video_latency_r)
+
+ temp_min_video_pktloss_s = min(
+ temp_min_video_pktloss_s, float(
+ (row["Sent Video Packet loss (%)"]).split(" ")[0].replace(
+ "%", ""))) if temp_min_video_pktloss_s > 0 and float(
+ (row["Sent Video Packet loss (%)"]).split(" ")[0].replace(
+ "%", "")) > 0 else (
+ float(
+ (row["Sent Video Packet loss (%)"]).split(" ")[0].replace(
+ "%", "")) if float(
+ (row["Sent Video Packet loss (%)"]).split(" ")[0].replace(
+ "%", "")) > 0 else temp_min_video_pktloss_s)
+ temp_min_video_pktloss_r = min(
+ temp_min_video_pktloss_r, float(
+ (row["Sent Video Packet loss (%)"]).split(" ")[0].replace(
+ "%", ""))) if temp_min_video_pktloss_r > 0 and float(
+ (row["Sent Video Packet loss (%)"]).split(" ")[0].replace(
+ "%", "")) > 0 else (
+ float(
+ (row["Sent Video Packet loss (%)"]).split(" ")[0].replace(
+ "%", "")) if float(
+ (row["Sent Video Packet loss (%)"]).split(" ")[0].replace(
+ "%", "")) > 0 else temp_min_video_pktloss_r)
+
+ except Exception as e:
+ logging.error(f"Error in reading data in client {self.zoom_obj_dict[ce][obj_name]['obj'].device_names[i]}", e)
+ no_csv_client.append(self.zoom_obj_dict[ce][obj_name]["obj"].device_names[i])
+ rejected_clients.append(self.zoom_obj_dict[ce][obj_name]["obj"].device_names[i])
+ if self.zoom_obj_dict[ce][obj_name]["obj"].device_names[i] not in no_csv_client:
+ client_array.append(self.zoom_obj_dict[ce][obj_name]["obj"].device_names[i])
+ accepted_clients.append(self.zoom_obj_dict[ce][obj_name]["obj"].device_names[i])
+ accepted_ostypes.append(self.zoom_obj_dict[ce][obj_name]["obj"].real_sta_os_type[i])
+ max_audio_jitter_s.append(temp_max_audio_jitter_s)
+ min_audio_jitter_s.append(temp_min_audio_jitter_s)
+ max_audio_jitter_r.append(temp_max_audio_jitter_r)
+ min_audio_jitter_r.append(temp_min_audio_jitter_r)
+ max_audio_latency_s.append(temp_max_audio_latency_s)
+ min_audio_latency_s.append(temp_min_audio_latency_s)
+ max_audio_latency_r.append(temp_max_audio_latency_r)
+ min_audio_latency_r.append(temp_min_audio_latency_r)
+ max_video_jitter_s.append(temp_max_video_jitter_s)
+ min_video_jitter_s.append(temp_min_video_jitter_s)
+ max_video_jitter_r.append(temp_max_video_jitter_r)
+ min_video_jitter_r.append(temp_min_video_jitter_r)
+ max_video_latency_s.append(temp_max_video_latency_s)
+ min_video_latency_s.append(temp_min_video_latency_s)
+ max_video_latency_r.append(temp_max_video_latency_r)
+ min_video_latency_r.append(temp_min_video_latency_r)
+
+ max_audio_pktloss_s.append(temp_max_audio_pktloss_s)
+ min_audio_pktloss_s.append(temp_min_audio_pktloss_s)
+ max_audio_pktloss_r.append(temp_max_audio_pktloss_r)
+ min_audio_pktloss_r.append(temp_min_audio_pktloss_r)
+ max_video_pktloss_s.append(temp_max_video_pktloss_s)
+ min_video_pktloss_s.append(temp_min_video_pktloss_s)
+ max_video_pktloss_r.append(temp_max_video_pktloss_r)
+ min_video_pktloss_r.append(temp_min_video_pktloss_r)
+
+ final_dataset.append(per_client_data.copy())
+
+ self.overall_report.set_table_title("Test Devices:")
+ self.overall_report.build_table_title()
+
+ device_details = pd.DataFrame({
+ 'Hostname': self.zoom_obj_dict[ce][obj_name]["obj"].real_sta_hostname,
+ 'OS Type': self.zoom_obj_dict[ce][obj_name]["obj"].real_sta_os_type,
+ "MAC": self.zoom_obj_dict[ce][obj_name]["obj"].mac_list,
+ "RSSI": self.zoom_obj_dict[ce][obj_name]["obj"].rssi_list,
+ "Link Rate": self.zoom_obj_dict[ce][obj_name]["obj"].link_rate_list,
+ "SSID": self.zoom_obj_dict[ce][obj_name]["obj"].ssid_list,
+
+ })
+ self.overall_report.set_table_dataframe(device_details)
+ self.overall_report.build_table()
+
+ if self.zoom_obj_dict[ce][obj_name]["obj"].audio:
+ self.overall_report.set_graph_title("Audio Latency (Sent/Received)")
+ self.overall_report.build_graph_title()
+ x_data_set = [max_audio_latency_s.copy(), min_audio_latency_s.copy(), max_audio_latency_r.copy(), min_audio_latency_r.copy()]
+ y_data_set = client_array
+
+ x_fig_size = 18
+ y_fig_size = len(client_array) * 1 + 4
+ bar_graph_horizontal = lf_bar_graph_horizontal(
+ _data_set=x_data_set,
+ _xaxis_name="Latency (ms)",
+ _yaxis_name="Devices",
+ _yaxis_label=y_data_set,
+ _yaxis_categories=y_data_set,
+ _yaxis_step=1,
+ _yticks_font=8,
+ _bar_height=.20,
+ _color_name=["yellow", "blue", "orange", "grey"],
+ _show_bar_value=True,
+ _figsize=(x_fig_size, y_fig_size),
+ _graph_title="Audio Latency(sent/received)",
+ _graph_image_name=f"Audio Latency(sent and received){obj_no}",
+ _label=["Max Sent", "Min Sent", "Max Recv", "Min Recv"]
+ )
+ graph_image = bar_graph_horizontal.build_bar_graph_horizontal()
+ self.overall_report.set_graph_image(graph_image)
+ self.overall_report.move_graph_image()
+ self.overall_report.build_graph()
+
+ self.overall_report.set_graph_title("Audio Jitter (Sent/Received)")
+ self.overall_report.build_graph_title()
+ x_data_set = [max_audio_jitter_s.copy(), min_audio_jitter_s.copy(), max_audio_jitter_r.copy(), min_audio_jitter_r.copy()]
+ y_data_set = client_array
+
+ x_fig_size = 18
+ y_fig_size = len(client_array) * 1 + 4
+ bar_graph_horizontal = lf_bar_graph_horizontal(
+ _data_set=x_data_set,
+ _xaxis_name="Jitter (ms)",
+ _yaxis_name="Devices",
+ _yaxis_label=y_data_set,
+ _yaxis_categories=y_data_set,
+ _yaxis_step=1,
+ _yticks_font=8,
+ _bar_height=.20,
+ _color_name=["yellow", "blue", "orange", "grey"],
+ _show_bar_value=True,
+ _figsize=(x_fig_size, y_fig_size),
+ _graph_title="Audio Jitter(sent/received)",
+ _graph_image_name=f"Audio Jitter(sent and received) {obj_no}",
+ _label=["Max Sent", "Min Sent", "Max Recv", "Min Recv"]
+ )
+ graph_image = bar_graph_horizontal.build_bar_graph_horizontal()
+ self.overall_report.set_graph_image(graph_image)
+ self.overall_report.move_graph_image()
+ self.overall_report.build_graph()
+
+ self.overall_report.set_graph_title("Audio Packet Loss (Sent/Received)")
+ self.overall_report.build_graph_title()
+ x_data_set = [max_audio_pktloss_s.copy(), min_audio_pktloss_s.copy(), max_audio_pktloss_r.copy(), min_audio_pktloss_r.copy()]
+ y_data_set = client_array
+
+ x_fig_size = 18
+ y_fig_size = len(client_array) * 1 + 4
+ bar_graph_horizontal = lf_bar_graph_horizontal(
+ _data_set=x_data_set,
+ _xaxis_name="Packet Loss (%)",
+ _yaxis_name="Devices",
+ _yaxis_label=y_data_set,
+ _yaxis_categories=y_data_set,
+ _yaxis_step=1,
+ _yticks_font=8,
+ _bar_height=.20,
+ _color_name=["yellow", "blue", "orange", "grey"],
+ _show_bar_value=True,
+ _figsize=(x_fig_size, y_fig_size),
+ _graph_title="Audio Packet Loss(sent/received)",
+ _graph_image_name=f"Audio Packet Loss(sent and received){obj_no}",
+ _label=["Max Sent", "Min Sent", "Max Recv", "Min Recv"]
+ )
+ graph_image = bar_graph_horizontal.build_bar_graph_horizontal()
+ self.overall_report.set_graph_image(graph_image)
+ self.overall_report.move_graph_image()
+ self.overall_report.build_graph()
+
+ self.overall_report.set_table_title("Test Audio Results Table:")
+ self.overall_report.build_table_title()
+ audio_test_details = pd.DataFrame({
+ 'Device Name': [client for client in accepted_clients],
+ 'Avg Latency Sent (ms)': [round(sum(data["audio_latency_s"]) / len(data["audio_latency_s"]), 2) if len(data["audio_latency_s"]) != 0 else 0 for data in final_dataset],
+ 'Avg Latency Recv (ms)': [round(sum(data["audio_latency_r"]) / len(data["audio_latency_r"]), 2) if len(data["audio_latency_r"]) != 0 else 0 for data in final_dataset],
+ 'Avg Jitter Sent (ms)': [round(sum(data["audio_jitter_s"]) / len(data["audio_jitter_s"]), 2) if len(data["audio_jitter_s"]) != 0 else 0 for data in final_dataset],
+ 'Avg Jitter Recv (ms)': [round(sum(data["audio_jitter_r"]) / len(data["audio_jitter_r"]), 2) if len(data["audio_jitter_r"]) != 0 else 0 for data in final_dataset],
+ 'Avg Pkt Loss Sent': [round(sum(data["audio_pktloss_s"]) / len(data["audio_pktloss_s"]), 2) if len(data["audio_pktloss_s"]) != 0 else 0 for data in final_dataset],
+ 'Avg Pkt Loss Recv': [round(sum(data["audio_pktloss_r"]) / len(data["audio_pktloss_r"]), 2) if len(data["audio_pktloss_r"]) != 0 else 0 for data in final_dataset],
+ 'CSV link': ['csv data'.format(client) for client in accepted_clients]
+
+ })
+ self.overall_report.set_table_dataframe(audio_test_details)
+ self.overall_report.dataframe_html = self.overall_report.dataframe.to_html(index=False,
+ justify='center', render_links=True, escape=False) # have the index be able to be passed in.
+ self.overall_report.html += self.overall_report.dataframe_html
+ if self.zoom_obj_dict[ce][obj_name]["obj"].video:
+ self.overall_report.set_graph_title("Video Latency (Sent/Received)")
+ self.overall_report.build_graph_title()
+ x_data_set = [max_video_latency_s.copy(), min_video_latency_s.copy(), max_video_latency_r.copy(), min_video_latency_r.copy()]
+ y_data_set = client_array
+ x_fig_size = 18
+ y_fig_size = len(client_array) * 1 + 4
+ bar_graph_horizontal = lf_bar_graph_horizontal(
+ _data_set=x_data_set,
+ _xaxis_name="Latency (ms)",
+ _yaxis_name="Devices",
+ _yaxis_label=y_data_set,
+ _yaxis_categories=y_data_set,
+ _yaxis_step=1,
+ _yticks_font=8,
+ _bar_height=.20,
+ _color_name=["yellow", "blue", "orange", "grey"],
+ _show_bar_value=True,
+ _figsize=(x_fig_size, y_fig_size),
+ _graph_title="Video Latency(sent/received)",
+ _graph_image_name=f"Video Latency(sent and received){obj_no}",
+ _label=["Max Sent", "Min Sent", "Max Recv", "Min Recv"]
+ )
+ graph_image = bar_graph_horizontal.build_bar_graph_horizontal()
+ self.overall_report.set_graph_image(graph_image)
+ self.overall_report.move_graph_image()
+ self.overall_report.build_graph()
+
+ self.overall_report.set_graph_title("Video Jitter (Sent/Received)")
+ self.overall_report.build_graph_title()
+ x_data_set = [max_video_jitter_s.copy(), min_video_jitter_s.copy(), max_video_jitter_r.copy(), min_video_jitter_r.copy()]
+ y_data_set = client_array
+ x_fig_size = 18
+ y_fig_size = len(client_array) * 1 + 4
+ bar_graph_horizontal = lf_bar_graph_horizontal(
+ _data_set=x_data_set,
+ _xaxis_name="Jitter (ms)",
+ _yaxis_name="Devices",
+ _yaxis_label=y_data_set,
+ _yaxis_categories=y_data_set,
+ _yaxis_step=1,
+ _yticks_font=8,
+ _bar_height=.20,
+ _color_name=["yellow", "blue", "orange", "grey"],
+ _show_bar_value=True,
+ _figsize=(x_fig_size, y_fig_size),
+ _graph_title="Video Jitter(sent/received)",
+ _graph_image_name=f"Video Jitter(sent and received){obj_no}",
+ _label=["Max Sent", "Min Sent", "Max Recv", "Min Recv"]
+ )
+ graph_image = bar_graph_horizontal.build_bar_graph_horizontal()
+ self.overall_report.set_graph_image(graph_image)
+ self.overall_report.move_graph_image()
+ self.overall_report.build_graph()
+
+ self.overall_report.set_graph_title("Video Packet Loss (Sent/Received)")
+ self.overall_report.build_graph_title()
+ x_data_set = [max_video_pktloss_s.copy(), min_video_pktloss_s.copy(), max_video_pktloss_r.copy(), min_video_pktloss_r.copy()]
+ y_data_set = client_array
+ x_fig_size = 18
+ y_fig_size = len(client_array) * 1 + 4
+ bar_graph_horizontal = lf_bar_graph_horizontal(
+ _data_set=x_data_set,
+ _xaxis_name="Packet Loss (%)",
+ _yaxis_name="Devices",
+ _yaxis_label=y_data_set,
+ _yaxis_categories=y_data_set,
+ _yaxis_step=1,
+ _yticks_font=8,
+ _bar_height=.20,
+ _color_name=["yellow", "blue", "orange", "grey"],
+ _show_bar_value=True,
+ _figsize=(x_fig_size, y_fig_size),
+ _graph_title="Video Packet Loss(sent/received)",
+ _graph_image_name=f"Video Packet Loss(sent and received){obj_no}",
+ _label=["Max Sent", "Min Sent", "Max Recv", "Min Recv"]
+ )
+ graph_image = bar_graph_horizontal.build_bar_graph_horizontal()
+ self.overall_report.set_graph_image(graph_image)
+ self.overall_report.move_graph_image()
+ self.overall_report.build_graph()
+
+ self.overall_report.set_table_title("Test Video Results Table:")
+ self.overall_report.build_table_title()
+ video_test_details = pd.DataFrame({
+ 'Device Name': [client for client in accepted_clients],
+ 'Avg Latency Sent (ms)': [round(sum(data["video_latency_s"]) / len(data["video_latency_s"]), 2) if len(data["video_latency_s"]) != 0 else 0 for data in final_dataset],
+ 'Avg Latency Recv (ms)': [round(sum(data["video_latency_r"]) / len(data["video_latency_r"]), 2) if len(data["video_latency_r"]) != 0 else 0 for data in final_dataset],
+ 'Avg Jitter Sent (ms)': [round(sum(data["video_jitter_s"]) / len(data["video_jitter_s"]), 2) if len(data["video_jitter_s"]) != 0 else 0 for data in final_dataset],
+ 'Avg Jitter Recv (ms)': [round(sum(data["video_jitter_r"]) / len(data["video_jitter_r"]), 2) if len(data["video_jitter_r"]) != 0 else 0 for data in final_dataset],
+ 'Avg Pkt Loss Sent': [round(sum(data["video_pktloss_s"]) / len(data["video_pktloss_s"]), 2) if len(data["video_pktloss_s"]) != 0 else 0 for data in final_dataset],
+ 'Avg Pkt Loss Recv': [round(sum(data["video_pktloss_r"]) / len(data["video_pktloss_r"]), 2) if len(data["video_pktloss_r"]) != 0 else 0 for data in final_dataset],
+ 'CSV link': ['csv data'.format(client) for client in accepted_clients]
+ })
+ self.overall_report.set_table_dataframe(video_test_details)
+
+ self.overall_report.dataframe_html = self.overall_report.dataframe.to_html(index=False,
+ justify='center', render_links=True, escape=False) # have the index be able to be passed in.
+ self.overall_report.html += self.overall_report.dataframe_html
+ self.overall_report.set_custom_html("
")
+ self.overall_report.build_custom()
+
+ if ce == "series":
+ obj_no += 1
+ obj_name = f"zoom_test_{obj_no}"
+ else:
+ break
+
+ except Exception as e:
+ logger.info(f"failed to generate report for {test_name} {e}")
+
+
+
+ def generate_overall_report(self,test_results_df=''):
+ self.overall_report = lf_report.lf_report(_results_dir_name="Base_Class_Test_Overall_report", _output_html="base_class_overall.html",
+ _output_pdf="base_class_overall.pdf", _path=self.result_path)
+ self.report_path_date_time = self.overall_report.get_path_date_time()
+ self.overall_report.set_title("Candela Base Class")
+ self.overall_report.set_date(datetime.now())
+ self.overall_report.build_banner()
+ self.overall_report.set_custom_html(test_results_df.to_html(index=False, justify='center'))
+ self.overall_report.build_custom()
+
+ if self.order_priority == "series":
+ if len(self.series_tests) != 0:
+ self.overall_report.set_custom_html('Series Tests
')
+ self.overall_report.build_custom()
+
+ self.render_each_test(ce="series")
+ if len(self.parallel_tests) != 0:
+ self.overall_report.set_custom_html('Parallel Tests
')
+ self.overall_report.build_custom()
+ self.render_each_test(ce="parallel")
+ else:
+ if len(self.parallel_tests) != 0:
+ self.overall_report.set_custom_html('Parallel Tests
')
+ self.overall_report.build_custom()
+ self.render_each_test(ce="parallel")
+ if len(self.series_tests) != 0:
+ self.overall_report.set_custom_html('Series Tests
')
+ self.overall_report.build_custom()
+ self.render_each_test(ce="series")
+ # self.overall_report.insert_table_at_marker(test_results_df,"for_table")
+ self.overall_report.build_footer()
+ html_file = self.overall_report.write_html()
+ print("returned file {}".format(html_file))
+ print(html_file)
+ self.overall_report.write_pdf()
+
def validate_individual_args(args, test_name):
    """Validate the test-specific mandatory arguments for one selected test.

    Parameters
    ----------
    args : dict
        Parsed command-line arguments keyed by option name
        (e.g. ``args['yt_url']``).
    test_name : str
        Test identifier such as ``'ping_test'`` or ``'zoom_test'``.

    Returns
    -------
    bool
        True when every mandatory argument for *test_name* is present.
    """
    # These tests have no mandatory per-test arguments.
    if test_name in ('ping_test', 'http_test', 'ftp_test', 'thput_test',
                     'qos_test', 'vs_test', 'rb_test'):
        return True
    if test_name == 'zoom_test':
        # Zoom needs sign-in credentials and a participant count.
        return (args["zoom_signin_email"] is not None
                and args["zoom_signin_passwd"] is not None
                and args["zoom_participants"] is not None)
    if test_name == 'yt_test':
        # BUG FIX: the original returned False when --yt_url was missing but
        # fell through to an implicit (falsy) None when it WAS supplied, so a
        # correctly-invoked yt_test always failed validation.
        return args["yt_url"] is not None
    # Any other test name (e.g. mcast_test) has no extra requirements.
    # BUG FIX: the original implicitly returned None (falsy) here.
    return True
+
+
+
+
+
+
+
+
def validate_args(args):
    """Validate argument combinations for every test the user selected.

    Checks, per selected test: pass/fail options are mutually exclusive,
    group/profile selections are consistent, and Wi-Fi configuration
    arguments (ssid / passwd / security) form a valid combination.

    Parameters
    ----------
    args : dict
        Parsed command-line arguments keyed by option name.

    Returns
    -------
    bool
        True when every selected test passed validation, False otherwise.
        (The original returned nothing; returning a bool is backward
        compatible for callers that ignored the result.)
    """
    tests = ["http_test", "ping_test", "ftp_test", "thput_test", "qos_test",
             "vs_test", "mcast_test", "yt_test", "rb_test", "zoom_test"]
    # BUG FIX: the original indexed args with the undefined *variables*
    # `series_tests` / `parallel_tests` (NameError) and left both lists
    # unbound when the option was absent; use the string keys and default
    # to empty lists.
    series_tests = args['series_tests'].split(',') if args['series_tests'] else []
    parallel_tests = args['parallel_tests'].split(',') if args['parallel_tests'] else []

    all_valid = True
    for test in tests:
        if test not in series_tests and test not in parallel_tests:
            continue
        logger.info(f"validating args for {test}...")
        flag_test = validate_individual_args(args, test)
        # Per-test option prefix, e.g. 'ping_test' -> 'ping'.
        test = test.split('_')[0]

        # Pass/fail source must be exactly one of the two options.
        if args[f'{test}_expected_passfail_value'] and args[f'{test}_device_csv_name']:
            logger.error(f"Specify either --{test}_expected_passfail_value or --{test}_device_csv_name")
            flag_test = False

        selected_groups = args[f'{test}_group_name'].split(',') if args[f'{test}_group_name'] else []
        # BUG FIX: profiles were read from the non-existent generic
        # 'profile_name' key instead of the per-test key.
        selected_profiles = args[f'{test}_profile_name'].split(',') if args[f'{test}_profile_name'] else []

        if len(selected_groups) != len(selected_profiles):
            logger.error("Number of groups should match number of profiles")
            flag_test = False
        elif args[f'{test}_group_name'] and args[f'{test}_profile_name'] and args[f'{test}_file_name'] and args[f'{test}_device_list'] != []:
            logger.error(f"Either --{test}_group_name or --{test}_device_list should be entered not both")
            flag_test = False
        elif args[f'{test}_ssid'] and args[f'{test}_profile_name']:
            logger.error(f"Either --{test}_ssid or --{test}_profile_name should be given")
            flag_test = False
        elif args[f'{test}_file_name'] and (args.get(f'{test}_group_name') is None or args.get(f'{test}_profile_name') is None):
            logger.error("Please enter the correct set of arguments for configuration")
            flag_test = False

        if args[f'{test}_config'] and args.get(f'{test}_group_name') is None:
            # An 'open' network legitimately has no password; normalise it.
            if args.get(f'{test}_ssid') and args.get(f'{test}_security') and args[f'{test}_security'].lower() == 'open' and (args.get(f'{test}_passwd') is None or args[f'{test}_passwd'] == ''):
                args[f'{test}_passwd'] = '[BLANK]'

            if args.get(f'{test}_ssid') is None or args.get(f'{test}_passwd') is None or args[f'{test}_passwd'] == '':
                logger.error(f'For configuration need to Specify --{test}_ssid , --{test}_passwd (Optional for "open" type security) , --{test}_security')
                flag_test = False
            elif args.get(f'{test}_ssid') and args[f'{test}_passwd'] == '[BLANK]' and args.get(f'{test}_security') and args[f'{test}_security'].lower() != 'open':
                logger.error(f'Please provide valid --{test}_passwd and --{test}_security configuration')
                flag_test = False
        elif args.get(f'{test}_ssid') and args.get(f'{test}_passwd'):
            if args.get(f'{test}_security') is None:
                logger.error(f'Security must be provided when --{test}_ssid and --{test}_password specified')
                flag_test = False
            elif args[f'{test}_passwd'] == '[BLANK]' and args[f'{test}_security'].lower() != 'open':
                logger.error('Please provide valid passwd and security configuration')
                flag_test = False
            elif args[f'{test}_security'].lower() == 'open' and args[f'{test}_passwd'] != '[BLANK]':
                logger.error("For an open type security, the password should be left blank (i.e., set to '' or [BLANK]).")
                flag_test = False

        if flag_test:
            logger.info(f"Arg validation check done for {test}")
        else:
            all_valid = False
    return all_valid
+
+
+def main():
+
+ parser = argparse.ArgumentParser(
+ prog="lf_interop_throughput.py",
+ formatter_class=argparse.RawTextHelpFormatter,
+ )
+ parser = argparse.ArgumentParser(description="Run Candela API Tests")
+ #Always Common
+ parser.add_argument('--mgr', '--lfmgr', default='localhost', help='hostname for where LANforge GUI is running')
+ parser.add_argument('--mgr_port', '--port', default=8080, help='port LANforge GUI HTTP service is running on')
+ parser.add_argument('--upstream_port', '-u', default='eth1', help='non-station port that generates traffic: ., e.g: 1.eth1')
+ #Common
+ parser.add_argument('--device_list', help="Enter the devices on which the test should be run", default=[])
+ parser.add_argument('--duration', help='Please enter the duration in s,m,h (seconds or minutes or hours).Eg: 30s,5m,48h')
+ parser.add_argument('--parallel',
+ action="store_true",
+ help='to run in parallel')
+ parser.add_argument("--tests",type=str,help="Comma-separated ordered list of tests to run (e.g., ping_test,http_test,ping_test)")
+ parser.add_argument('--series_tests', help='Comma-separated list of tests to run in series')
+ parser.add_argument('--parallel_tests', help='Comma-separated list of tests to run in parallel')
+ parser.add_argument('--order_priority', choices=['series', 'parallel'], default='series',
+ help='Which tests to run first: series or parallel')
+
+ #NOt common
+ #ping
+ #without config
+ parser.add_argument('--ping_test',
+ action="store_true",
+ help='ping_test consists')
+ parser.add_argument('--ping_target',
+ type=str,
+ help='Target URL or port for ping test',
+ default='1.1.eth1')
+ parser.add_argument('--ping_interval',
+ type=str,
+ help='Interval (in seconds) between the echo requests',
+ default='1')
+
+ parser.add_argument('--ping_duration',
+ type=float,
+ help='Duration (in minutes) to run the ping test',
+ default=1)
+ parser.add_argument('--ping_use_default_config',
+ action='store_true',
+ help='specify this flag if wanted to proceed with existing Wi-Fi configuration of the devices')
+ parser.add_argument('--ping_device_list', help="Enter the devices on which the ping test should be run", default=[])
+ #ping pass fail value
+ parser.add_argument("--ping_expected_passfail_value", help="Specify the expected number of urls", default=None)
+ parser.add_argument("--ping_device_csv_name", type=str, help='Specify the csv name to store expected url values', default=None)
+ #ping with groups and profile configuration
+ parser.add_argument('--ping_file_name', type=str, help='Specify the file name containing group details. Example:file1')
+ parser.add_argument('--ping_group_name', type=str, help='Specify the groups name that contains a list of devices. Example: group1,group2')
+ parser.add_argument('--ping_profile_name', type=str, help='Specify the profile name to apply configurations to the devices.')
+
+ #ping configuration with --config
+ parser.add_argument("--ping_config", action="store_true", help="Specify for configuring the devices")
+ parser.add_argument('--ping_ssid', help='WiFi SSID for script objects to associate to')
+ parser.add_argument('--ping_passwd', '--ping_password', '--ping_key', default="[BLANK]", help='WiFi passphrase/password/key')
+ parser.add_argument('--ping_security', help='WiFi Security protocol: < open | wep | wpa | wpa2 | wpa3 >', default="open")
+ parser.add_argument("--ping_eap_method", type=str, default='DEFAULT', help="Specify the EAP method for authentication.")
+ parser.add_argument("--ping_eap_identity", type=str, default='', help="Specify the EAP identity for authentication.")
+ parser.add_argument("--ping_ieee8021x", action="store_true", help='Enables 802.1X enterprise authentication for test stations.')
+ parser.add_argument("--ping_ieee80211u", action="store_true", help='Enables IEEE 802.11u (Hotspot 2.0) support.')
+ parser.add_argument("--ping_ieee80211w", type=int, default=1, help='Enables IEEE 802.11w (Management Frame Protection) support.')
+ parser.add_argument("--ping_enable_pkc", action="store_true", help='Enables pkc support.')
+ parser.add_argument("--ping_bss_transition", action="store_true", help='Enables BSS transition support.')
+ parser.add_argument("--ping_power_save", action="store_true", help='Enables power-saving features.')
+ parser.add_argument("--ping_disable_ofdma", action="store_true", help='Disables OFDMA support.')
+ parser.add_argument("--ping_roam_ft_ds", action="store_true", help='Enables fast BSS transition (FT) support')
+ parser.add_argument("--ping_key_management", type=str, default='DEFAULT', help='Specify the key management method (e.g., WPA-PSK, WPA-EAP')
+ parser.add_argument("--ping_pairwise", type=str, default='NA')
+ parser.add_argument("--ping_private_key", type=str, default='NA', help='Specify EAP private key certificate file.')
+ parser.add_argument("--ping_ca_cert", type=str, default='NA', help='Specifiy the CA certificate file name')
+ parser.add_argument("--ping_client_cert", type=str, default='NA', help='Specify the client certificate file name')
+ parser.add_argument("--ping_pk_passwd", type=str, default='NA', help='Specify the password for the private key')
+ parser.add_argument("--ping_pac_file", type=str, default='NA', help='Specify the pac file name')
+ # parser.add_argument('--ping_file_name', type=str, help='Specify the file name containing group details. Example:file1')
+ # parser.add_argument('--ping_group_name', type=str, help='Specify the groups name that contains a list of devices. Example: group1,group2')
+ # parser.add_argument('--ping_profile_name', type=str, help='Specify the profile name to apply configurations to the devices.')
+ parser.add_argument("--ping_wait_time", type=int, help='Specify the maximum time to wait for Configuration', default=60)
+ #http
+ parser.add_argument('--http_test',
+ action="store_true",
+ help='http consists')
+ parser.add_argument('--http_bands', nargs="+", help='specify which band testing you want to run eg 5G, 2.4G, 6G',
+ default=["5G", "2.4G", "6G"])
+ parser.add_argument('--http_duration', help='Please enter the duration in s,m,h (seconds or minutes or hours).Eg: 30s,5m,48h')
+ parser.add_argument('--http_file_size', type=str, help='specify the size of file you want to download', default='5MB')
+ parser.add_argument('--http_device_list', help="Enter the devices on which the ping test should be run", default=[])
+ #http pass fail value
+ parser.add_argument("--http_expected_passfail_value", help="Specify the expected number of urls", default=None)
+ parser.add_argument("--http_device_csv_name", type=str, help='Specify the csv name to store expected url values', default=None)
+ #http with groups and profile configuration
+ parser.add_argument('--http_file_name', type=str, help='Specify the file name containing group details. Example:file1')
+ parser.add_argument('--http_group_name', type=str, help='Specify the groups name that contains a list of devices. Example: group1,group2')
+ parser.add_argument('--http_profile_name', type=str, help='Specify the profile name to apply configurations to the devices.')
+
+ #http configuration with --config
+ parser.add_argument("--http_config", action="store_true", help="Specify for configuring the devices")
+ parser.add_argument('--http_ssid', help='WiFi SSID for script objects to associate to')
+ parser.add_argument('--http_passwd', '--http_password', '--http_key', default="[BLANK]", help='WiFi passphrase/password/key')
+ parser.add_argument('--http_security', help='WiFi Security protocol: < open | wep | wpa | wpa2 | wpa3 >', default="open")
+ parser.add_argument("--http_eap_method", type=str, default='DEFAULT', help="Specify the EAP method for authentication.")
+ parser.add_argument("--http_eap_identity", type=str, default='', help="Specify the EAP identity for authentication.")
+ parser.add_argument("--http_ieee8021x", action="store_true", help='Enables 802.1X enterprise authentication for test stations.')
+ parser.add_argument("--http_ieee80211u", action="store_true", help='Enables IEEE 802.11u (Hotspot 2.0) support.')
+ parser.add_argument("--http_ieee80211w", type=int, default=1, help='Enables IEEE 802.11w (Management Frame Protection) support.')
+ parser.add_argument("--http_enable_pkc", action="store_true", help='Enables pkc support.')
+ parser.add_argument("--http_bss_transition", action="store_true", help='Enables BSS transition support.')
+ parser.add_argument("--http_power_save", action="store_true", help='Enables power-saving features.')
+ parser.add_argument("--http_disable_ofdma", action="store_true", help='Disables OFDMA support.')
+ parser.add_argument("--http_roam_ft_ds", action="store_true", help='Enables fast BSS transition (FT) support')
+ parser.add_argument("--http_key_management", type=str, default='DEFAULT', help='Specify the key management method (e.g., WPA-PSK, WPA-EAP')
+ parser.add_argument("--http_pairwise", type=str, default='NA')
+ parser.add_argument("--http_private_key", type=str, default='NA', help='Specify EAP private key certificate file.')
+ parser.add_argument("--http_ca_cert", type=str, default='NA', help='Specifiy the CA certificate file name')
+ parser.add_argument("--http_client_cert", type=str, default='NA', help='Specify the client certificate file name')
+ parser.add_argument("--http_pk_passwd", type=str, default='NA', help='Specify the password for the private key')
+ parser.add_argument("--http_pac_file", type=str, default='NA', help='Specify the pac file name')
+ # parser.add_argument('--http_file_name', type=str, help='Specify the file name containing group details. Example:file1')
+ # parser.add_argument('--http_group_name', type=str, help='Specify the groups name that contains a list of devices. Example: group1,group2')
+ # parser.add_argument('--http_profile_name', type=str, help='Specify the profile name to apply configurations to the devices.')
+ parser.add_argument("--http_wait_time", type=int, help='Specify the maximum time to wait for Configuration', default=60)
+
+ #ftp
+ parser.add_argument('--ftp_test',
+ action="store_true",
+ help='ftp_test consists')
+ parser.add_argument('--ftp_bands', nargs="+", help='specify which band testing you want to run eg 5G, 2.4G, 6G',
+ default=["5G", "2.4G", "6G"])
+ parser.add_argument('--ftp_duration', help='Please enter the duration in s,m,h (seconds or minutes or hours).Eg: 30s,5m,48h')
+ parser.add_argument('--ftp_file_size', type=str, help='specify the size of file you want to download', default='5MB')
+ parser.add_argument('--ftp_device_list', help="Enter the devices on which the ping test should be run", default=[])
+ #ftp pass fail value
+ parser.add_argument("--ftp_expected_passfail_value", help="Specify the expected number of urls", default=None)
+ parser.add_argument("--ftp_device_csv_name", type=str, help='Specify the csv name to store expected url values', default=None)
+ #ftp with groups and profile configuration
+ parser.add_argument('--ftp_file_name', type=str, help='Specify the file name containing group details. Example:file1')
+ parser.add_argument('--ftp_group_name', type=str, help='Specify the groups name that contains a list of devices. Example: group1,group2')
+ parser.add_argument('--ftp_profile_name', type=str, help='Specify the profile name to apply configurations to the devices.')
+
+ #ftp configuration with --config
+ parser.add_argument("--ftp_config", action="store_true", help="Specify for configuring the devices")
+ parser.add_argument('--ftp_ssid', help='WiFi SSID for script objects to associate to')
+ parser.add_argument('--ftp_passwd', '--ftp_password', '--ftp_key', default="[BLANK]", help='WiFi passphrase/password/key')
+ parser.add_argument('--ftp_security', help='WiFi Security protocol: < open | wep | wpa | wpa2 | wpa3 >', default="open")
+ parser.add_argument("--ftp_eap_method", type=str, default='DEFAULT', help="Specify the EAP method for authentication.")
+ parser.add_argument("--ftp_eap_identity", type=str, default='', help="Specify the EAP identity for authentication.")
+ parser.add_argument("--ftp_ieee8021x", action="store_true", help='Enables 802.1X enterprise authentication for test stations.')
+ parser.add_argument("--ftp_ieee80211u", action="store_true", help='Enables IEEE 802.11u (Hotspot 2.0) support.')
+ parser.add_argument("--ftp_ieee80211w", type=int, default=1, help='Enables IEEE 802.11w (Management Frame Protection) support.')
+ parser.add_argument("--ftp_enable_pkc", action="store_true", help='Enables pkc support.')
+ parser.add_argument("--ftp_bss_transition", action="store_true", help='Enables BSS transition support.')
+ parser.add_argument("--ftp_power_save", action="store_true", help='Enables power-saving features.')
+ parser.add_argument("--ftp_disable_ofdma", action="store_true", help='Disables OFDMA support.')
+ parser.add_argument("--ftp_roam_ft_ds", action="store_true", help='Enables fast BSS transition (FT) support')
+ parser.add_argument("--ftp_key_management", type=str, default='DEFAULT', help='Specify the key management method (e.g., WPA-PSK, WPA-EAP')
+ parser.add_argument("--ftp_pairwise", type=str, default='NA')
+ parser.add_argument("--ftp_private_key", type=str, default='NA', help='Specify EAP private key certificate file.')
+ parser.add_argument("--ftp_ca_cert", type=str, default='NA', help='Specifiy the CA certificate file name')
+ parser.add_argument("--ftp_client_cert", type=str, default='NA', help='Specify the client certificate file name')
+ parser.add_argument("--ftp_pk_passwd", type=str, default='NA', help='Specify the password for the private key')
+ parser.add_argument("--ftp_pac_file", type=str, default='NA', help='Specify the pac file name')
+ # parser.add_argument('--ftp_file_name', type=str, help='Specify the file name containing group details. Example:file1')
+ # parser.add_argument('--ftp_group_name', type=str, help='Specify the groups name that contains a list of devices. Example: group1,group2')
+ # parser.add_argument('--ftp_profile_name', type=str, help='Specify the profile name to apply configurations to the devices.')
+ parser.add_argument("--ftp_wait_time", type=int, help='Specify the maximum time to wait for Configuration', default=60)
+
+ #qos
+ parser.add_argument('--qos_test',
+ action="store_true",
+ help='qos_test consists')
+ parser.add_argument('--qos_duration', help='--qos_duration sets the duration of the test', default="2m")
+ parser.add_argument('--qos_upload', help='--upload traffic load per connection (upload rate)')
# ---- QoS test arguments ----
parser.add_argument('--qos_download', help='--download traffic load per connection (download rate)')
parser.add_argument('--qos_traffic_type', help='Select the Traffic Type [lf_udp, lf_tcp]', required=False)
parser.add_argument('--qos_tos', help='Enter the tos. Example1 : "BK,BE,VI,VO" , Example2 : "BK,VO", Example3 : "VI" ')
parser.add_argument('--qos_device_list', help="Enter the devices on which the QoS test should be run", default=[])
# QoS pass/fail criteria
# NOTE(review): help text mentions "number of urls" — looks copy-pasted from the HTTP test; confirm intended wording.
parser.add_argument("--qos_expected_passfail_value", help="Specify the expected number of urls", default=None)
parser.add_argument("--qos_device_csv_name", type=str, help='Specify the csv name to store expected url values', default=None)
# QoS with groups and profile configuration
parser.add_argument('--qos_file_name', type=str, help='Specify the file name containing group details. Example:file1')
parser.add_argument('--qos_group_name', type=str, help='Specify the groups name that contains a list of devices. Example: group1,group2')
parser.add_argument('--qos_profile_name', type=str, help='Specify the profile name to apply configurations to the devices.')

# QoS device configuration (used with --qos_config)
parser.add_argument("--qos_config", action="store_true", help="Specify for configuring the devices")
parser.add_argument('--qos_ssid', help='WiFi SSID for script objects to associate to')
parser.add_argument('--qos_passwd', '--qos_password', '--qos_key', default="[BLANK]", help='WiFi passphrase/password/key')
parser.add_argument('--qos_security', help='WiFi Security protocol: < open | wep | wpa | wpa2 | wpa3 >', default="open")
# Optional QoS config args
parser.add_argument("--qos_eap_method", type=str, default='DEFAULT', help="Specify the EAP method for authentication.")
parser.add_argument("--qos_eap_identity", type=str, default='', help="Specify the EAP identity for authentication.")
parser.add_argument("--qos_ieee8021x", action="store_true", help='Enables 802.1X enterprise authentication for test stations.')
parser.add_argument("--qos_ieee80211u", action="store_true", help='Enables IEEE 802.11u (Hotspot 2.0) support.')
parser.add_argument("--qos_ieee80211w", type=int, default=1, help='Enables IEEE 802.11w (Management Frame Protection) support.')
parser.add_argument("--qos_enable_pkc", action="store_true", help='Enables pkc support.')
parser.add_argument("--qos_bss_transition", action="store_true", help='Enables BSS transition support.')
parser.add_argument("--qos_power_save", action="store_true", help='Enables power-saving features.')
parser.add_argument("--qos_disable_ofdma", action="store_true", help='Disables OFDMA support.')
parser.add_argument("--qos_roam_ft_ds", action="store_true", help='Enables fast BSS transition (FT) support')
parser.add_argument("--qos_key_management", type=str, default='DEFAULT', help='Specify the key management method (e.g., WPA-PSK, WPA-EAP)')
parser.add_argument("--qos_pairwise", type=str, default='NA')
parser.add_argument("--qos_private_key", type=str, default='NA', help='Specify EAP private key certificate file.')
parser.add_argument("--qos_ca_cert", type=str, default='NA', help='Specify the CA certificate file name')
parser.add_argument("--qos_client_cert", type=str, default='NA', help='Specify the client certificate file name')
parser.add_argument("--qos_pk_passwd", type=str, default='NA', help='Specify the password for the private key')
parser.add_argument("--qos_pac_file", type=str, default='NA', help='Specify the pac file name')
parser.add_argument("--qos_wait_time", type=int, help='Specify the maximum time to wait for Configuration', default=60)

# ---- Video streaming (vs) test arguments ----
parser.add_argument('--vs_test',
                    action="store_true",
                    help='vs_test consists')
parser.add_argument("--vs_url", default="www.google.com", help='specify the url you want to test on')
parser.add_argument("--vs_media_source", type=str, default='1')
parser.add_argument("--vs_media_quality", type=str, default='0')
parser.add_argument('--vs_duration', type=str, help='time to run traffic')
parser.add_argument('--vs_device_list', help="Enter the devices on which the video streaming test should be run", default=[])
# vs pass/fail criteria
# NOTE(review): help text mentions "number of urls" — looks copy-pasted from the HTTP test; confirm intended wording.
parser.add_argument("--vs_expected_passfail_value", help="Specify the expected number of urls", default=None)
parser.add_argument("--vs_device_csv_name", type=str, help='Specify the csv name to store expected url values', default=None)
# vs with groups and profile configuration
parser.add_argument('--vs_file_name', type=str, help='Specify the file name containing group details. Example:file1')
parser.add_argument('--vs_group_name', type=str, help='Specify the groups name that contains a list of devices. Example: group1,group2')
parser.add_argument('--vs_profile_name', type=str, help='Specify the profile name to apply configurations to the devices.')

# vs device configuration (used with --vs_config)
parser.add_argument("--vs_config", action="store_true", help="Specify for configuring the devices")
parser.add_argument('--vs_ssid', help='WiFi SSID for script objects to associate to')
parser.add_argument('--vs_passwd', '--vs_password', '--vs_key', default="[BLANK]", help='WiFi passphrase/password/key')
parser.add_argument('--vs_security', help='WiFi Security protocol: < open | wep | wpa | wpa2 | wpa3 >', default="open")
# Optional vs config args
parser.add_argument("--vs_eap_method", type=str, default='DEFAULT', help="Specify the EAP method for authentication.")
parser.add_argument("--vs_eap_identity", type=str, default='', help="Specify the EAP identity for authentication.")
parser.add_argument("--vs_ieee8021x", action="store_true", help='Enables 802.1X enterprise authentication for test stations.')
parser.add_argument("--vs_ieee80211u", action="store_true", help='Enables IEEE 802.11u (Hotspot 2.0) support.')
parser.add_argument("--vs_ieee80211w", type=int, default=1, help='Enables IEEE 802.11w (Management Frame Protection) support.')
parser.add_argument("--vs_enable_pkc", action="store_true", help='Enables pkc support.')
parser.add_argument("--vs_bss_transition", action="store_true", help='Enables BSS transition support.')
parser.add_argument("--vs_power_save", action="store_true", help='Enables power-saving features.')
parser.add_argument("--vs_disable_ofdma", action="store_true", help='Disables OFDMA support.')
parser.add_argument("--vs_roam_ft_ds", action="store_true", help='Enables fast BSS transition (FT) support')
parser.add_argument("--vs_key_management", type=str, default='DEFAULT', help='Specify the key management method (e.g., WPA-PSK, WPA-EAP)')
parser.add_argument("--vs_pairwise", type=str, default='NA')
parser.add_argument("--vs_private_key", type=str, default='NA', help='Specify EAP private key certificate file.')
parser.add_argument("--vs_ca_cert", type=str, default='NA', help='Specify the CA certificate file name')
parser.add_argument("--vs_client_cert", type=str, default='NA', help='Specify the client certificate file name')
parser.add_argument("--vs_pk_passwd", type=str, default='NA', help='Specify the password for the private key')
parser.add_argument("--vs_pac_file", type=str, default='NA', help='Specify the pac file name')
parser.add_argument("--vs_wait_time", type=int, help='Specify the maximum time to wait for Configuration', default=60)
# ---- Throughput (thput) test arguments ----
parser.add_argument('--thput_test',
                    action="store_true",
                    help='thput_test consists')
parser.add_argument('--thput_test_duration', help='--thput_test_duration sets the duration of the test', default="")
parser.add_argument('--thput_download', help='--thput_download traffic load per connection (download rate)', default='2560')
parser.add_argument('--thput_traffic_type', help='Select the Traffic Type [lf_udp, lf_tcp]', required=False)
parser.add_argument('--thput_upload', help='--thput_upload traffic load per connection (upload rate)', default='2560')
parser.add_argument('--thput_device_list', help="Enter the devices on which the test should be run", default=[])
parser.add_argument('--thput_do_interopability', action='store_true', help='Ensures test on devices run sequentially, capturing each device’s data individually for plotting in the final report.')
parser.add_argument("--thput_default_config", action="store_true", help="To stop configuring the devices in interoperability")
# thput pass/fail criteria
# NOTE(review): help text mentions "number of urls" — looks copy-pasted from the HTTP test; confirm intended wording.
parser.add_argument("--thput_expected_passfail_value", help="Specify the expected number of urls", default=None)
parser.add_argument("--thput_device_csv_name", type=str, help='Specify the csv name to store expected url values', default=None)
# thput with groups and profile configuration
parser.add_argument('--thput_file_name', type=str, help='Specify the file name containing group details. Example:file1')
parser.add_argument('--thput_group_name', type=str, help='Specify the groups name that contains a list of devices. Example: group1,group2')
parser.add_argument('--thput_profile_name', type=str, help='Specify the profile name to apply configurations to the devices.')

# thput device configuration (used with --thput_config)
parser.add_argument("--thput_config", action="store_true", help="Specify for configuring the devices")
parser.add_argument('--thput_ssid', help='WiFi SSID for script objects to associate to')
parser.add_argument('--thput_passwd', '--thput_password', '--thput_key', default="[BLANK]", help='WiFi passphrase/password/key')
parser.add_argument('--thput_security', help='WiFi Security protocol: < open | wep | wpa | wpa2 | wpa3 >', default="open")
# Optional thput config args
parser.add_argument("--thput_eap_method", type=str, default='DEFAULT', help="Specify the EAP method for authentication.")
parser.add_argument("--thput_eap_identity", type=str, default='', help="Specify the EAP identity for authentication.")
parser.add_argument("--thput_ieee8021x", action="store_true", help='Enables 802.1X enterprise authentication for test stations.')
parser.add_argument("--thput_ieee80211u", action="store_true", help='Enables IEEE 802.11u (Hotspot 2.0) support.')
parser.add_argument("--thput_ieee80211w", type=int, default=1, help='Enables IEEE 802.11w (Management Frame Protection) support.')
parser.add_argument("--thput_enable_pkc", action="store_true", help='Enables pkc support.')
parser.add_argument("--thput_bss_transition", action="store_true", help='Enables BSS transition support.')
parser.add_argument("--thput_power_save", action="store_true", help='Enables power-saving features.')
parser.add_argument("--thput_disable_ofdma", action="store_true", help='Disables OFDMA support.')
parser.add_argument("--thput_roam_ft_ds", action="store_true", help='Enables fast BSS transition (FT) support')
parser.add_argument("--thput_key_management", type=str, default='DEFAULT', help='Specify the key management method (e.g., WPA-PSK, WPA-EAP)')
parser.add_argument("--thput_pairwise", type=str, default='NA')
parser.add_argument("--thput_private_key", type=str, default='NA', help='Specify EAP private key certificate file.')
parser.add_argument("--thput_ca_cert", type=str, default='NA', help='Specify the CA certificate file name')
parser.add_argument("--thput_client_cert", type=str, default='NA', help='Specify the client certificate file name')
parser.add_argument("--thput_pk_passwd", type=str, default='NA', help='Specify the password for the private key')
parser.add_argument("--thput_pac_file", type=str, default='NA', help='Specify the pac file name')
parser.add_argument("--thput_wait_time", type=int, help='Specify the maximum time to wait for Configuration', default=60)
# ---- Multicast (mcast) test arguments ----
parser.add_argument('--mcast_test',
                    action="store_true",
                    help='mcast_test consists')
parser.add_argument(
    '--mcast_test_duration',
    help='--test_duration example --time 5d (5 days) default: 3m options: number followed by d, h, m or s',
    default='3m')
parser.add_argument(
    '--mcast_endp_type',
    help=(
        '--endp_type example --endp_type "lf_udp lf_tcp mc_udp" '
        ' Default: lf_udp , options: lf_udp, lf_udp6, lf_tcp, lf_tcp6, mc_udp, mc_udp6'),
    default='lf_udp',
    type=valid_endp_types)
parser.add_argument(
    '--mcast_upstream_port',
    help='--mcast_upstream_port example: --mcast_upstream_port eth1',
    default='eth1')
parser.add_argument(
    '--mcast_side_b_min_bps',
    help='''--side_b_min_bps or --download_min_bps, requested upstream min tx rate, comma separated list for multiple iterations. Default 256000
    When running with tcp/udp and mcast will use this value''',
    default="256000")
parser.add_argument(
    '--mcast_tos',
    help='--tos: Support different ToS settings: BK,BE,VI,VO,numeric',
    default="BE")
parser.add_argument(
    '--mcast_device_list',
    action='append',
    help='Specify the Resource IDs for real clients. Accepts a comma-separated list (e.g., 1.11,1.95,1.360).'
)
# mcast pass/fail criteria
# NOTE(review): help text mentions "number of urls" — looks copy-pasted from the HTTP test; confirm intended wording.
parser.add_argument("--mcast_expected_passfail_value", help="Specify the expected number of urls", default=None)
parser.add_argument("--mcast_device_csv_name", type=str, help='Specify the csv name to store expected url values', default=None)
# mcast with groups and profile configuration
parser.add_argument('--mcast_file_name', type=str, help='Specify the file name containing group details. Example:file1')
parser.add_argument('--mcast_group_name', type=str, help='Specify the groups name that contains a list of devices. Example: group1,group2')
parser.add_argument('--mcast_profile_name', type=str, help='Specify the profile name to apply configurations to the devices.')

# mcast device configuration (used with --mcast_config)
parser.add_argument("--mcast_config", action="store_true", help="Specify for configuring the devices")
parser.add_argument('--mcast_ssid', help='WiFi SSID for script objects to associate to')
parser.add_argument('--mcast_passwd', '--mcast_password', '--mcast_key', default="[BLANK]", help='WiFi passphrase/password/key')
parser.add_argument('--mcast_security', help='WiFi Security protocol: < open | wep | wpa | wpa2 | wpa3 >', default="open")
# Optional mcast config args
parser.add_argument("--mcast_eap_method", type=str, default='DEFAULT', help="Specify the EAP method for authentication.")
parser.add_argument("--mcast_eap_identity", type=str, default='', help="Specify the EAP identity for authentication.")
parser.add_argument("--mcast_ieee8021x", action="store_true", help='Enables 802.1X enterprise authentication for test stations.')
parser.add_argument("--mcast_ieee80211u", action="store_true", help='Enables IEEE 802.11u (Hotspot 2.0) support.')
parser.add_argument("--mcast_ieee80211w", type=int, default=1, help='Enables IEEE 802.11w (Management Frame Protection) support.')
parser.add_argument("--mcast_enable_pkc", action="store_true", help='Enables pkc support.')
parser.add_argument("--mcast_bss_transition", action="store_true", help='Enables BSS transition support.')
parser.add_argument("--mcast_power_save", action="store_true", help='Enables power-saving features.')
parser.add_argument("--mcast_disable_ofdma", action="store_true", help='Disables OFDMA support.')
parser.add_argument("--mcast_roam_ft_ds", action="store_true", help='Enables fast BSS transition (FT) support')
parser.add_argument("--mcast_key_management", type=str, default='DEFAULT', help='Specify the key management method (e.g., WPA-PSK, WPA-EAP)')
parser.add_argument("--mcast_pairwise", type=str, default='NA')
parser.add_argument("--mcast_private_key", type=str, default='NA', help='Specify EAP private key certificate file.')
parser.add_argument("--mcast_ca_cert", type=str, default='NA', help='Specify the CA certificate file name')
parser.add_argument("--mcast_client_cert", type=str, default='NA', help='Specify the client certificate file name')
parser.add_argument("--mcast_pk_passwd", type=str, default='NA', help='Specify the password for the private key')
parser.add_argument("--mcast_pac_file", type=str, default='NA', help='Specify the pac file name')
parser.add_argument("--mcast_wait_time", type=int, help='Specify the maximum time to wait for Configuration', default=60)
# ---- YouTube (yt) test arguments ----
parser.add_argument('--yt_test',
                    action="store_true",
                    help='yt_test consists')
parser.add_argument('--yt_url', type=str, help='youtube url')
parser.add_argument('--yt_duration', help='duration to run the test in sec')
parser.add_argument('--yt_res', default='Auto', help="to set resolution to 144p,240p,720p")
parser.add_argument('--yt_device_list', help='Specify the real device ports separated by comma')
# yt pass/fail criteria
# NOTE(review): help text mentions "number of urls" — looks copy-pasted from the HTTP test; confirm intended wording.
parser.add_argument("--yt_expected_passfail_value", help="Specify the expected number of urls", default=None)
parser.add_argument("--yt_device_csv_name", type=str, help='Specify the csv name to store expected url values', default=None)
# yt with groups and profile configuration
parser.add_argument('--yt_file_name', type=str, help='Specify the file name containing group details. Example:file1')
parser.add_argument('--yt_group_name', type=str, help='Specify the groups name that contains a list of devices. Example: group1,group2')
parser.add_argument('--yt_profile_name', type=str, help='Specify the profile name to apply configurations to the devices.')

# yt device configuration (used with --yt_config)
parser.add_argument("--yt_config", action="store_true", help="Specify for configuring the devices")
parser.add_argument('--yt_ssid', help='WiFi SSID for script objects to associate to')
parser.add_argument('--yt_passwd', '--yt_password', '--yt_key', default="[BLANK]", help='WiFi passphrase/password/key')
parser.add_argument('--yt_security', help='WiFi Security protocol: < open | wep | wpa | wpa2 | wpa3 >', default="open")
# Optional yt config args
parser.add_argument("--yt_eap_method", type=str, default='DEFAULT', help="Specify the EAP method for authentication.")
parser.add_argument("--yt_eap_identity", type=str, default='', help="Specify the EAP identity for authentication.")
parser.add_argument("--yt_ieee8021x", action="store_true", help='Enables 802.1X enterprise authentication for test stations.')
parser.add_argument("--yt_ieee80211u", action="store_true", help='Enables IEEE 802.11u (Hotspot 2.0) support.')
parser.add_argument("--yt_ieee80211w", type=int, default=1, help='Enables IEEE 802.11w (Management Frame Protection) support.')
parser.add_argument("--yt_enable_pkc", action="store_true", help='Enables pkc support.')
parser.add_argument("--yt_bss_transition", action="store_true", help='Enables BSS transition support.')
parser.add_argument("--yt_power_save", action="store_true", help='Enables power-saving features.')
parser.add_argument("--yt_disable_ofdma", action="store_true", help='Disables OFDMA support.')
parser.add_argument("--yt_roam_ft_ds", action="store_true", help='Enables fast BSS transition (FT) support')
parser.add_argument("--yt_key_management", type=str, default='DEFAULT', help='Specify the key management method (e.g., WPA-PSK, WPA-EAP)')
parser.add_argument("--yt_pairwise", type=str, default='NA')
parser.add_argument("--yt_private_key", type=str, default='NA', help='Specify EAP private key certificate file.')
parser.add_argument("--yt_ca_cert", type=str, default='NA', help='Specify the CA certificate file name')
parser.add_argument("--yt_client_cert", type=str, default='NA', help='Specify the client certificate file name')
parser.add_argument("--yt_pk_passwd", type=str, default='NA', help='Specify the password for the private key')
parser.add_argument("--yt_pac_file", type=str, default='NA', help='Specify the pac file name')
parser.add_argument("--yt_wait_time", type=int, help='Specify the maximum time to wait for Configuration', default=60)
# ---- Real browser (rb) test arguments ----
parser.add_argument('--rb_test',
                    action="store_true",
                    help='rb_test consists')
parser.add_argument("--rb_url", default="https://google.com", help='specify the url you want to test on')
parser.add_argument('--rb_duration', type=str, help='time to run traffic')
parser.add_argument('--rb_device_list', type=str, help='provide resource_ids of android devices. for instance: "10,12,14"')
# NOTE(review): dest is the unprefixed 'webgui_incremental' — verify no other test's argument collides with it.
parser.add_argument('--rb_webgui_incremental', '--rb_incremental_capacity', help="Specify the incremental values <1,2,3..>", dest='webgui_incremental', type=str)
parser.add_argument('--rb_incremental', help="to add incremental capacity to run the test", action='store_true')
# rb pass/fail criteria
# NOTE(review): help text mentions "number of urls" — looks copy-pasted from the HTTP test; confirm intended wording.
parser.add_argument("--rb_expected_passfail_value", help="Specify the expected number of urls", default=None)
parser.add_argument("--rb_device_csv_name", type=str, help='Specify the csv name to store expected url values', default=None)
# rb with groups and profile configuration
parser.add_argument('--rb_file_name', type=str, help='Specify the file name containing group details. Example:file1')
parser.add_argument('--rb_group_name', type=str, help='Specify the groups name that contains a list of devices. Example: group1,group2')
parser.add_argument('--rb_profile_name', type=str, help='Specify the profile name to apply configurations to the devices.')

# rb device configuration (used with --rb_config)
parser.add_argument("--rb_config", action="store_true", help="Specify for configuring the devices")
parser.add_argument('--rb_ssid', help='WiFi SSID for script objects to associate to')
parser.add_argument('--rb_passwd', '--rb_password', '--rb_key', default="[BLANK]", help='WiFi passphrase/password/key')
parser.add_argument('--rb_security', help='WiFi Security protocol: < open | wep | wpa | wpa2 | wpa3 >', default="open")
# Optional rb config args
parser.add_argument("--rb_eap_method", type=str, default='DEFAULT', help="Specify the EAP method for authentication.")
parser.add_argument("--rb_eap_identity", type=str, default='', help="Specify the EAP identity for authentication.")
# NOTE(review): flag name is '--rb_ieee80211' while every other test uses '--<prefix>_ieee8021x';
# kept as-is for backward compatibility with existing command lines — confirm and align in a breaking release.
parser.add_argument("--rb_ieee80211", action="store_true", help='Enables 802.1X enterprise authentication for test stations.')
parser.add_argument("--rb_ieee80211u", action="store_true", help='Enables IEEE 802.11u (Hotspot 2.0) support.')
parser.add_argument("--rb_ieee80211w", type=int, default=1, help='Enables IEEE 802.11w (Management Frame Protection) support.')
parser.add_argument("--rb_enable_pkc", action="store_true", help='Enables pkc support.')
parser.add_argument("--rb_bss_transition", action="store_true", help='Enables BSS transition support.')
parser.add_argument("--rb_power_save", action="store_true", help='Enables power-saving features.')
parser.add_argument("--rb_disable_ofdma", action="store_true", help='Disables OFDMA support.')
parser.add_argument("--rb_roam_ft_ds", action="store_true", help='Enables fast BSS transition (FT) support')
parser.add_argument("--rb_key_management", type=str, default='DEFAULT', help='Specify the key management method (e.g., WPA-PSK, WPA-EAP)')
parser.add_argument("--rb_pairwise", type=str, default='NA')
parser.add_argument("--rb_private_key", type=str, default='NA', help='Specify EAP private key certificate file.')
parser.add_argument("--rb_ca_cert", type=str, default='NA', help='Specify the CA certificate file name')
parser.add_argument("--rb_client_cert", type=str, default='NA', help='Specify the client certificate file name')
parser.add_argument("--rb_pk_passwd", type=str, default='NA', help='Specify the password for the private key')
parser.add_argument("--rb_pac_file", type=str, default='NA', help='Specify the pac file name')
parser.add_argument("--rb_wait_time", type=int, help='Specify the maximum time to wait for Configuration', default=60)
# ---- Zoom test arguments ----
parser.add_argument('--zoom_test',
                    action="store_true",
                    help='zoom_test consists')
parser.add_argument('--zoom_duration', type=int, help="Duration of the Zoom meeting in minutes")
parser.add_argument('--zoom_signin_email', type=str, help="Sign-in email")
parser.add_argument('--zoom_signin_passwd', type=str, help="Sign-in password")
parser.add_argument('--zoom_participants', type=int, help="Number of participants")
parser.add_argument('--zoom_audio', action='store_true')
parser.add_argument('--zoom_video', action='store_true')
parser.add_argument('--zoom_device_list', help="resources participated in the test")
parser.add_argument('--zoom_host', help="Host of the test")
# zoom pass/fail criteria
# NOTE(review): help text mentions "number of urls" — looks copy-pasted from the HTTP test; confirm intended wording.
parser.add_argument("--zoom_expected_passfail_value", help="Specify the expected number of urls", default=None)
parser.add_argument("--zoom_device_csv_name", type=str, help='Specify the csv name to store expected url values', default=None)
# zoom with groups and profile configuration
parser.add_argument('--zoom_file_name', type=str, help='Specify the file name containing group details. Example:file1')
parser.add_argument('--zoom_group_name', type=str, help='Specify the groups name that contains a list of devices. Example: group1,group2')
parser.add_argument('--zoom_profile_name', type=str, help='Specify the profile name to apply configurations to the devices.')

# zoom device configuration (used with --zoom_config)
parser.add_argument("--zoom_config", action="store_true", help="Specify for configuring the devices")
parser.add_argument('--zoom_ssid', help='WiFi SSID for script objects to associate to')
parser.add_argument('--zoom_passwd', '--zoom_password', '--zoom_key', default="[BLANK]", help='WiFi passphrase/password/key')
parser.add_argument('--zoom_security', help='WiFi Security protocol: < open | wep | wpa | wpa2 | wpa3 >', default="open")
# Optional zoom config args
parser.add_argument("--zoom_eap_method", type=str, default='DEFAULT', help="Specify the EAP method for authentication.")
parser.add_argument("--zoom_eap_identity", type=str, default='', help="Specify the EAP identity for authentication.")
parser.add_argument("--zoom_ieee8021x", action="store_true", help='Enables 802.1X enterprise authentication for test stations.')
parser.add_argument("--zoom_ieee80211u", action="store_true", help='Enables IEEE 802.11u (Hotspot 2.0) support.')
parser.add_argument("--zoom_ieee80211w", type=int, default=1, help='Enables IEEE 802.11w (Management Frame Protection) support.')
parser.add_argument("--zoom_enable_pkc", action="store_true", help='Enables pkc support.')
parser.add_argument("--zoom_bss_transition", action="store_true", help='Enables BSS transition support.')
parser.add_argument("--zoom_power_save", action="store_true", help='Enables power-saving features.')
parser.add_argument("--zoom_disable_ofdma", action="store_true", help='Disables OFDMA support.')
parser.add_argument("--zoom_roam_ft_ds", action="store_true", help='Enables fast BSS transition (FT) support')
parser.add_argument("--zoom_key_management", type=str, default='DEFAULT', help='Specify the key management method (e.g., WPA-PSK, WPA-EAP)')
parser.add_argument("--zoom_pairwise", type=str, default='NA')
parser.add_argument("--zoom_private_key", type=str, default='NA', help='Specify EAP private key certificate file.')
parser.add_argument("--zoom_ca_cert", type=str, default='NA', help='Specify the CA certificate file name')
parser.add_argument("--zoom_client_cert", type=str, default='NA', help='Specify the client certificate file name')
parser.add_argument("--zoom_pk_passwd", type=str, default='NA', help='Specify the password for the private key')
parser.add_argument("--zoom_pac_file", type=str, default='NA', help='Specify the pac file name')
parser.add_argument("--zoom_wait_time", type=int, help='Specify the maximum time to wait for Configuration', default=60)

+
# ---- CLI entry: parse arguments, validate the requested tests, then prepare dispatch ----
args = parser.parse_args()
args_dict = vars(args)
logger.debug(f"Parsed arguments: {args_dict}")  # was a stray print(); keep at debug level
candela_apis = Candela(ip=args.mgr, port=args.mgr_port, order_priority=args.order_priority)

# Map each CLI test name to its (runner function, human-readable label).
test_map = {
    "ping_test": (run_ping_test, "PING TEST"),
    "http_test": (run_http_test, "HTTP TEST"),
    "ftp_test": (run_ftp_test, "FTP TEST"),
    "qos_test": (run_qos_test, "QoS TEST"),
    "vs_test": (run_vs_test, "VIDEO STREAMING TEST"),
    "thput_test": (run_thput_test, "THROUGHPUT TEST"),
    "mcast_test": (run_mcast_test, "MULTICAST TEST"),
    "yt_test": (run_yt_test, "YOUTUBE TEST"),
    "rb_test": (run_rb_test, "REAL BROWSER TEST"),
    "zoom_test": (run_zoom_test, "ZOOM TEST"),
}

# At least one of --series_tests / --parallel_tests must be supplied.
if not args.series_tests and not args.parallel_tests:
    logger.error("Please provide tests cases --parallel_tests or --series_tests")
    logger.info(f"available tests are {list(test_map.keys())}")
    exit(1)  # non-zero: invalid usage (was exit(0), which signalled success)

tests_to_run_series = args.series_tests.split(',') if args.series_tests else []
tests_to_run_parallel = args.parallel_tests.split(',') if args.parallel_tests else []

# Every requested test must exist in the suite.
unknown_tests = [t for t in tests_to_run_series + tests_to_run_parallel if t not in test_map]
if unknown_tests:
    for test in unknown_tests:
        logger.error(f"{test} is not available in test suite")
    logger.info(f"available tests are {list(test_map.keys())}")
    exit(1)

# Duplicate tests are not allowed within the parallel set.
if len(tests_to_run_parallel) != len(set(tests_to_run_parallel)):
    logger.error("--parallel_tests: do not specify duplicate tests")
    exit(1)

# Browser-based tests (zoom/yt/rb) hold dedicated local ports that must be freed before running.
iszoom = 'zoom_test' in tests_to_run_parallel or 'zoom_test' in tests_to_run_series
isrb = 'rb_test' in tests_to_run_parallel or 'rb_test' in tests_to_run_series
isyt = 'yt_test' in tests_to_run_parallel or 'yt_test' in tests_to_run_series
candela_apis.series_tests = tests_to_run_series
candela_apis.parallel_tests = tests_to_run_parallel
candela_apis.misc_clean_up(layer3=True, layer4=True, generic=True, port_5000=iszoom, port_5002=isyt, port_5003=isrb)
+ if args.series_tests or args.parallel_tests:
+ series_threads = []
+ parallel_threads = []
+ parallel_connect = []
+ series_connect = []
+ rb_test = 'rb_test' in tests_to_run_parallel
+ yt_test = 'yt_test' in tests_to_run_parallel
+ zoom_test = 'zoom_test' in tests_to_run_parallel
+ # Process series tests
+ if args.series_tests:
+ ordered_series_tests = args.series_tests.split(',')
+ for idx, test_name in enumerate(ordered_series_tests):
+ test_name = test_name.strip().lower()
+ if test_name in test_map:
+ func, label = test_map[test_name]
+ args.current = "series"
+ if test_name in ['rb_test','zoom_test','yt_test']:
+ if test_name == "rb_test":
+ obj_no = 1
+ while f"rb_test_{obj_no}" in candela_apis.rb_obj_dict["series"]:
+ obj_no+=1
+ obj_name = f"rb_test_{obj_no}"
+ candela_apis.rb_obj_dict["series"][obj_name] = manager.dict({"obj":None,"data":None})
+ print('hiii data',candela_apis.rb_obj_dict)
+ elif test_name == "yt_test":
+ obj_no = 1
+ while f"yt_test_{obj_no}" in candela_apis.yt_obj_dict["series"]:
+ obj_no+=1
+ obj_name = f"yt_test_{obj_no}"
+ candela_apis.yt_obj_dict["series"][obj_name] = manager.dict({"obj":None,"data":None})
+ print('hiii data',candela_apis.yt_obj_dict)
+ elif test_name == "zoom_test":
+ obj_no = 1
+ while f"zoom_test_{obj_no}" in candela_apis.zoom_obj_dict["series"]:
+ obj_no+=1
+ obj_name = f"zoom_test_{obj_no}"
+ candela_apis.zoom_obj_dict["series"][obj_name] = manager.dict({"obj":None,"data":None})
+ print('hiii data',candela_apis.zoom_obj_dict)
+ series_threads.append(multiprocessing.Process(target=run_test_safe(func, f"{label} [Series {idx+1}]", args, candela_apis)))
+ else:
+ series_threads.append(threading.Thread(
+ target=run_test_safe(func, f"{label} [Series {idx+1}]", args, candela_apis)
+ ))
+ else:
+ print(f"Warning: Unknown test '{test_name}' in --series_tests")
+
+ # Process parallel tests
+ if args.parallel_tests:
+ ordered_parallel_tests = args.parallel_tests.split(',')
+ for idx, test_name in enumerate(ordered_parallel_tests):
+ test_name = test_name.strip().lower()
+ if test_name in test_map:
+ func, label = test_map[test_name]
+ args.current = "parallel"
+ if test_name in ['rb_test','zoom_test','yt_test']:
+ # if test_name == "rb_test":
+ # candela_apis.rb_pipe_dict["parallel"][len(candela_apis.rb_pipe_dict["parallel"])] = {}
+ # candela_apis.rb_pipe_dict["parallel"][len(candela_apis.rb_pipe_dict["parallel"])]["parent"],candela_apis.rb_pipe_dict["parallel"][len(candela_apis.rb_pipe_dict["parallel"])]["child"] = multiprocessing.Pipe()
+ # parent_conn, child_conn = multiprocessing.Pipe()
+ # candela_apis.parallel_connect[idx] = [test_name,parent_conn,child_conn]
+ if test_name == "rb_test":
+ candela_apis.rb_obj_dict["parallel"]["rb_test"] = manager.dict({"obj": None, "data": None})
+ print('hiii data',candela_apis.rb_obj_dict)
+ elif test_name == "yt_test":
+ candela_apis.yt_obj_dict["parallel"]["yt_test"] = manager.dict({"obj": None, "data": None})
+ print('hiii data',candela_apis.yt_obj_dict)
+ elif test_name == "zoom_test":
+ candela_apis.zoom_obj_dict["parallel"]["zoom_test"] = manager.dict({"obj": None, "data": None})
+ print('hiii data',candela_apis.zoom_obj_dict)
+ parallel_threads.append(multiprocessing.Process(target=run_test_safe(func, f"{label} [Parallel {idx+1}]", args, candela_apis)))
+ else:
+ parallel_threads.append(threading.Thread(
+ target=run_test_safe(func, f"{label} [Parallel {idx+1}]", args, candela_apis)
+ ))
+ else:
+ print(f"Warning: Unknown test '{test_name}' in --parallel_tests")
+ logging.info(f"Series Threads: {series_threads}")
+ logging.info(f"Parallel Threads: {parallel_threads}")
+ logging.info(f"connections parallel {candela_apis.parallel_connect}")
+ logging.info(f"connections series{candela_apis.series_connect}")
+ # time.sleep(20)
+ if args.order_priority == 'series':
+ candela_apis.current_exec="series"
+ for t in series_threads:
+ t.start()
+ t.join()
+ candela_apis.series_index += 1
+ # Then run parallel tests
+ if len(parallel_threads) != 0:
+ # candela_apis.misc_clean_up(layer3=False,layer4=False,generic=True)
+ candela_apis.misc_clean_up(layer3=True,layer4=True,generic=True,port_5000=iszoom,port_5002=isyt,port_5003=isrb)
+ print('starting parallel tests.......')
+ time.sleep(10)
+ candela_apis.current_exec = "parallel"
+ for t in parallel_threads:
+ t.start()
+
+ candela_apis.parallel_index = 0
+ for t in parallel_threads:
+ t.join()
+ candela_apis.parallel_index += 1
+
+ else:
+ candela_apis.current_exec="parallel"
+ for t in parallel_threads:
+ t.start()
+ # for p in parallel_processes:
+ # p.start()
+
+ for t in parallel_threads:
+ t.join()
+
+ if len(series_threads) != 0:
+ rb_test = 'rb_test' in tests_to_run_parallel
+ yt_test = 'yt_test' in tests_to_run_parallel
+ candela_apis.misc_clean_up(layer3=True,layer4=True,generic=True,port_5000=iszoom,port_5002=isyt,port_5003=isrb)
+ print('starting Series tests.......')
+ time.sleep(5)
+ candela_apis.current_exec="series"
+ for t in series_threads:
+ t.start()
+ t.join()
+ # for p in series_processes:
+ # p.start()
+ # p.join()
+ # candela_apis.misc_clean_up(layer3=True,layer4=True,generic=True)
+ else:
+ logger.error("provide either --paralell_tests or --series_tests")
+ exit(1)
+ rb_test = 'rb_test' in tests_to_run_parallel
+ yt_test = 'yt_test' in tests_to_run_parallel
+ # candela_apis.browser_cleanup(rb_test=rb_test,yt_test=yt_test)
+ # candela_apis.misc_clean_up(layer3=False,layer4=False,generic=True)
+ candela_apis.misc_clean_up(layer3=True,layer4=True,generic=True,port_5000=iszoom,port_5002=isyt,port_5003=isrb)
+ log_file = save_logs()
+ print(f"Logs saved to: {log_file}")
+ test_results_df = pd.DataFrame(list(test_results_list))
+ # You can also access the test results dataframe:
+ candela_apis.generate_overall_report(test_results_df)
+ print("\nTest Results Summary:")
+ print(test_results_df)
+ # candela_apis.overall_report.insert_table_at_marker(test_results_df,"for_table")
+ # candela_apis.overall_report.build_footer()
+ # html_file = candela_apis.overall_report.write_html()
+ # print("returned file {}".format(html_file))
+ # print(html_file)
+ # candela_apis.overall_report.write_pdf()
+
def run_test_safe(test_func, test_name, args, candela_apis):
    """Build a zero-argument callable that runs *test_func* and records the outcome.

    The returned wrapper never propagates an exception, so one failing test
    cannot take down the whole runner.  The result ("EXECUTED" /
    "NOT EXECUTED") is appended to the shared test_results_list and failure
    details are accumulated in the global error_logs string.

    NOTE(review): when the wrapper is used as a multiprocessing.Process
    target, the `error_logs` updates happen in the child process and are not
    visible to the parent; test_results_list must be a manager-backed list
    for results to propagate — confirm at call sites.
    """
    global error_logs

    def wrapper():
        global error_logs

        try:
            result = test_func(args, candela_apis)
            # A falsy return value means the test body never actually ran.
            if not result:
                status = "NOT EXECUTED"
                logger.error(f"{test_name} NOT EXECUTED")
            else:
                status = "EXECUTED"
                logger.info(f"{test_name} EXECUTED")
            test_results_list.append({"test_name": test_name, "status": status})

        except SystemExit as e:
            # sys.exit() / sys.exit(None) indicate success exactly like
            # exit(0); the previous `e.code != 0` check misclassified a bare
            # exit() as a failure because None != 0.
            if e.code in (None, 0):
                status = "EXECUTED"
            else:
                status = "NOT EXECUTED"
            error_msg = f"{test_name} exited with code {e.code}\n"
            logger.error(error_msg)
            error_logs += error_msg
            test_results_list.append({"test_name": test_name, "status": status})

        except Exception:
            status = "NOT EXECUTED"
            error_msg = f"{test_name} crashed unexpectedly\n"
            # logger.exception records the traceback in the log; keep a copy
            # in error_logs for the saved log file as well.
            logger.exception(error_msg)
            tb_str = traceback.format_exc()
            traceback.print_exc()
            error_logs += error_msg + tb_str + "\n"
            test_results_list.append({"test_name": test_name, "status": status})

    return wrapper
+
def save_logs():
    """Save accumulated error logs to a timestamped file in base_class_logs.

    Returns:
        str: path of the log file that was written.
    """
    global error_logs

    # Create directory if it doesn't exist
    log_dir = "base_class_logs"
    os.makedirs(log_dir, exist_ok=True)

    # The file does `import datetime` (module), so the original
    # `datetime.now()` raises AttributeError; time.strftime produces the
    # same "%Y%m%d_%H%M%S" stamp regardless of which datetime import style
    # is in effect.
    timestamp = time.strftime("%Y%m%d_%H%M%S")
    log_filename = f"{log_dir}/test_logs_{timestamp}.txt"

    # Write the accumulated logs to the file (may be empty if no failures).
    with open(log_filename, 'w') as f:
        f.write(error_logs)

    logger.info(f"Test logs saved to {log_filename}")
    return log_filename
+
def run_ping_test(args, candela_apis):
    """Run the ping test on real clients, forwarding the ping_* CLI options."""
    # Keyword arguments whose value is simply args.ping_<name>.
    forwarded = (
        "target", "expected_passfail_value", "device_csv_name", "file_name",
        "group_name", "profile_name", "ssid", "passwd", "security",
        "eap_method", "eap_identity", "ieee8021x", "ieee80211u", "ieee80211w",
        "enable_pkc", "bss_transition", "power_save", "disable_ofdma",
        "roam_ft_ds", "key_management", "pairwise", "private_key", "ca_cert",
        "client_cert", "pk_passwd", "pac_file", "wait_time",
    )
    kwargs = {name: getattr(args, "ping_" + name) for name in forwarded}
    return candela_apis.run_ping_test(
        real=True,
        ping_interval=args.ping_interval,
        ping_duration=args.ping_duration,
        # A supplied ping_config means "do not fall back to defaults".
        use_default_config=not args.ping_config,
        dev_list=args.ping_device_list,
        local_lf_report_dir=candela_apis.result_path,
        **kwargs,
    )
+
def run_http_test(args, candela_apis):
    """Run the HTTP (webpage) test, forwarding the http_* CLI options."""
    # Keyword arguments whose value is simply args.http_<name>.
    forwarded = (
        "bands", "duration", "file_size", "device_list",
        "expected_passfail_value", "device_csv_name", "file_name",
        "group_name", "profile_name", "config", "ssid", "passwd", "security",
        "eap_method", "eap_identity", "ieee8021x", "ieee80211u", "ieee80211w",
        "enable_pkc", "bss_transition", "power_save", "disable_ofdma",
        "roam_ft_ds", "key_management", "pairwise", "private_key", "ca_cert",
        "client_cert", "pk_passwd", "pac_file", "wait_time",
    )
    kwargs = {name: getattr(args, "http_" + name) for name in forwarded}
    return candela_apis.run_http_test(upstream_port=args.upstream_port, **kwargs)
+
def run_ftp_test(args, candela_apis):
    """Run the FTP test, forwarding the ftp_* CLI options."""
    # Keyword arguments whose value is simply args.ftp_<name>.
    forwarded = (
        "device_list", "bands", "expected_passfail_value", "device_csv_name",
        "file_name", "group_name", "profile_name", "config", "ssid",
        "passwd", "security", "eap_method", "eap_identity", "ieee8021x",
        "ieee80211u", "ieee80211w", "enable_pkc", "bss_transition",
        "power_save", "disable_ofdma", "roam_ft_ds", "key_management",
        "pairwise", "private_key", "ca_cert", "client_cert", "pk_passwd",
        "pac_file", "wait_time",
    )
    kwargs = {name: getattr(args, "ftp_" + name) for name in forwarded}
    return candela_apis.run_ftp_test(
        # The API expects a list of sizes; the CLI supplies a single size.
        file_sizes=[args.ftp_file_size],
        traffic_duration=args.ftp_duration,
        **kwargs,
    )
+
def run_qos_test(args, candela_apis):
    """Run the QoS test, forwarding the qos_* CLI options.

    The stray debug `print("QOS_LIST", ...)` leftover was removed; the
    device list is still forwarded via the device_list keyword.
    """
    return candela_apis.run_qos_test(
        upstream_port=args.upstream_port,
        test_duration=args.qos_duration,
        download=args.qos_download,
        upload=args.qos_upload,
        traffic_type=args.qos_traffic_type,
        tos=args.qos_tos,
        device_list=args.qos_device_list,
        expected_passfail_value=args.qos_expected_passfail_value,
        device_csv_name=args.qos_device_csv_name,
        file_name=args.qos_file_name,
        group_name=args.qos_group_name,
        profile_name=args.qos_profile_name,
        config=args.qos_config,
        ssid=args.qos_ssid,
        passwd=args.qos_passwd,
        security=args.qos_security,
        eap_method=args.qos_eap_method,
        eap_identity=args.qos_eap_identity,
        ieee8021x=args.qos_ieee8021x,
        ieee80211u=args.qos_ieee80211u,
        ieee80211w=args.qos_ieee80211w,
        enable_pkc=args.qos_enable_pkc,
        bss_transition=args.qos_bss_transition,
        power_save=args.qos_power_save,
        disable_ofdma=args.qos_disable_ofdma,
        roam_ft_ds=args.qos_roam_ft_ds,
        key_management=args.qos_key_management,
        pairwise=args.qos_pairwise,
        private_key=args.qos_private_key,
        ca_cert=args.qos_ca_cert,
        client_cert=args.qos_client_cert,
        pk_passwd=args.qos_pk_passwd,
        pac_file=args.qos_pac_file,
        wait_time=args.qos_wait_time
    )
+
def run_vs_test(args, candela_apis):
    """Run the video-streaming test, forwarding the vs_* CLI options."""
    # Keyword arguments whose value is simply args.vs_<name>.
    forwarded = (
        "url", "media_source", "media_quality", "duration", "device_list",
        "expected_passfail_value", "device_csv_name", "file_name",
        "group_name", "profile_name", "config", "ssid", "passwd",
        "eap_method", "eap_identity", "ieee8021x", "ieee80211u", "ieee80211w",
        "enable_pkc", "bss_transition", "power_save", "disable_ofdma",
        "roam_ft_ds", "key_management", "pairwise", "private_key", "ca_cert",
        "client_cert", "pk_passwd", "pac_file", "wait_time",
    )
    kwargs = {name: getattr(args, "vs_" + name) for name in forwarded}
    # Arguments that do not follow the vs_<name> naming pattern.
    kwargs["encryp"] = args.vs_security
    kwargs["upstream_port"] = args.upstream_port
    return candela_apis.run_vs_test1(**kwargs)
+
def run_thput_test(args, candela_apis):
    """Run the throughput test, forwarding the thput_* CLI options."""
    # Interoperability mode overrides both configuration flags before the
    # arguments are forwarded.
    if args.thput_do_interopability and args.thput_config:
        args.thput_default_config = False
        args.thput_config = False
    # Keyword arguments whose value is simply args.thput_<name>.
    forwarded = (
        "test_duration", "download", "upload", "traffic_type", "device_list",
        "do_interopability", "default_config", "expected_passfail_value",
        "device_csv_name", "file_name", "group_name", "profile_name",
        "config", "ssid", "passwd", "security", "eap_method", "eap_identity",
        "ieee8021x", "ieee80211u", "ieee80211w", "enable_pkc",
        "bss_transition", "power_save", "disable_ofdma", "roam_ft_ds",
        "key_management", "pairwise", "private_key", "ca_cert",
        "client_cert", "pk_passwd", "pac_file", "wait_time",
    )
    kwargs = {name: getattr(args, "thput_" + name) for name in forwarded}
    return candela_apis.run_throughput_test(upstream_port=args.upstream_port, **kwargs)
+
def run_mcast_test(args, candela_apis):
    """Run the multicast test, forwarding the mcast_* CLI options.

    Fixes a copy/paste bug: power_save previously forwarded
    args.mcast_ieee8021x instead of args.mcast_power_save.
    """
    return candela_apis.run_mc_test1(
        test_duration=args.mcast_test_duration,
        upstream_port=args.upstream_port,
        endp_type=args.mcast_endp_type,
        side_b_min_bps=args.mcast_side_b_min_bps,
        tos=args.mcast_tos,
        device_list=args.mcast_device_list,
        expected_passfail_value=args.mcast_expected_passfail_value,
        device_csv_name=args.mcast_device_csv_name,
        file_name=args.mcast_file_name,
        group_name=args.mcast_group_name,
        profile_name=args.mcast_profile_name,
        config=args.mcast_config,
        ssid=args.mcast_ssid,
        passwd=args.mcast_passwd,
        security=args.mcast_security,
        eap_method=args.mcast_eap_method,
        eap_identity=args.mcast_eap_identity,
        ieee8021x=args.mcast_ieee8021x,
        ieee80211u=args.mcast_ieee80211u,
        ieee80211w=args.mcast_ieee80211w,
        enable_pkc=args.mcast_enable_pkc,
        bss_transition=args.mcast_bss_transition,
        power_save=args.mcast_power_save,  # was args.mcast_ieee8021x (copy/paste bug)
        disable_ofdma=args.mcast_disable_ofdma,
        roam_ft_ds=args.mcast_roam_ft_ds,
        key_management=args.mcast_key_management,
        pairwise=args.mcast_pairwise,
        private_key=args.mcast_private_key,
        ca_cert=args.mcast_ca_cert,
        client_cert=args.mcast_client_cert,
        pk_passwd=args.mcast_pk_passwd,
        pac_file=args.mcast_pac_file,
        wait_time=args.mcast_wait_time
    )
+
def run_yt_test(args, candela_apis):
    """Run the YouTube streaming test, forwarding the yt_* CLI options.

    Fixes a copy/paste bug: power_save previously forwarded
    args.yt_ieee8021x instead of args.yt_power_save.
    """
    return candela_apis.run_yt_test(
        url=args.yt_url,
        duration=args.yt_duration,
        res=args.yt_res,
        upstream_port=args.upstream_port,
        resource_list=args.yt_device_list,
        expected_passfail_value=args.yt_expected_passfail_value,
        device_csv_name=args.yt_device_csv_name,
        file_name=args.yt_file_name,
        group_name=args.yt_group_name,
        profile_name=args.yt_profile_name,
        config=args.yt_config,
        ssid=args.yt_ssid,
        passwd=args.yt_passwd,
        encryp=args.yt_security,
        eap_method=args.yt_eap_method,
        eap_identity=args.yt_eap_identity,
        ieee8021x=args.yt_ieee8021x,
        ieee80211u=args.yt_ieee80211u,
        ieee80211w=args.yt_ieee80211w,
        enable_pkc=args.yt_enable_pkc,
        bss_transition=args.yt_bss_transition,
        power_save=args.yt_power_save,  # was args.yt_ieee8021x (copy/paste bug)
        disable_ofdma=args.yt_disable_ofdma,
        roam_ft_ds=args.yt_roam_ft_ds,
        key_management=args.yt_key_management,
        pairwise=args.yt_pairwise,
        private_key=args.yt_private_key,
        ca_cert=args.yt_ca_cert,
        client_cert=args.yt_client_cert,
        pk_passwd=args.yt_pk_passwd,
        pac_file=args.yt_pac_file,
        # "series" or "parallel", set by the scheduler in main().
        exec_type=args.current
    )
+
def run_rb_test(args, candela_apis):
    """Run the real-browser test, forwarding the rb_* CLI options."""
    # Keyword arguments whose value is simply args.rb_<name>.
    forwarded = (
        "url", "device_list", "expected_passfail_value", "device_csv_name",
        "file_name", "group_name", "profile_name", "config", "ssid",
        "passwd", "eap_method", "eap_identity", "ieee80211", "ieee80211u",
        "ieee80211w", "enable_pkc", "bss_transition", "power_save",
        "disable_ofdma", "roam_ft_ds", "key_management", "pairwise",
        "private_key", "ca_cert", "client_cert", "pk_passwd", "pac_file",
        "wait_time", "duration",
    )
    kwargs = {name: getattr(args, "rb_" + name) for name in forwarded}
    # Arguments that do not follow the rb_<name> naming pattern.
    kwargs["upstream_port"] = args.upstream_port
    kwargs["encryp"] = args.rb_security
    kwargs["exec_type"] = args.current
    return candela_apis.run_rb_test(**kwargs)
+
def run_zoom_test(args, candela_apis):
    """Run the Zoom conferencing test, forwarding the zoom_* CLI options."""
    # Keyword arguments whose value is simply args.zoom_<name>.
    forwarded = (
        "duration", "signin_email", "signin_passwd", "participants",
        "audio", "video", "expected_passfail_value", "device_csv_name",
        "file_name", "group_name", "profile_name", "config", "ssid",
        "passwd", "eap_method", "eap_identity", "ieee8021x", "ieee80211u",
        "ieee80211w", "enable_pkc", "bss_transition", "power_save",
        "disable_ofdma", "roam_ft_ds", "key_management", "pairwise",
        "private_key", "ca_cert", "client_cert", "pk_passwd", "pac_file",
        "wait_time",
    )
    kwargs = {name: getattr(args, "zoom_" + name) for name in forwarded}
    # Arguments that do not follow the zoom_<name> naming pattern.
    kwargs["upstream_port"] = args.upstream_port
    kwargs["resource_list"] = args.zoom_device_list
    kwargs["zoom_host"] = args.zoom_host
    kwargs["encryp"] = args.zoom_security
    kwargs["exec_type"] = args.current
    return candela_apis.run_zoom_test(**kwargs)
# def browser_cleanup(args,candela_apis):
#     return candela_apis.browser_cleanup(args)

# Guard the entry point so importing this module does not launch the suite.
if __name__ == "__main__":
    main()
diff --git a/py-scripts/lf_ftp.py b/py-scripts/lf_ftp.py
index f37a4ed6a..f3d9d532d 100755
--- a/py-scripts/lf_ftp.py
+++ b/py-scripts/lf_ftp.py
@@ -118,6 +118,7 @@
from typing import List, Optional
import asyncio
import csv
+import traceback
if sys.version_info[0] != 3:
print("This script requires Python 3")
@@ -147,7 +148,7 @@ def __init__(self, lfclient_host="localhost", lfclient_port=8080, sta_prefix="st
profile_name=None, group_name=None,
sixg_radio=None, fiveg_radio=None, upstream="eth1", _debug_on=False, _exit_on_error=False, _exit_on_fail=False, ap_name="",
direction=None, duration=None, traffic_duration=None, ssh_port=None, kpi_csv=None, kpi_results=None,
- lf_username="lanforge", lf_password="lanforge", clients_type="Virtual", dowebgui=False, device_list=[], test_name=None, result_dir=None,
+ lf_username="lanforge", lf_password="lanforge", clients_type="Virtual", dowebgui=False, device_list=None, test_name=None, result_dir=None,
eap_method=None,
eap_identity=None,
ieee80211=None,
@@ -167,6 +168,8 @@ def __init__(self, lfclient_host="localhost", lfclient_port=8080, sta_prefix="st
pk_passwd=None,
pac_file=None,
expected_passfail_val=None,
+ get_live_view=False,
+ total_floors=0,
config=False,
csv_name=None):
super().__init__(lfclient_host, lfclient_port, _debug=_debug_on, _exit_on_fail=_exit_on_fail)
@@ -227,11 +230,14 @@ def __init__(self, lfclient_host="localhost", lfclient_port=8080, sta_prefix="st
self.mac_id_list = []
self.real_client_list1 = []
self.uc_avg = []
+ self.failed_cx = []
+ self.tracking_map = {}
self.uc_min = []
self.uc_max = []
self.url_data = []
self.bytes_rd = []
self.rx_rate = []
+ self.total_err = []
self.channel_list = []
self.mode_list = []
self.cx_list = []
@@ -264,6 +270,8 @@ def __init__(self, lfclient_host="localhost", lfclient_port=8080, sta_prefix="st
self.pass_fail_list = []
self.test_input_list = []
self.api_url = 'http://{}:{}'.format(self.host, self.port)
+ self.get_live_view = get_live_view
+ self.total_floors = total_floors
logger.info("Test is Initialized")
@@ -888,6 +896,51 @@ def convert_file_size_in_Bytes(self, size):
# assume data is MB if no designator is on end of str
else:
return float(upper[:-2]) * 10 ** 6
+
def get_all_l4_data(self):
    """Collect every Layer 4-7 endpoint field for the created CXs in one REST call.

    Returns a dict mapping each field name to a list of per-endpoint values.
    For multi-endpoint responses the lists are ordered by self.cx_list; a
    single-endpoint response (bare dict) contributes one entry per field.
    'bytes-rd' is converted from bytes to megabytes (4 decimal places).
    NOTE(review): assumes the /layer4/list response exposes these exact
    field names — confirm against the LANforge REST API version in use.
    """
    # List of all fields to collect
    fields = [
        "name", "eid", "type", "status", "total-urls", "urls/s", "bytes-rd", "bytes-wr",
        "total-buffers", "total-rebuffers", "total-wait-time", "video-format-bitrate",
        "audio-format-bitrate", "frame-rate", "video-quality", "tx rate", "tx-rate-1m",
        "rx rate", "rx rate (1m)", "fb-min", "fb-avg", "fb-max", "uc-min", "uc-avg",
        "uc-max", "dns-min", "dns-avg", "dns-max", "total-err", "bad-proto", "bad-url",
        "rslv-p", "rslv-h", "!conn", "timeout", "nf (4xx)", "http-r", "http-p", "http-t",
        "acc. denied", "ftp-host", "ftp-stor", "ftp-port", "write", "read", "redir",
        "login-denied", "other-err", "elapsed", "rpt timer", "time-stamp"
    ]

    # Fetch all data in one go
    data = self.json_get(f"layer4/list?fields={','.join(fields)}")

    # Initialize result dict
    result = {field: [] for field in fields}

    # Access 'endpoint' field
    endpoint = data.get("endpoint", {})

    if isinstance(endpoint, dict):
        # Single endpoint format
        for field in fields:
            result[field].append(endpoint.get(field, None))
    else:
        # Multiple endpoints: iterate self.cx_list first so the output lists
        # stay ordered by created CX name; take the first response entry
        # whose key contains that CX name, then move to the next CX.
        for created_cx in self.cx_list:
            for cx in endpoint:
                if created_cx in cx:
                    for field in fields:
                        result[field].append(cx[created_cx].get(field, None))
                    break

    # Example transformation for specific fields (e.g., bytes-rd in MB)
    if "bytes-rd" in result:
        result["bytes-rd"] = [
            float(f"{int(x) / 1_000_000:.4f}") if x is not None else None
            for x in result["bytes-rd"]
        ]

    return result
+
# FOR WEB-UI // function usd to fetch runtime values and fill the csv.
def monitor_for_runtime_csv(self):
@@ -905,9 +958,13 @@ def monitor_for_runtime_csv(self):
max_bytes_rd = []
rx_rate_val = []
individual_device_data = {}
+ client_id_list = []
for port in self.input_devices_list:
columns = ['TIMESTAMP', 'Bytes-rd', 'total urls', 'download_rate', 'rx_rate', 'tx_rate', 'RSSI']
individual_device_data[port] = pd.DataFrame(columns=columns)
+ kk = port.split('.')
+ client_id_list.append('.'.join(kk[:2]))
+
while (current_time < endtime):
# data in json format
@@ -927,11 +984,19 @@ def monitor_for_runtime_csv(self):
self.data['UC-MIN'] = self.uc_min
self.data['UC-AVG'] = self.uc_avg
self.data['UC-MAX'] = self.uc_max
+ self.data['client_id'] = client_id_list
+ self.data['total_err'] = self.total_err
rx_rate_val.append(list(self.rx_rate))
for i, port in enumerate(self.input_devices_list):
- row_data = [current_time, self.bytes_rd[i], self.url_data[i], self.rx_rate[i], self.port_rx_rate[i], self.tx_rate[i], self.rssi_list[i]]
- individual_device_data[port].loc[len(individual_device_data[port])] = row_data
+ try:
+ row_data = [current_time, self.bytes_rd[i], self.url_data[i], self.rx_rate[i], self.port_rx_rate[i], self.tx_rate[i], self.rssi_list[i]]
+ individual_device_data[port].loc[len(individual_device_data[port])] = row_data
+ except:
+ logger.info(f"ftp0 iii {i}")
+ logger.info(f"row data HTTP0: {current_time}, {self.bytes_rd}, {self.url_data}, {self.rx_rate}, {self.port_rx_rate}, {self.tx_rate}, {self.rssi_list}")
+ traceback.print_exc()
+ exit(1)
# calculating average for rx_rate
for j in range(len(rx_rate_val[0])):
rx_rate_sum = 0
@@ -954,44 +1019,26 @@ def monitor_for_runtime_csv(self):
max_bytes_rd = list(self.bytes_rd)
self.data['Bytes RD'] = self.bytes_rd
-
- if 'endpoint' in total_url_data.keys():
- # list of layer 4 connections name
- # temp_data can be used to check data as well as check whether the endpoint has data
- if type(total_url_data['endpoint']) is dict:
- temp_data[self.cx_list[0]] = total_url_data['endpoint']['total-urls']
- else:
- for cx in total_url_data['endpoint']:
- for CX in cx:
- for created_cx in self.cx_list:
- if CX == created_cx:
- temp_data[created_cx] = cx[CX]['total-urls']
-
- if temp_data != {}:
-
- self.data["status"] = ["RUNNING"] * len(list(temp_data.keys()))
- # self.data["url_data"] = list(temp_data.values())
- self.data["url_data"] = self.url_data
- else:
- self.data["status"] = ["RUNNING"] * len(self.cx_list)
- self.data["url_data"] = [0] * len(self.cx_list)
- time_difference = abs(end_time - datetime.now())
- total_hours = time_difference.total_seconds() / 3600
- remaining_minutes = (total_hours % 1) * 60
- self.data["start_time"] = [start_time] * len(self.cx_list)
- self.data["end_time"] = [end_time.strftime("%d/%m %I:%M:%S %p")] * len(self.cx_list)
- self.data["remaining_time"] = [[str(int(total_hours)) + " hr and " + str(
- int(remaining_minutes)) + " min" if int(total_hours) != 0 or int(
- remaining_minutes) != 0 else '<1 min'][0]] * len(self.cx_list)
+ self.data["url_data"] = self.url_data
+ self.data["status"] = ["RUNNING"] * len(self.url_data)
+ time_difference = abs(end_time - datetime.now())
+ total_hours = time_difference.total_seconds() / 3600
+ remaining_minutes = (total_hours % 1) * 60
+ self.data["start_time"] = [start_time] * len(self.cx_list)
+ self.data["end_time"] = [end_time.strftime("%d/%m %I:%M:%S %p")] * len(self.cx_list)
+ self.data["remaining_time"] = [[str(int(total_hours)) + " hr and " + str(
+ int(remaining_minutes)) + " min" if int(total_hours) != 0 or int(
+ remaining_minutes) != 0 else '<1 min'][0]] * len(self.cx_list)
+ try:
df1 = pd.DataFrame(self.data)
- if self.dowebgui:
- df1.to_csv('{}/ftp_datavalues.csv'.format(self.result_dir), index=False)
- if self.clients_type == 'Real':
- df1.to_csv("ftp_datavalues.csv", index=False)
-
- else:
-
- logger.info("No layer 4-7 endpoints - No endpoint in reponse")
+ except:
+ logger.info(f'error error data {self.data}')
+ traceback.print_exc()
+ exit(1)
+ if self.dowebgui:
+ df1.to_csv('{}/ftp_datavalues.csv'.format(self.result_dir), index=False)
+ if self.clients_type == 'Real':
+ df1.to_csv("ftp_datavalues.csv", index=False)
time.sleep(5)
if self.dowebgui == "True":
@@ -1010,100 +1057,82 @@ def monitor_for_runtime_csv(self):
df.to_csv(f"{endtime}-ftp-{port}.csv", index=False)
individual_device_csv_names.append(f'{endtime}-ftp-{port}')
self.individual_device_csv_names = individual_device_csv_names
+ try:
+ all_l4_data = self.get_all_l4_data()
+ df = pd.DataFrame(all_l4_data)
+ df.to_csv("all_l4_data.csv", index=False)
+ except:
+ logger.error("All l4 data not found")
    # Created a function to get uc-avg, uc-min, uc-max, ssid and all other details of the devices
def get_layer4_data(self):
    """Poll per-connection Layer 4-7 statistics for every created CX.

    Returns:
        dict: parallel lists (one entry per name in self.cx_list) for
        uc-avg/uc-max/uc-min URL times, total URL count, rx rate (1m),
        bytes read (raw) and total errors.

    A CX missing from the endpoint response is recorded in self.failed_cx
    and backfilled with the value seen on the previous poll (0 on the very
    first poll) so the list lengths stay aligned with self.cx_list.
    """
    try:
        l4_data = self.local_realm.json_get('layer4/{}/list?fields=uc-avg,uc-max,uc-min,total-urls,rx rate (1m),bytes-rd,total-err'.format(','.join(self.cx_list)))['endpoint']
    except Exception:
        # Request failed or the response had no 'endpoint' key: nothing to
        # monitor, so bail out (was a bare `except:`, narrowed).
        logger.error("NO L4 endpoint found")
        exit(1)
    l4_dict = {
        'uc_avg_data': [],
        'uc_max_data': [],
        'uc_min_data': [],
        'url_times': [],
        'rx_rate': [],
        'bytes_rd': [],
        'total_err': []
    }
    # A single endpoint comes back as a bare dict; normalise it to the
    # list-of-{name: data} shape used for multiple endpoints.
    if not isinstance(l4_data, list):
        l4_data = [{l4_data['name']: l4_data}]
    for idx, cx in enumerate(self.cx_list):
        cx_found = False
        for entry in l4_data:
            for cx_name, value in entry.items():
                if cx == cx_name:
                    l4_dict['uc_avg_data'].append(value['uc-avg'])
                    l4_dict['uc_max_data'].append(value['uc-max'])
                    l4_dict['uc_min_data'].append(value['uc-min'])
                    l4_dict['url_times'].append(value['total-urls'])
                    l4_dict['rx_rate'].append(value['rx rate (1m)'])
                    l4_dict['bytes_rd'].append(value['bytes-rd'])
                    l4_dict['total_err'].append(value['total-err'])
                    cx_found = True
                    break  # avoid duplicate appends if a name repeats
            if cx_found:
                break
        if not cx_found:
            # CX vanished from the response: remember it and carry the
            # previous poll's values forward (0 on the first poll).
            logger.info("appending default for http %s", cx)
            self.failed_cx.append(cx)
            for key in l4_dict:
                l4_dict[key].append(0 if not self.tracking_map else self.tracking_map[key][idx])
    # Shallow copy is fine: l4_dict is rebuilt from scratch on every call.
    self.tracking_map = l4_dict.copy()

    return l4_dict
def get_device_details(self):
dataset = []
self.channel_list, self.mode_list, self.ssid_list, self.uc_avg, self.uc_max, self.url_data, self.uc_min, self.bytes_rd, self.rx_rate = [], [], [], [], [], [], [], [], []
+ self.total_err = []
if self.clients_type == "Real":
self.get_port_data()
# data in json format
# data = self.json_get("layer4/list?fields=bytes-rd")
- uc_avg_data = self.json_get("layer4/list?fields=uc-avg")
- uc_max_data = self.json_get("layer4/list?fields=uc-max")
- uc_min_data = self.json_get("layer4/list?fields=uc-min")
- total_url_data = self.json_get("layer4/list?fields=total-urls")
- bytes_rd = self.json_get("layer4/list?fields=bytes-rd")
- rx_rate = self.json_get("layer4/list?fields=rx rate (1m)")
- if 'endpoint' in uc_avg_data.keys():
- # list of layer 4 connections name
- if type(uc_avg_data['endpoint']) is dict:
- self.uc_avg.append(uc_avg_data['endpoint']['uc-avg'])
- self.uc_max.append(uc_max_data['endpoint']['uc-max'])
- self.uc_min.append(uc_min_data['endpoint']['uc-min'])
- self.rx_rate.append(rx_rate['endpoint']['rx rate (1m)'])
- # reading uc-avg data in json format
- self.url_data.append(total_url_data['endpoint']['total-urls'])
- dataset.append(bytes_rd['endpoint']['bytes-rd'])
- self.bytes_rd = [float(f"{(int(i) / 1000000): .4f}") for i in dataset]
- else:
- for created_cx in self.cx_list:
- for cx in uc_avg_data['endpoint']:
- if created_cx in cx:
- self.uc_avg.append(cx[created_cx]['uc-avg'])
- break
-
- for cx in uc_max_data['endpoint']:
- if created_cx in cx:
- self.uc_max.append(cx[created_cx]['uc-max'])
- break
-
- for cx in uc_min_data['endpoint']:
- if created_cx in cx:
- self.uc_min.append(cx[created_cx]['uc-min'])
- break
-
- for cx in total_url_data['endpoint']:
- if created_cx in cx:
- self.url_data.append(cx[created_cx]['total-urls'])
- break
-
- for cx in bytes_rd['endpoint']:
- if created_cx in cx:
- dataset.append(cx[created_cx]['bytes-rd'])
- break
-
- for cx in rx_rate['endpoint']:
- if created_cx in cx:
- self.rx_rate.append(cx[created_cx]['rx rate (1m)'])
- break
- self.bytes_rd = [float(f"{(i / 1000000): .4f}") for i in dataset]
- # for cx in uc_avg_data['endpoint']:
- # for CX in cx:
- # for created_cx in self.cx_list:
- # if CX == created_cx:
- # self.uc_avg.append(cx[CX]['uc-avg'])
- # for cx in uc_max_data['endpoint']:
- # for CX in cx:
- # for created_cx in self.cx_list:
- # if CX == created_cx:
- # self.uc_max.append(cx[CX]['uc-max'])
- # for cx in uc_min_data['endpoint']:
- # for CX in cx:
- # for created_cx in self.cx_list:
- # if CX == created_cx:
- # self.uc_min.append(cx[CX]['uc-min'])
- # for cx in total_url_data['endpoint']:
- # for CX in cx:
- # for created_cx in self.cx_list:
- # if CX == created_cx:
- # self.url_data.append(cx[CX]['total-urls'])
- # for cx in bytes_rd['endpoint']:
- # for CX in cx:
- # for created_cx in self.cx_list:
- # if CX == created_cx:
- # dataset.append(cx[CX]['bytes-rd'])
- # self.bytes_rd=[float(f"{(i / 1000000): .4f}") for i in dataset]
- # for cx in rx_rate['endpoint']:
- # for CX in cx:
- # for created_cx in self.cx_list:
- # if CX == created_cx:
- # self.rx_rate.append(cx[CX]['rx rate'])
- else:
- total_data = self.json_get("layer4/all")
- logger.info("No endpoint found")
- logger.info(total_data)
+ l4_data = self.get_layer4_data()
+ self.uc_avg = l4_data["uc_avg_data"]
+ self.uc_max = l4_data["uc_max_data"]
+ self.uc_min = l4_data["uc_min_data"]
+ self.rx_rate = l4_data["rx_rate"]
+ self.total_err = l4_data["total_err"]
+ self.url_data = l4_data["url_times"]
+ dataset = l4_data["bytes_rd"]
+ self.bytes_rd = [float(f"{(i / 1000000): .4f}") for i in dataset]
+ urls_downloaded = []
+ for i in range(len(self.total_err)):
+ urls_downloaded.append(self.url_data[i]-self.total_err[i])
+ self.url_data = list(urls_downloaded)
def get_port_data(self):
"""
@@ -1274,9 +1303,17 @@ def my_monitor_for_real_devices(self):
self.channel_list.append(str(port_data['channel']))
self.mode_list.append(str(port_data['mode']))
self.ssid_list.append(str(port_data['ssid']))
+
if self.dowebgui:
+ client_id_list = []
+ for port in self.input_devices_list:
+ kk = port.split('.')
+ # print('kk',kk)
+ client_id_list.append('.'.join(kk[:2]))
self.data_for_webui = {
"client": self.cx_list,
+ "client_id": client_id_list,
+ "Rx Rate(1m)":self.rx_rate,
"url_data": self.url_data,
"bytes rd": self.bytes_rd,
"uc_min": self.uc_min,
@@ -1284,7 +1321,8 @@ def my_monitor_for_real_devices(self):
"uc_avg": self.uc_avg,
"start_time": self.data["start_time"],
"end_time": self.data["end_time"],
- "remaining_time": [0] * len(self.cx_list)
+ "remaining_time": [0] * len(self.cx_list),
+ "total_err" : self.total_err
}
logger.info("Monitoring complete")
@@ -1766,7 +1804,12 @@ def generate_report(self, ftp_data, date, input_setup_info, test_rig, test_tag,
# To move ftp_datavalues.csv in report folder
report_path_date_time = self.report.get_path_date_time()
if self.clients_type == "Real":
+
shutil.move('ftp_datavalues.csv', report_path_date_time)
+ try:
+ shutil.move('all_l4_data.csv',report_path_date_time)
+ except:
+ logger.error("failed to create all layer 4 csv")
for csv_name in self.individual_device_csv_names:
shutil.move(f"{csv_name}.csv", report_path_date_time)
self.report.set_title("FTP Test")
@@ -1789,6 +1832,7 @@ def generate_report(self, ftp_data, date, input_setup_info, test_rig, test_tag,
"Security": self.security,
"Device List": ", ".join(all_devices_names),
"No of Devices": "Total" + f"({no_of_stations})" + total_devices,
+ "Failed CXs": self.failed_cx if self.failed_cx else "NONE",
"File size": self.file_size,
"File location": "/home/lanforge",
"Traffic Direction": self.direction,
@@ -1861,7 +1905,7 @@ def generate_report(self, ftp_data, date, input_setup_info, test_rig, test_tag,
_color_name=['orange'],
_show_bar_value=True,
_enable_csv=True,
- _graph_image_name="Total-url", _color_edge=['black'],
+ _graph_image_name="Total-url_ftp", _color_edge=['black'],
_color=['orange'],
_label=[self.direction])
graph_png = graph.build_bar_graph_horizontal()
@@ -1894,7 +1938,7 @@ def generate_report(self, ftp_data, date, input_setup_info, test_rig, test_tag,
_color_name=['steelblue'],
_show_bar_value=True,
_enable_csv=True,
- _graph_image_name="ucg-avg", _color_edge=['black'],
+ _graph_image_name="ucg-avg_ftp", _color_edge=['black'],
_color=['steelblue'],
_label=[self.direction])
graph_png = graph.build_bar_graph_horizontal()
@@ -1905,6 +1949,31 @@ def generate_report(self, ftp_data, date, input_setup_info, test_rig, test_tag,
self.report.set_csv_filename(graph_png)
self.report.move_csv_file()
self.report.build_graph()
+ if(self.dowebgui and self.get_live_view):
+ for floor in range(0,int(self.total_floors)):
+ script_dir = os.path.dirname(os.path.abspath(__file__))
+ throughput_image_path = os.path.join(script_dir, "heatmap_images", f"ftp_{self.test_name}_{floor+1}.png")
+ # rssi_image_path = os.path.join(script_dir, "heatmap_images", f"{self.test_name}_rssi_{floor+1}.png")
+ timeout = 60 # seconds
+ start_time = time.time()
+
+ while not (os.path.exists(throughput_image_path)):
+ if time.time() - start_time > timeout:
+ print("Timeout: Images not found within 60 seconds.")
+ break
+ time.sleep(1)
+ while not os.path.exists(throughput_image_path):
+ if os.path.exists(throughput_image_path):
+ break
+ # time.sleep(10)
+ if os.path.exists(throughput_image_path):
+ self.report.set_custom_html('')
+ self.report.build_custom()
+ # self.report.set_custom_html("Average Throughput Heatmap:
")
+ # self.report.build_custom()
+ self.report.set_custom_html(f'
')
+ self.report.build_custom()
+ # os.remove(throughput_image_path)
self.report.set_obj_html("File Download Time (sec)", "The below table will provide information of "
"minimum, maximum and the average time taken by clients to download a file in seconds")
self.report.build_objective()
@@ -1949,7 +2018,8 @@ def generate_report(self, ftp_data, date, input_setup_info, test_rig, test_tag,
" No of times File downloaded ": self.url_data,
" Time Taken to Download file (ms)": self.uc_avg,
" Bytes-rd (Mega Bytes)": self.bytes_rd,
- " RX RATE (Mbps) ": self.rx_rate
+ " RX RATE (Mbps) ": self.rx_rate,
+ "Failed Urls": self.total_err
}
if self.expected_passfail_val or self.csv_name:
dataframe[" Expected output "] = self.test_input_list
@@ -1978,6 +2048,12 @@ def generate_report(self, ftp_data, date, input_setup_info, test_rig, test_tag,
logger.info("returned file {}".format(html_file))
logger.info(html_file)
self.report.write_pdf()
+ if(self.get_live_view):
+ folder_path = os.path.join(script_dir, "heatmap_images")
+ for f in os.listdir(folder_path):
+ file_path = os.path.join(folder_path, f)
+ if os.path.isfile(file_path):
+ os.remove(file_path)
# The following lines can be used when the kpi results are needed
# self.kpi_results
@@ -2311,6 +2387,67 @@ def generate_dataframe(self, groupdevlist: List[str], clients_list: List[str], m
else:
return None
+ def monitor_cx(self):
+ """
+ This function waits for upto 20 iterations to allow all CXs (connections) to be created.
+
+ If some CXs are still not created after 20 iterations, then the CXs related to that device are removed,
+ along with their associated client and MAC entries from all relevant lists.
+ """
+ max_retry = 20
+ current_retry = 0
+ failed_cx = []
+ flag = 0
+ idx_list = []
+ del_device_list = []
+ del_mac_list = []
+ del_input_devices_list = []
+ del_device_list1 = []
+ del_real_client_list = []
+ while current_retry < max_retry:
+ failed_cx.clear()
+ idx_list.clear()
+ del_device_list.clear()
+ del_mac_list.clear()
+ del_input_devices_list.clear()
+ del_device_list1.clear()
+ del_real_client_list.clear()
+ created_cx_list = list(self.cx_list)
+ for i, created_cxs in enumerate(created_cx_list):
+ try:
+ _ = self.local_realm.json_get("layer4/%s/list?fields=%s" %
+ (created_cxs, 'status'))['endpoint']['status']
+ except BaseException:
+ logger.error(f'cx not created for {self.input_devices_list[i]}')
+ failed_cx.append(created_cxs)
+ del_device_list.append(self.device_list[i])
+ del_mac_list.append(self.mac_id_list[i])
+ del_input_devices_list.append(self.input_devices_list[i])
+ del_device_list1.append(self.real_client_list1[i])
+ del_real_client_list.append(self.real_client_list[i])
+ if len(failed_cx) == 0:
+ flag = 1
+ break
+ logger.info(f'Try {current_retry} out of 20: Waiting for the cross-connections to be created.')
+ time.sleep(2)
+ current_retry += 1
+
+ if flag:
+ logger.info('cross connections found for all devices')
+ return
+ for cx in failed_cx:
+ self.cx_list.remove(cx)
+ for i in range(len(del_input_devices_list)):
+ logger.info(f'Cross connection not created for {self.input_devices_list[i]}')
+ self.input_devices_list.remove(del_input_devices_list[i])
+ self.mac_id_list.remove(del_mac_list[i])
+ self.device_list.remove(del_device_list[i])
+ self.real_client_list1.remove(del_device_list1[i])
+ self.real_client_list.remove(del_real_client_list[i])
+ if len(self.input_devices_list) == 0:
+ logger.error('No cross connections created, aborting test')
+ exit(1)
+
def validate_args(args):
"""Validate CLI arguments."""
@@ -2578,6 +2715,8 @@ def main():
optional.add_argument("--pk_passwd", type=str, default='NA', help='Specify the password for the private key')
optional.add_argument("--pac_file", type=str, default='NA', help='Specify the pac file name')
+ optional.add_argument('--get_live_view', help="If true will heatmap will be generated from testhouse automation WebGui ", action='store_true')
+ optional.add_argument('--total_floors', help="Total floors from testhouse automation WebGui ", default="0")
# logging configuration
optional.add_argument(
"--lf_logger_config_json",
@@ -2706,7 +2845,9 @@ def pass_fail_duration(band, file_size):
expected_passfail_val=args.expected_passfail_value,
csv_name=args.device_csv_name,
wait_time=args.wait_time,
- config=args.config
+ config=args.config,
+ get_live_view= args.get_live_view,
+ total_floors = args.total_floors
)
interation_num = interation_num + 1
@@ -2743,6 +2884,9 @@ def pass_fail_duration(band, file_size):
logger.info(obj.get_fail_message())
exit(1)
+ if obj.clients_type == 'Real':
+ obj.monitor_cx()
+ logger.info(f'Test started on the devices : {obj.input_devices_list}')
# First time stamp
time1 = datetime.now()
logger.info("Traffic started running at %s", time1)
@@ -2793,6 +2937,13 @@ def pass_fail_duration(band, file_size):
"Security": args.security,
"Contact": "support@candelatech.com"
}
+ if args.dowebgui:
+ obj.data_for_webui["status"] = ["STOPPED"] * len(obj.url_data)
+
+ df1 = pd.DataFrame(obj.data_for_webui)
+ df1.to_csv('{}/ftp_datavalues.csv'.format(obj.result_dir), index=False)
+ # copying to home directory i.e home/user_name
+ # obj.copy_reports_to_home_dir()
# Report generation when groups are specified
if args.group_name:
obj.generate_report(ftp_data, date, input_setup_info, test_rig=args.test_rig,
@@ -2807,15 +2958,19 @@ def pass_fail_duration(band, file_size):
dut_sw_version=args.dut_sw_version, dut_model_num=args.dut_model_num,
dut_serial_num=args.dut_serial_num, test_id=args.test_id,
bands=args.bands, csv_outfile=args.csv_outfile, local_lf_report_dir=args.local_lf_report_dir)
-# FOR WEB-UI // to fetch the last logs of the execution.
- if args.dowebgui:
- obj.data_for_webui["status"] = ["STOPPED"] * len(obj.url_data)
- df1 = pd.DataFrame(obj.data_for_webui)
- df1.to_csv('{}/ftp_datavalues.csv'.format(obj.result_dir), index=False)
- # copying to home directory i.e home/user_name
+ if args.dowebgui:
obj.copy_reports_to_home_dir()
+# FOR WEB-UI // to fetch the last logs of the execution.
+ # if args.dowebgui:
+ # obj.data_for_webui["status"] = ["STOPPED"] * len(obj.url_data)
+
+ # df1 = pd.DataFrame(obj.data_for_webui)
+ # df1.to_csv('{}/ftp_datavalues.csv'.format(obj.result_dir), index=False)
+ # # copying to home directory i.e home/user_name
+ # obj.copy_reports_to_home_dir()
+
if __name__ == '__main__':
main()
diff --git a/py-scripts/lf_interop_ping.py b/py-scripts/lf_interop_ping.py
index ef20172d3..65d926b51 100755
--- a/py-scripts/lf_interop_ping.py
+++ b/py-scripts/lf_interop_ping.py
@@ -629,7 +629,6 @@ def generate_report(self, result_json=None, result_dir='Ping_Test_Report', repor
report.set_csv_filename(graph_png)
report.move_csv_file()
report.build_graph()
-
if self.real:
# Calculating the pass/fail criteria when either expected_passfail_val or csv_name is provided
if self.expected_passfail_val or self.csv_name:
diff --git a/py-scripts/lf_interop_ping_plotter.py b/py-scripts/lf_interop_ping_plotter.py
old mode 100755
new mode 100644
index d5abd8939..7fb6df0bf
--- a/py-scripts/lf_interop_ping_plotter.py
+++ b/py-scripts/lf_interop_ping_plotter.py
@@ -154,7 +154,9 @@ def __init__(self,
server_ip=None,
expected_passfail_val=None,
csv_name=None,
- wait_time=60):
+ wait_time=60,
+ floors=None,
+ get_live_view=None):
super().__init__(lfclient_host=host,
lfclient_port=port)
self.host = host
@@ -199,6 +201,8 @@ def __init__(self,
self.expected_passfail_val = expected_passfail_val
self.csv_name = csv_name
self.wait_time = wait_time
+ self.floors = floors
+ self.get_live_view = get_live_view
def change_target_to_ip(self):
@@ -1053,6 +1057,41 @@ def generate_report(self, result_json=None, result_dir='Ping_Plotter_Test_Report
report.move_csv_file()
report.build_graph()
+ if self.do_webUI and self.get_live_view:
+ test_name = os.path.basename(self.ui_report_dir)
+ script_dir = os.path.dirname(os.path.abspath(__file__))
+
+ for floor in range(int(self.floors)):
+ # Construct expected image paths
+ packet_sent_image = os.path.join(script_dir, "heatmap_images", f"{test_name}_ping_packet_sent_{floor+1}.png")
+ packet_recv_image = os.path.join(script_dir, "heatmap_images", f"{test_name}_ping_packet_recv_{floor+1}.png")
+ packet_loss_image = os.path.join(script_dir, "heatmap_images", f"{test_name}_ping_packet_loss_{floor+1}.png")
+
+ # Wait for all required images to be generated (up to timeout)
+ timeout = 60 # seconds
+ start_time = time.time()
+
+ while not (os.path.exists(packet_sent_image) and os.path.exists(packet_recv_image) and os.path.exists(packet_loss_image)):
+ if time.time() - start_time > timeout:
+ print(f"Timeout: Heatmap images for floor {floor + 1} not found within {timeout} seconds.")
+ break
+ time.sleep(1)
+
+ report.set_custom_html("Ping Packet Sent vs Recevied vs Lost:
")
+ report.build_custom()
+
+ # Generate report sections for each image if it exists
+ for image_path in [packet_sent_image, packet_recv_image, packet_loss_image]:
+ if os.path.exists(image_path):
+ # report.set_custom_html('''
+ #
+ #

+ #
+ # '''.format(image_path))
+ report.set_custom_html(f'
')
+ report.build_custom()
+
+
dataframe1 = pd.DataFrame({
'Wireless Client': self.device_names,
'MAC': self.device_mac,
@@ -1481,6 +1520,16 @@ def main():
action="store_true",
help='specify this flag to stop cleaning up generic cxs after the test')
+ optional.add_argument('--get_live_view',
+ action="store_true",
+ help='specify this flag to get the liveview of the devices')
+
+ optional.add_argument('--floors',
+ type=int,
+ default=0,
+ help='specify the Number of floors there in the house')
+
+
# webUI arguments
webUI_args.add_argument('--do_webUI',
action='store_true',
@@ -1637,7 +1686,7 @@ def main():
# ping object creation
ping = Ping(host=mgr_ip, port=mgr_port, ssid=ssid, security=security, password=password, radio=radio,
lanforge_password=mgr_password, target=target, interval=interval, sta_list=[], virtual=args.virtual, real=args.real, duration=report_duration, do_webUI=do_webUI, debug=debug,
- ui_report_dir=ui_report_dir, csv_name=args.device_csv_name, expected_passfail_val=args.expected_passfail_value, wait_time=args.wait_time, group_name=group_name)
+ ui_report_dir=ui_report_dir, csv_name=args.device_csv_name, expected_passfail_val=args.expected_passfail_value, wait_time=args.wait_time, group_name=group_name, floors=args.floors, get_live_view=args.get_live_view)
# creating virtual stations if --virtual flag is specified
if args.virtual:
@@ -2200,6 +2249,9 @@ def main():
logging.info(ping.result_json)
+ if ping.do_webUI:
+ ping.set_webUI_stop()
+
if args.local_lf_report_dir == "":
if args.group_name:
ping.generate_report(config_devices=config_devices, group_device_map=group_device_map)
@@ -2214,8 +2266,6 @@ def main():
if ping.do_webUI:
# copying to home directory i.e home/user_name
ping.copy_reports_to_home_dir()
- if ping.do_webUI:
- ping.set_webUI_stop()
# print('----',rtts)
# station post cleanup
if not args.no_cleanup:
diff --git a/py-scripts/lf_interop_qos.py b/py-scripts/lf_interop_qos.py
index 744663ac6..dae5ee708 100755
--- a/py-scripts/lf_interop_qos.py
+++ b/py-scripts/lf_interop_qos.py
@@ -93,6 +93,7 @@
import shutil
import asyncio
import csv
+import re
from datetime import datetime, timedelta
from collections import defaultdict
@@ -131,7 +132,7 @@ def __init__(self,
group_name=None,
port=8080,
test_name=None,
- device_list=[],
+ device_list=None,
result_dir=None,
ap_name="",
traffic_type=None,
@@ -146,9 +147,24 @@ def __init__(self,
_exit_on_fail=False,
dowebgui=False,
ip="localhost",
- user_list=[], real_client_list=[], real_client_list1=[], hw_list=[], laptop_list=[], android_list=[], mac_list=[], windows_list=[], linux_list=[],
- total_resources_list=[], working_resources_list=[], hostname_list=[], username_list=[], eid_list=[],
- devices_available=[], input_devices_list=[], mac_id1_list=[], mac_id_list=[],
+ user_list=None,
+ real_client_list=None,
+ real_client_list1=None,
+ hw_list=None,
+ laptop_list=None,
+ android_list=None,
+ mac_list=None,
+ windows_list=None,
+ linux_list=None,
+ total_resources_list=None,
+ working_resources_list=None,
+ hostname_list=None,
+ username_list=None,
+ eid_list=None,
+ devices_available=None,
+ input_devices_list=None,
+ mac_id1_list=None,
+ mac_id_list=None,
eap_method=None,
eap_identity=None,
ieee80211=None,
@@ -170,15 +186,17 @@ def __init__(self,
csv_direction=None,
expected_passfail_val=None,
csv_name=None,
- wait_time=60):
+ wait_time=60,
+ get_live_view=False,
+ total_floors=0):
super().__init__(lfclient_host=host,
- lfclient_port=port),
+ lfclient_port=port)
self.ssid_list = []
self.upstream = upstream
self.host = host
self.port = port
self.test_name = test_name
- self.device_list = device_list
+ self.device_list = device_list if device_list else []
self.result_dir = result_dir
self.ssid = ssid
self.security = security
@@ -208,24 +226,24 @@ def __init__(self,
self.cx_profile.side_a_max_bps = side_a_max_rate
self.cx_profile.side_b_min_bps = side_b_min_rate
self.cx_profile.side_b_max_bps = side_b_max_rate
- self.hw_list = hw_list
- self.laptop_list = laptop_list
- self.android_list = android_list
- self.mac_list = mac_list
- self.windows_list = windows_list
- self.linux_list = linux_list
- self.total_resources_list = total_resources_list
- self.working_resources_list = working_resources_list
- self.hostname_list = hostname_list
- self.username_list = username_list
- self.eid_list = eid_list
- self.devices_available = devices_available
- self.input_devices_list = input_devices_list
- self.real_client_list = real_client_list
- self.real_client_list1 = real_client_list1
- self.user_list = user_list
- self.mac_id_list = mac_id_list
- self.mac_id1_list = mac_id1_list
+ self.hw_list = hw_list if hw_list else []
+ self.laptop_list = laptop_list if laptop_list else []
+ self.android_list = android_list if android_list else []
+ self.mac_list = mac_list if mac_list else []
+ self.windows_list = windows_list if windows_list else []
+ self.linux_list = linux_list if linux_list else []
+ self.total_resources_list = total_resources_list if total_resources_list else []
+ self.working_resources_list = working_resources_list if working_resources_list else []
+ self.hostname_list = hostname_list if hostname_list else []
+ self.username_list = username_list if username_list else []
+ self.eid_list = eid_list if eid_list else []
+ self.devices_available = devices_available if devices_available else []
+ self.input_devices_list = input_devices_list if input_devices_list else []
+ self.real_client_list = real_client_list if real_client_list else []
+ self.real_client_list1 = real_client_list1 if real_client_list1 else []
+ self.user_list = user_list if user_list else []
+ self.mac_id_list = mac_id_list if mac_id_list else []
+ self.mac_id1_list = mac_id1_list if mac_id1_list else []
self.dowebgui = dowebgui
self.ip = ip
self.device_found = False
@@ -255,6 +273,8 @@ def __init__(self,
self.wait_time = wait_time
self.group_device_map = {}
self.config = config
+ self.get_live_view = get_live_view
+ self.total_floors = total_floors
def os_type(self):
response = self.json_get("/resource/all")
@@ -568,6 +588,66 @@ def create_cx(self):
count += 1
logger.info("cross connections with TOS type created.")
+ def monitor_cx(self):
+ """
+ This function waits for up to 20 iterations to allow all CXs (connections) to be created.
+
+ If some CXs are still not created after 20 iterations, then the CXs related to that device are removed,
+ along with their associated client and MAC entries from all relevant lists.
+ """
+
+ max_retry = 20
+ current_retry = 0
+ while current_retry < max_retry:
+ not_running_cx = []
+ overallresponse = self.json_get('/cx/all') # Get all current CXs from the layer-3 tab
+ created_cx_list = list(self.cx_profile.created_cx.keys())
+ l3_existing_cx = list(overallresponse.keys())
+ count_of_cx = 0
+ for created_cxs in created_cx_list:
+ if created_cxs in l3_existing_cx:
+ count_of_cx += 1
+ else:
+ # Extract base device name (e.g., from '1.16androidsamsunga7_UDP_UL_BE-8' to '1.16androidsamsunga7')
+ # to track the whole device if any TOS-based CX fails.
+ not_running_cx.append(created_cxs.split('_')[0]) # CX was not created
+ if count_of_cx == len(created_cx_list):
+ break
+ logger.info(f"Try {current_retry + 1} out of 20: Waiting for the cross-connection to be created.")
+ time.sleep(2)
+ current_retry += 1
+ cxs_to_remove = []
+
+ # Collect all CXs related to the failed device (from `not_running_cx`),
+ # including those created for other TOS types, and add them to `cxs_to_remove`.
+ for cx in self.cx_profile.created_cx:
+ for not_created_cx in not_running_cx:
+ if not_created_cx in cx:
+ cxs_to_remove.append(cx)
+
+ # Remove each failed CX and delete it from the created CX tracking dictionary.
+ for cx in cxs_to_remove:
+ logger.info(f"Removing failed CX: {cx}")
+ super().rm_cx(cx)
+ del self.cx_profile.created_cx[cx]
+
+ devices_to_be_removed = []
+ for item in not_running_cx:
+ match = re.match(r'^[0-9.]+', item)
+ if match:
+ devices_to_be_removed.append(match.group())
+
+ # If there are devices to remove, filter them out from all related client and MAC lists
+ # to keep the lists consistent with the currently considered devices.
+ if len(devices_to_be_removed) != 0:
+ self.real_client_list1 = [item for item in self.real_client_list1 if item.split()[0] not in devices_to_be_removed]
+ self.input_devices_list = [item for item in self.input_devices_list if item.split('.')[0] + '.' + item.split('.')[1] not in devices_to_be_removed]
+ filtered = [(dev, mac) for dev, mac in zip(self.real_client_list, self.mac_id_list) if dev.split()[0] not in devices_to_be_removed]
+ self.real_client_list, self.mac_id_list = zip(*filtered) if filtered else ([], [])
+ self.real_client_list = list(self.real_client_list)
+ self.mac_id_list = list(self.mac_id_list)
+ self.num_stations = len(self.real_client_list)
+
def monitor(self):
# TODO: Fix this. This is poor style
throughput, upload, download, upload_throughput, download_throughput, connections_upload, connections_download, avg_upload, avg_download, avg_upload_throughput, avg_download_throughput, connections_download_avg, connections_upload_avg, avg_drop_a, avg_drop_b, dropa_connections, dropb_connections = { # noqa: E501
@@ -641,6 +721,11 @@ def monitor(self):
time_break = 0
# Added background_run to allow the test to continue running, bypassing the duration limit for nile requirement.
rates_data = defaultdict(list)
+ individual_device_data = {}
+ cx_list = list(self.cx_profile.created_cx.keys())
+ for cx in cx_list:
+ columns = ['bps rx a', 'bps rx b']
+ individual_device_data[cx] = pd.DataFrame(columns=columns)
while datetime.now() < end_time or getattr(self, "background_run", None):
index += 1
current_time = datetime.now()
@@ -648,6 +733,7 @@ def monitor(self):
t_response = {}
overallresponse = self.json_get('/cx/all')
+ # rssi_list = {}
try:
# for dynamic data, taken rx rate (last) from layer3 endp tab
l3_endp_data = list(self.json_get('/endp/list?fields=rx rate (last),rx drop %25,name')['endpoint'])
@@ -658,6 +744,7 @@ def monitor(self):
rates_data['.'.join(port.split('.')[:2]) + ' rx_rate'].append(port_data['rx-rate'])
rates_data['.'.join(port.split('.')[:2]) + ' tx_rate'].append(port_data['tx-rate'])
rates_data['.'.join(port.split('.')[:2]) + ' RSSI'].append(port_data['signal'])
+ # rssi_list[self.input_devices_list.index(port)] = port_data['signal']
cx_list = list(self.cx_profile.created_cx.keys())
# t_response data order - [rx rate(last)_A,rx rate(last)_B,rx drop % A,rx drop %B] A or B will considered based upon the name in L3 Endps tab
for cx in cx_list:
@@ -798,6 +885,11 @@ def monitor(self):
self.df_for_webui.append(self.overall[-1])
previous_time = current_time
if self.dowebgui == "True":
+ for key,value in t_response.items():
+ row_data = [value[0],value[1]]
+ individual_device_data[key].loc[len(individual_device_data[key])] = row_data
+ for port, df in individual_device_data.items():
+ df.to_csv(f"{runtime_dir}/{port}.csv", index=False)
df1 = pd.DataFrame(self.df_for_webui)
df1.to_csv('{}/overall_throughput.csv'.format(runtime_dir), index=False)
@@ -925,16 +1017,21 @@ def evaluate_qos(self, connections_download, connections_upload, drop_a_per, dro
temp = sta.rsplit('-', 1)
current_tos = temp[0].split('_')[-1] # slicing TOS from CX name
temp = int(temp[1])
- if int(self.cx_profile.side_b_min_bps) != 0:
- tos_download[current_tos].append(connections_download[sta])
- tos_drop_dict['rx_drop_a'][current_tos].append(drop_a_per[temp])
- tx_b_download[current_tos].append(int(f"{tx_endps_download['%s-B' % sta]['tx pkts ll']}"))
- rx_a_download[current_tos].append(int(f"{rx_endps_download['%s-A' % sta]['rx pkts ll']}"))
- else:
- tos_download[current_tos].append(float(0))
- tos_drop_dict['rx_drop_a'][current_tos].append(float(0))
- tx_b_download[current_tos].append(int(0))
- rx_a_download[current_tos].append(int(0))
+ counter = 0
+ try:
+ if int(self.cx_profile.side_b_min_bps) != 0:
+ tos_download[current_tos].append(connections_download[sta])
+ tos_drop_dict['rx_drop_a'][current_tos].append(drop_a_per[counter])
+ tx_b_download[current_tos].append(int(f"{tx_endps_download['%s-B' % sta]['tx pkts ll']}"))
+ rx_a_download[current_tos].append(int(f"{rx_endps_download['%s-A' % sta]['rx pkts ll']}"))
+ else:
+ tos_download[current_tos].append(float(0))
+ tos_drop_dict['rx_drop_a'][current_tos].append(float(0))
+ tx_b_download[current_tos].append(int(0))
+ rx_a_download[current_tos].append(int(0))
+ except:
+ logger.info(f'{sta}-A/B : CX Not Found')
+ counter += 1
tos_download.update({"bkQOS": float(f"{sum(tos_download['BK']):.2f}")})
tos_download.update({"beQOS": float(f"{sum(tos_download['BE']):.2f}")})
tos_download.update({"videoQOS": float(f"{sum(tos_download['VI']):.2f}")})
@@ -956,9 +1053,10 @@ def evaluate_qos(self, connections_download, connections_upload, drop_a_per, dro
temp = sta.rsplit('-', 1)
current_tos = temp[0].split('_')[-1]
temp = int(temp[1])
+ counter = 0
if int(self.cx_profile.side_a_min_bps) != 0:
tos_upload[current_tos].append(connections_upload[sta])
- tos_drop_dict['rx_drop_b'][current_tos].append(drop_b_per[temp])
+ tos_drop_dict['rx_drop_b'][current_tos].append(drop_b_per[counter])
tx_b_upload[current_tos].append(int(f"{tx_endps_upload['%s-B' % sta]['tx pkts ll']}"))
rx_a_upload[current_tos].append(int(f"{rx_endps_upload['%s-A' % sta]['rx pkts ll']}"))
else:
@@ -966,6 +1064,7 @@ def evaluate_qos(self, connections_download, connections_upload, drop_a_per, dro
tos_drop_dict['rx_drop_b'][current_tos].append(float(0))
tx_b_upload[current_tos].append(int(0))
rx_a_upload[current_tos].append(int(0))
+ counter += 1
tos_upload.update({"bkQOS": float(f"{sum(tos_upload['BK']):.2f}")})
tos_upload.update({"beQOS": float(f"{sum(tos_upload['BE']):.2f}")})
tos_upload.update({"videoQOS": float(f"{sum(tos_upload['VI']):.2f}")})
@@ -1216,11 +1315,18 @@ def generate_report(self, data, input_setup_info, connections_download_avg, conn
report.build_graph()
self.generate_individual_graph(res, report, connections_download_avg, connections_upload_avg, avg_drop_a, avg_drop_b)
report.test_setup_table(test_setup_data=input_setup_info, value="Information")
- report.build_custom()
report.build_footer()
report.write_html()
report.write_pdf()
+ # if(self.get_live_view):
+ # script_dir = os.path.dirname(os.path.abspath(__file__))
+ # folder_path = os.path.join(script_dir, "heatmap_images")
+ # for f in os.listdir(folder_path):
+ # file_path = os.path.join(folder_path, f)
+ # if os.path.isfile(file_path):
+ # os.remove(file_path)
+
# Generates a separate table in the report for each group, including its respective devices.
def generate_dataframe(self, groupdevlist, clients_list, mac, ssid, tos, upload, download, individual_upload,
individual_download, test_input, individual_drop_b, individual_drop_a, pass_fail_list):
@@ -1317,13 +1423,61 @@ def generate_dataframe(self, groupdevlist, clients_list, mac, ssid, tos, upload,
else:
return None
- def generate_individual_graph(self, res, report, connections_download_avg, connections_upload_avg, avg_drop_a, avg_drop_b):
+ def get_live_view_images(self, multicast_exists=False):
+ image_paths_by_tos = {} # { "BE": [img1, img2, ...], "VO": [...], ... }
+ rssi_image_paths_by_floor = {} if not multicast_exists else {} # Empty if skipping RSSI
+ print('tos tos', self.tos)
+
+ for floor in range(int(self.total_floors)):
+ for tos in self.tos:
+ timeout = 60 # seconds
+ script_dir = os.path.dirname(os.path.abspath(__file__))
+
+ throughput_image_path = os.path.join(
+ script_dir, "heatmap_images", f"{self.test_name}_throughput_{tos}_{floor+1}.png"
+ )
+
+ if not multicast_exists:
+ rssi_image_path = os.path.join(
+ script_dir, "heatmap_images", f"{self.test_name}_rssi_{floor+1}.png"
+ )
+
+ start_time = time.time()
+
+ while True:
+ throughput_ready = os.path.exists(throughput_image_path)
+ rssi_ready = True if multicast_exists else os.path.exists(rssi_image_path)
+
+ if throughput_ready and rssi_ready:
+ break
+
+ if time.time() - start_time > timeout:
+ print(f"Timeout: Images for TOS '{tos}' on Floor {floor+1} not found within 60 seconds.")
+ break
+ time.sleep(1)
+
+ if throughput_ready:
+ image_paths_by_tos.setdefault(tos, []).append(throughput_image_path)
+
+ # Only check and store RSSI if not multicast
+ if not multicast_exists and os.path.exists(rssi_image_path):
+ rssi_image_paths_by_floor[floor + 1] = rssi_image_path
+
+ return image_paths_by_tos, rssi_image_paths_by_floor
+
+
+
+ def generate_individual_graph(self, res, report, connections_download_avg, connections_upload_avg, avg_drop_a, avg_drop_b,totalfloors=None,multicast_exists=False,graph_no=''):
+ if totalfloors!=None:
+ self.total_floors = totalfloors
load = ""
upload_list, download_list, individual_upload_list, individual_download_list = [], [], [], []
individual_set, colors, labels = [], [], []
individual_drop_a_list, individual_drop_b_list = [], []
list1 = [[], [], [], []]
data_set = {}
+ if (self.dowebgui and self.get_live_view) or multicast_exists:
+ tos_images,rssi_images = self.get_live_view_images()
# Initialized dictionaries to store average upload ,download and drop values with respect to tos
avg_res = {'Upload': {
'VO': [],
@@ -1479,7 +1633,7 @@ def generate_individual_graph(self, res, report, connections_download_avg, conne
_color_name=colors,
_show_bar_value=True,
_enable_csv=True,
- _graph_image_name="bk_{}".format(self.direction), _color_edge=['black'],
+ _graph_image_name="bk_{}{}".format(self.direction,graph_no), _color_edge=['black'],
_color=colors)
graph_png = graph.build_bar_graph_horizontal()
print("graph name {}".format(graph_png))
@@ -1489,6 +1643,14 @@ def generate_individual_graph(self, res, report, connections_download_avg, conne
report.set_csv_filename(graph_png)
report.move_csv_file()
report.build_graph()
+ if (self.dowebgui and self.get_live_view) or multicast_exists:
+ for image_path in tos_images['BK']:
+ report.set_custom_html('')
+ report.build_custom()
+ # report.set_custom_html("Average Throughput Heatmap:
")
+ # report.build_custom()
+ report.set_custom_html(f'
')
+ report.build_custom()
individual_avgupload_list = []
individual_avgdownload_list = []
for i in range(len(individual_upload_list)):
@@ -1559,6 +1721,15 @@ def generate_individual_graph(self, res, report, connections_download_avg, conne
dataframe1 = pd.DataFrame(bk_dataframe)
report.set_table_dataframe(dataframe1)
report.build_table()
+ # if (self.dowebgui and self.get_live_view) or multicast_exists:
+ # for image_path in tos_images['BK']:
+ # report.set_custom_html('')
+ # report.build_custom()
+ # # report.set_custom_html("Average Throughput Heatmap:
")
+ # # report.build_custom()
+ # report.set_custom_html(f'
')
+ # report.build_custom()
+
logger.info("Graph and table for BK tos are built")
if "BE" in self.tos:
if self.direction == "Bi-direction":
@@ -1602,7 +1773,7 @@ def generate_individual_graph(self, res, report, connections_download_avg, conne
_color_name=colors,
_show_bar_value=True,
_enable_csv=True,
- _graph_image_name="be_{}".format(self.direction), _color_edge=['black'],
+ _graph_image_name="be_{}{}".format(self.direction,graph_no), _color_edge=['black'],
_color=colors)
graph_png = graph.build_bar_graph_horizontal()
print("graph name {}".format(graph_png))
@@ -1612,6 +1783,14 @@ def generate_individual_graph(self, res, report, connections_download_avg, conne
report.set_csv_filename(graph_png)
report.move_csv_file()
report.build_graph()
+ if (self.dowebgui and self.get_live_view) or multicast_exists:
+ for image_path in tos_images['BE']:
+ report.set_custom_html('')
+ report.build_custom()
+ # report.set_custom_html("Average Throughput Heatmap:
")
+ # report.build_custom()
+ report.set_custom_html(f'
')
+ report.build_custom()
individual_avgupload_list = []
individual_avgdownload_list = []
for i in range(len(individual_upload_list)):
@@ -1680,6 +1859,14 @@ def generate_individual_graph(self, res, report, connections_download_avg, conne
dataframe2 = pd.DataFrame(be_dataframe)
report.set_table_dataframe(dataframe2)
report.build_table()
+ # if (self.dowebgui and self.get_live_view) or multicast_exists:
+ # for image_path in tos_images['BE']:
+ # report.set_custom_html('')
+ # report.build_custom()
+ # # report.set_custom_html("Average Throughput Heatmap:
")
+ # # report.build_custom()
+ # report.set_custom_html(f'
')
+ # report.build_custom()
logger.info("Graph and table for BE tos are built")
if "VI" in self.tos:
if self.direction == "Bi-direction":
@@ -1722,7 +1909,7 @@ def generate_individual_graph(self, res, report, connections_download_avg, conne
_show_bar_value=True,
_color_name=colors,
_enable_csv=True,
- _graph_image_name="video_{}".format(self.direction),
+ _graph_image_name="video_{}{}".format(self.direction,graph_no),
_color_edge=['black'],
_color=colors)
graph_png = graph.build_bar_graph_horizontal()
@@ -1733,6 +1920,14 @@ def generate_individual_graph(self, res, report, connections_download_avg, conne
report.set_csv_filename(graph_png)
report.move_csv_file()
report.build_graph()
+ if (self.dowebgui and self.get_live_view) or multicast_exists:
+ for image_path in tos_images['VI']:
+ report.set_custom_html('')
+ report.build_custom()
+ # report.set_custom_html("Average Throughput Heatmap:
")
+ # report.build_custom()
+ report.set_custom_html(f'
')
+ report.build_custom()
individual_avgupload_list = []
individual_avgdownload_list = []
for i in range(len(individual_upload_list)):
@@ -1801,6 +1996,14 @@ def generate_individual_graph(self, res, report, connections_download_avg, conne
dataframe3 = pd.DataFrame(vi_dataframe)
report.set_table_dataframe(dataframe3)
report.build_table()
+ # if (self.dowebgui and self.get_live_view) or multicast_exists:
+ # for image_path in tos_images['VI']:
+ # report.set_custom_html('')
+ # report.build_custom()
+ # # report.set_custom_html("Average Throughput Heatmap:
")
+ # # report.build_custom()
+ # report.set_custom_html(f'
')
+ # report.build_custom()
logger.info("Graph and table for VI tos are built")
if "VO" in self.tos:
if self.direction == "Bi-direction":
@@ -1843,7 +2046,7 @@ def generate_individual_graph(self, res, report, connections_download_avg, conne
_show_bar_value=True,
_color_name=colors,
_enable_csv=True,
- _graph_image_name="voice_{}".format(self.direction),
+ _graph_image_name="voice_{}{}".format(self.direction,graph_no),
_color_edge=['black'],
_color=colors)
graph_png = graph.build_bar_graph_horizontal()
@@ -1854,6 +2057,14 @@ def generate_individual_graph(self, res, report, connections_download_avg, conne
report.set_csv_filename(graph_png)
report.move_csv_file()
report.build_graph()
+ if (self.dowebgui and self.get_live_view) or multicast_exists:
+ for image_path in tos_images['VO']:
+ report.set_custom_html('')
+ report.build_custom()
+ # report.set_custom_html("Average Throughput Heatmap:
")
+ # report.build_custom()
+ report.set_custom_html(f'
')
+ report.build_custom()
individual_avgupload_list = []
individual_avgdownload_list = []
for i in range(len(individual_upload_list)):
@@ -1924,6 +2135,13 @@ def generate_individual_graph(self, res, report, connections_download_avg, conne
report.set_table_dataframe(dataframe4)
report.build_table()
logger.info("Graph and table for VO tos are built")
+ if self.dowebgui and self.get_live_view and not multicast_exists:
+ for floor,rssi_image_path in rssi_images.items():
+ if os.path.exists(rssi_image_path):
+ report.set_custom_html('')
+ report.build_custom()
+ report.set_custom_html(f'
')
+ report.build_custom()
else:
print("No individual graph to generate.")
# storing overall throughput CSV in the report directory
@@ -1934,8 +2152,13 @@ def generate_individual_graph(self, res, report, connections_download_avg, conne
for cx in self.real_time_data:
for tos in self.real_time_data[cx]:
if tos in self.tos and len(self.real_time_data[cx][tos]['time']) != 0:
- cx_df = pd.DataFrame(self.real_time_data[cx][tos])
- cx_df.to_csv('{}/{}_{}_realtime_data.csv'.format(report.path_date_time, cx, tos), index=False)
+ try:
+ cx_df = pd.DataFrame(self.real_time_data[cx][tos])
+ cx_df.to_csv('{}/{}_{}_realtime_data.csv'.format(report.path_date_time, cx, tos), index=False)
+ except:
+ logger.info(f'failed cx {cx} tos {tos}')
+ logger.info(f"overall Data {self.real_time_data[cx][tos]}")
+
def get_pass_fail_list(self, test_input_list, individual_avgupload_list, individual_avgdownload_list):
pass_fail_list = []
@@ -2216,6 +2439,8 @@ def main():
optional.add_argument('--device_csv_name', type=str, help='Enter the csv name to store expected values', default=None)
optional.add_argument("--wait_time", type=int, help="Enter the maximum wait time for configurations to apply", default=60)
optional.add_argument("--config", action="store_true", help="Specify for configuring the devices")
+ optional.add_argument('--get_live_view', help="If true will heatmap will be generated from testhouse automation WebGui ", action='store_true')
+ optional.add_argument('--total_floors', help="Total floors from testhouse automation WebGui ", default="0")
args = parser.parse_args()
# help summary
@@ -2310,7 +2535,9 @@ def main():
expected_passfail_val=args.expected_passfail_value,
csv_name=args.device_csv_name,
wait_time=args.wait_time,
- config=args.config
+ config=args.config,
+ get_live_view=args.get_live_view,
+ total_floors=args.total_floors
)
throughput_qos.os_type()
_, configured_device, _, configuration = throughput_qos.phantom_check()
@@ -2349,6 +2576,7 @@ def main():
df1.to_csv('{}/overall_throughput.csv'.format(throughput_qos.result_dir), index=False)
raise ValueError("Aborting the test....")
throughput_qos.build()
+ throughput_qos.monitor_cx()
throughput_qos.start(False, False)
time.sleep(10)
connections_download, connections_upload, drop_a_per, drop_b_per, connections_download_avg, connections_upload_avg, avg_drop_a, avg_drop_b = throughput_qos.monitor()
@@ -2365,6 +2593,22 @@ def main():
"contact": "support@candelatech.com"
}
throughput_qos.cleanup()
+
+ # Update webgui running json with latest entry and test status completed
+ if throughput_qos.dowebgui == "True":
+ last_entry = throughput_qos.overall[len(throughput_qos.overall) - 1]
+ last_entry["status"] = "Stopped"
+ last_entry["timestamp"] = datetime.now().strftime("%d/%m %I:%M:%S %p")
+ last_entry["remaining_time"] = "0"
+ last_entry["end_time"] = last_entry["timestamp"]
+ throughput_qos.df_for_webui.append(
+ last_entry
+ )
+ df1 = pd.DataFrame(throughput_qos.df_for_webui)
+ df1.to_csv('{}/overall_throughput.csv'.format(args.result_dir, ), index=False)
+
+ # copying to home directory i.e home/user_name
+ throughput_qos.copy_reports_to_home_dir()
if args.group_name:
throughput_qos.generate_report(
data=data,
@@ -2384,22 +2628,6 @@ def main():
avg_drop_a=avg_drop_a,
avg_drop_b=avg_drop_b)
- # Update webgui running json with latest entry and test status completed
- if throughput_qos.dowebgui == "True":
- last_entry = throughput_qos.overall[len(throughput_qos.overall) - 1]
- last_entry["status"] = "Stopped"
- last_entry["timestamp"] = datetime.now().strftime("%d/%m %I:%M:%S %p")
- last_entry["remaining_time"] = "0"
- last_entry["end_time"] = last_entry["timestamp"]
- throughput_qos.df_for_webui.append(
- last_entry
- )
- df1 = pd.DataFrame(throughput_qos.df_for_webui)
- df1.to_csv('{}/overall_throughput.csv'.format(args.result_dir, ), index=False)
-
- # copying to home directory i.e home/user_name
- throughput_qos.copy_reports_to_home_dir()
-
if __name__ == "__main__":
main()
diff --git a/py-scripts/lf_interop_throughput.py b/py-scripts/lf_interop_throughput.py
index aa4d83a8a..03db155b9 100755
--- a/py-scripts/lf_interop_throughput.py
+++ b/py-scripts/lf_interop_throughput.py
@@ -108,6 +108,11 @@
python3 lf_interop_throughput.py --mgr 192.168.204.74 --mgr_port 8080 --upstream_port eth1 --test_duration 1m --download 1000000 --traffic_type lf_udp
--ssid NETGEAR_2G_wpa2 --passwd Password@123 --security wpa2 --config --device_list 1.10,1.11,1.12 --do_interopability
+ EXAMPLE-9:
+ Command Line Interface to run the test without configuration
+ python3 lf_interop_throughput.py --mgr 192.168.204.74 --mgr_port 8080 --upstream_port eth0 --test_duration 30s --traffic_type lf_udp --ssid NETGEAR_2G_wpa2
+ --passwd Password@123 --security wpa2 --do_interopability --device_list 1.15,1.400 --download 10000000 --default_config
+
SCRIPT_CLASSIFICATION : Test
SCRIPT_CATEGORIES: Performance, Functional, Report Generation
@@ -149,6 +154,7 @@
import asyncio
import csv
import matplotlib.pyplot as plt
+import re
logger = logging.getLogger(__name__)
@@ -169,7 +175,7 @@
from lf_graph import lf_line_graph
from datetime import datetime, timedelta
-
+import requests
DeviceConfig=importlib.import_module("py-scripts.DeviceConfig")
lf_logger_config = importlib.import_module("py-scripts.lf_logger_config")
@@ -206,6 +212,9 @@ def __init__(self,
dowebgui=False,
precleanup=False,
do_interopability=False,
+ get_live_view=False,
+ total_floors=0,
+ default_config = False,
ip="localhost",
csv_direction='',
device_csv_name=None,
@@ -302,6 +311,8 @@ def __init__(self,
self.overall_avg_rssi = overall_avg_rssi if overall_avg_rssi is not None else []
self.dowebgui = dowebgui
self.do_interopability = do_interopability
+ self.get_live_view = get_live_view
+ self.total_floors = total_floors
self.ip = ip
self.device_found = False
self.gave_incremental = False
@@ -335,6 +346,9 @@ def __init__(self,
self.config = config
self.configdevices = {}
self.group_device_map = {}
+ self.config_dict = {}
+ self.configured_devices_check = {}
+ self.default_config = default_config
def os_type(self):
"""
@@ -372,16 +386,79 @@ def os_type(self):
self.android_list.append(hw_version)
self.laptop_list = self.windows_list + self.linux_list + self.mac_list
+ def disconnect_all_devices(self,devices_to_disconnect=[]):
+ """
+ Disconnects either all devices or a specific list of devices from Wi-Fi networks.
+ """
+ obj = DeviceConfig.DeviceConfig(lanforge_ip=self.host, file_name=self.file_name, wait_time=self.wait_time)
+ # all_devices = obj.get_all_devices()
+ # GET ANDROIDS FROM DEVICE LIST
+ adb_obj = DeviceConfig.ADB_DEVICES(lanforge_ip=self.host)
+
+ async def do_disconnect():
+ all_devices = obj.get_all_devices()
+ # TO DISCONNECT ALL DEVICES
+ if len(devices_to_disconnect) == 0:
+ android_resources = [d for d in all_devices if d.get('os') == 'Android' and d.get('eid') in self.device_list]
+ if(len(android_resources)>0):
+ # TO STOP APP FOR ALL DEVICES FOR ANDROIDS
+ await adb_obj.stop_app(port_list=android_resources)
+ # TO FORGET ALL NETWORKS FOR ALL OS TYPES
+ await obj.connectivity(device_list=self.device_list, wifi_config=self.config_dict, disconnect=True)
+ if(len(android_resources)>0):
+ adb_obj.set_wifi_state(port_list=android_resources, state = 'disable')
+
+ # TO DISCONNECT SPECIFIC DEVICES
+ else:
+ android_resources = [d for d in all_devices if d.get('os') == 'Android' and d.get('eid') in devices_to_disconnect]
+ if(len(android_resources)>0):
+ await adb_obj.stop_app(port_list=android_resources)
+ await obj.connectivity(device_list=devices_to_disconnect, wifi_config=self.config_dict, disconnect=True)
+ if(len(android_resources)>0):
+ adb_obj.set_wifi_state(port_list=android_resources, state = 'disable')
+
+ asyncio.run(do_disconnect())
+
+ def configure_specific(self,device_to_configure_list):
+ """
+ Configure specific devices using the provided list of device IDs or names.
+ """
+ obj = DeviceConfig.DeviceConfig(lanforge_ip=self.host, file_name=self.file_name, wait_time=self.wait_time)
+ all_devices = obj.get_all_devices()
+ android_resources = [d for d in all_devices if (d.get('os') == 'Android') and d.get('eid') in device_to_configure_list]
+ laptop_resources = [d for d in all_devices if (d.get('os') != 'Android' ) and '1.' + d.get('resource') in device_to_configure_list]
+ devices_connected = asyncio.run(obj.connectivity(device_list=device_to_configure_list, wifi_config=self.config_dict))
+ if len(devices_connected) > 0:
+ if android_resources:
+ self.configured_devices_check[android_resources[0]['user-name']] = True
+ elif laptop_resources:
+ self.configured_devices_check[laptop_resources[0]['hostname']] = True
+ return True
+ else:
+ if android_resources:
+ self.configured_devices_check[android_resources[0]['user-name']] = False
+ elif laptop_resources:
+ self.configured_devices_check[laptop_resources[0]['hostname']] = False
+ return False
+
+ def extract_digits_until_alpha(self,s):
+ """
+ Extracts digits (including decimals) from the start of a string until the first alphabet.
+ """
+ match = re.match(r'^[\d.]+', s)
+ return match.group() if match else ''
+
def phantom_check(self):
"""
Checks for non-phantom resources and ports, categorizes them, and prepares a list of available devices for testing.
"""
port_eid_list, same_eid_list, original_port_list = [], [], []
+ interop_response = self.json_get("/adb")
obj = DeviceConfig.DeviceConfig(lanforge_ip=self.host, file_name=self.file_name, wait_time=self.wait_time)
upstream_port_ip = self.change_port_to_ip(self.upstream)
config_devices = {}
- config_dict = {
+ self.config_dict = {
'ssid': self.ssid,
'passwd': self.password,
'enc': self.security,
@@ -422,7 +499,7 @@ def phantom_check(self):
self.device_list = self.device_list.split(',')
if self.config:
- self.device_list = asyncio.run(obj.connectivity(device_list=self.device_list, wifi_config=config_dict))
+ self.device_list = asyncio.run(obj.connectivity(device_list=self.device_list, wifi_config=self.config_dict))
# Configuration of devices with SSID , Password and Security when the device list is not specified
elif self.device_list == [] and self.config:
all_devices = obj.get_all_devices()
@@ -433,9 +510,9 @@ def phantom_check(self):
else:
device_list.append(device["shelf"] + '.' + device["resource"] + " " + device["serial"])
logger.info("AVAILABLE RESOURCES", device_list)
- self.device_list = input("Enter the desired resources to run the test:").split(',')
+ self.device_list = input("Select the desired resources to run the test:").split(',')
if self.config:
- self.device_list = asyncio.run(obj.connectivity(device_list=self.device_list, wifi_config=config_dict))
+ self.device_list = asyncio.run(obj.connectivity(device_list=self.device_list, wifi_config=self.config_dict))
# Retrieve all resources from the LANforge
response = self.json_get("/resource/all")
@@ -469,7 +546,15 @@ def phantom_check(self):
if b['kernel'] == '':
self.eid_list.append(b['eid'])
self.mac_list.append(b['hw version'])
- self.devices_available.append(b['eid'] + " " + 'iOS' + " " + b['hostname'])
+ if "devices" in interop_response.keys():
+ interop_devices = interop_response['devices']
+ if(len([v['user-name'] for d in interop_devices for k, v in d.items() if v.get('resource-id') == b['eid']]) == 0):
+ self.devices_available.append(b['eid'] + " " + 'iOS' + " " + b['hostname'])
+ else:
+ ios_username = [v['user-name'] for d in interop_devices for k, v in d.items() if v.get('resource-id') == b['eid']][0]
+ self.devices_available.append(b['eid'] + " " + 'iOS' + " " + ios_username)
+ else:
+ self.devices_available.append(b['eid'] + " " + 'iOS' + " " + b['hostname'])
else:
self.eid_list.append(b['eid'])
self.mac_list.append(b['hw version'])
@@ -516,7 +601,7 @@ def phantom_check(self):
configure_list = []
if len(self.device_list) == 0 and self.config == False and self.group_name is None:
logger.info("AVAILABLE DEVICES TO RUN TEST : {}".format(self.user_list))
- self.device_list = input("Enter the desired resources to run the test:").split(',')
+ self.device_list = input("Select the desired resources to run the test:").split(',')
# If self.device_list is provided, check availability against devices_available
if len(self.device_list) != 0:
devices_list = self.device_list
@@ -834,13 +919,16 @@ def get_layer3_endp_data(self):
[4]: Status of the Device ("Run" or "Stopped")
"""
cx_list_endp = []
+ cx_list_l3 = []
for i in self.cx_profile.created_cx.keys():
cx_list_endp.append(i + '-A')
cx_list_endp.append(i + '-B')
+ cx_list_l3.append(i)
# Fetch required throughput data from Lanforge
try:
# for dynamic data, taken rx rate lasts from layer3 endp tab
l3_endp_data = list(self.json_get('/endp/{}/list?fields=rx rate (last),rx drop %25,name,run,name'.format(','.join(cx_list_endp)))['endpoint'])
+ l3_cx_data = self.json_get('/cx/all')
except Exception as e:
cx_data = self.json_get('/cx/all/')
logger.info(cx_data)
@@ -851,7 +939,7 @@ def get_layer3_endp_data(self):
throughput = {}
# mapping the data based upon the cx_list order
for cx in cx_list:
- throughput[i] = [0, 0, 0, 0, "Stopped"]
+ throughput[i] = [0, 0, 0, 0, "Stopped",0]
for j in l3_endp_data:
key, value = next(iter(j.items()))
endp_a = cx + '-A'
@@ -864,13 +952,20 @@ def get_layer3_endp_data(self):
throughput[i][3] = value['rx drop %']
if value['name'] == endp_a or value['name'] == endp_b:
throughput[i][4] = 'Run' if value['run'] else 'Stopped'
+ for j in l3_cx_data:
+ if(j == "handler" or j == "uri"):
+ continue
+ # key, value = next(iter(j.items()))
+ if cx == l3_cx_data[j]['name']:
+ throughput[i][5] = l3_cx_data[j]['avg rtt']
i += 1
return throughput
- def monitor(self, iteration, individual_df, device_names, incremental_capacity_list, overall_start_time, overall_end_time):
+ def monitor(self, iteration, individual_df, device_names, incremental_capacity_list, overall_start_time, overall_end_time, is_device_configured):
individual_df_for_webui = individual_df.copy() # for webui
throughput, upload, download, upload_throughput, download_throughput, connections_upload, connections_download = {}, [], [], [], [], {}, {}
drop_a, drop_a_per, drop_b, drop_b_per, state, state_of_device = [], [], [], [], [], []
+ avg_rtt = []
test_stopped_by_user = False
if (self.test_duration is None) or (int(self.test_duration) <= 1):
raise ValueError("Monitor test duration should be > 1 second")
@@ -891,7 +986,7 @@ def monitor(self, iteration, individual_df, device_names, incremental_capacity_l
connections_download_realtime = dict.fromkeys(list(self.cx_profile.created_cx.keys()), float(0))
# Initialize lists for throughput and drops for each connection
- [(upload.append([]), download.append([]), drop_a.append([]), drop_b.append([]), state.append([])) for i in range(len(self.cx_profile.created_cx))]
+ [(upload.append([]), download.append([]), drop_a.append([]), drop_b.append([]), state.append([]), avg_rtt.append([])) for i in range(len(self.cx_profile.created_cx))]
# If using web GUI, set runtime directory
if self.dowebgui:
@@ -908,10 +1003,10 @@ def monitor(self, iteration, individual_df, device_names, incremental_capacity_l
if self.dowebgui:
time.sleep(1) # for each second data in csv while ensuring webgui
individual_df_data = []
- temp_upload, temp_download, temp_drop_a, temp_drop_b = [], [], [], []
+ temp_upload, temp_download, temp_drop_a, temp_drop_b, temp_avg_rtt = [], [], [], [], []
# Initialize temporary lists for each connection
- [(temp_upload.append([]), temp_download.append([]), temp_drop_a.append([]), temp_drop_b.append([])) for
+ [(temp_upload.append([]), temp_download.append([]), temp_drop_a.append([]), temp_drop_b.append([]), temp_avg_rtt.append([])) for
i in range(len(self.cx_profile.created_cx))]
# Populate temporary lists with current throughput data
@@ -921,12 +1016,13 @@ def monitor(self, iteration, individual_df, device_names, incremental_capacity_l
temp_download[i].append(0)
temp_drop_a[i].append(0)
temp_drop_b[i].append(0)
+ temp_avg_rtt[i].append(0)
else:
temp_upload[i].append(throughput[index][i][1])
temp_download[i].append(throughput[index][i][0])
temp_drop_a[i].append(throughput[index][i][2])
temp_drop_b[i].append(throughput[index][i][3])
-
+ temp_avg_rtt[i].append(throughput[index][i][5])
# Calculate average throughput and drop percentages
upload_throughput = [float(f"{(sum(i) / 1000000) / len(i): .2f}") for i in temp_upload]
download_throughput = [float(f"{(sum(i) / 1000000) / len(i): .2f}") for i in temp_download]
@@ -947,9 +1043,9 @@ def monitor(self, iteration, individual_df, device_names, incremental_capacity_l
" min" if int(overall_total_hours) != 0 or int(overall_remaining_minutes) != 0 else '<1 min'][0]
if remaining_minutes_instrf != '<1 min':
remaining_minutes_instrf = str(overall_time_difference).split(".")[0]
- # Storing individual device throughput data(download, upload, Rx % drop A, Rx % drop B) to dataframe
+ # Storing individual device throughput data(download, upload, Rx % drop , Tx % drop) to dataframe
for i in range(len(download_throughput)):
- individual_df_data.extend([download_throughput[i], upload_throughput[i], drop_a_per[i], drop_b_per[i], int(signal_list[i]), link_speed_list[i], rx_rate_list[i]])
+ individual_df_data.extend([download_throughput[i], upload_throughput[i], drop_a_per[i], drop_b_per[i], temp_avg_rtt[i][0], int(signal_list[i]), link_speed_list[i], rx_rate_list[i]])
# Storing Overall throughput data for all devices and also start time, end time, remaining time and status of test running
individual_df_data.extend([round(sum(download_throughput),
@@ -969,9 +1065,9 @@ def monitor(self, iteration, individual_df, device_names, incremental_capacity_l
if (current_time - previous_time).total_seconds() >= time_break:
individual_df_for_webui.loc[len(individual_df_for_webui)] = individual_df_data
if self.group_name is None:
- individual_df_for_webui.to_csv('{}/throughput_data.csv'.format(runtime_dir), index=False)
+ individual_df.to_csv('{}/throughput_data.csv'.format(runtime_dir), index=False)
else:
- individual_df_for_webui.to_csv('{}/overall_throughput.csv'.format(runtime_dir), index=False)
+ individual_df.to_csv('{}/overall_throughput.csv'.format(runtime_dir), index=False)
previous_time = current_time
# Append data to individual_df and save to CSV
@@ -1029,19 +1125,19 @@ def monitor(self, iteration, individual_df, device_names, incremental_capacity_l
for index, key in enumerate(throughput):
for i in range(len(throughput[key])):
- upload[i], download[i], drop_a[i], drop_b[i] = [], [], [], []
+ upload[i], download[i], drop_a[i], drop_b[i], avg_rtt[i] = [], [], [], [], []
if throughput[key][i][4] != 'Run':
upload[i].append(0)
download[i].append(0)
drop_a[i].append(0)
drop_b[i].append(0)
-
+ avg_rtt[i].append(0)
else:
upload[i].append(throughput[key][i][1])
download[i].append(throughput[key][i][0])
drop_a[i].append(throughput[key][i][2])
drop_b[i].append(throughput[key][i][3])
-
+ avg_rtt[i].append(throughput[key][i][5])
# Calculate average throughput and drop percentages
upload_throughput = [float(f"{(sum(i) / 1000000) / len(i): .2f}") for i in upload]
download_throughput = [float(f"{(sum(i) / 1000000) / len(i): .2f}") for i in download]
@@ -1059,9 +1155,9 @@ def monitor(self, iteration, individual_df, device_names, incremental_capacity_l
" min" if int(overall_total_hours) != 0 or int(overall_remaining_minutes) != 0 else '<1 min'][0]
if remaining_minutes_instrf != '<1 min':
remaining_minutes_instrf = str(overall_time_difference).split(".")[0]
- # Storing individual device throughput data(download, upload, Rx % drop A, Rx % drop B) to dataframe
+ # Storing individual device throughput data(download, upload, Rx % drop , Tx % drop) to dataframe
for i in range(len(download_throughput)):
- individual_df_data.extend([download_throughput[i], upload_throughput[i], drop_a_per[i], drop_b_per[i], int(signal_list[i]), link_speed_list[i], rx_rate_list[i]])
+ individual_df_data.extend([download_throughput[i], upload_throughput[i], drop_a_per[i], drop_b_per[i], avg_rtt[i][0], int(signal_list[i]), link_speed_list[i], rx_rate_list[i]])
# Storing Overall throughput data for all devices and also start time, end time, remaining time and status of test running
individual_df_data.extend([round(sum(download_throughput),
@@ -1085,20 +1181,23 @@ def monitor(self, iteration, individual_df, device_names, incremental_capacity_l
break
if not self.background_run and self.background_run is not None:
break
-
+ if not is_device_configured and not self.default_config:
+ break
for index, key in enumerate(throughput):
for i in range(len(throughput[key])):
- upload[i], download[i], drop_a[i], drop_b[i] = [], [], [], []
+ upload[i], download[i], drop_a[i], drop_b[i], avg_rtt[i] = [], [], [], [], []
if throughput[key][i][4] != 'Run':
upload[i].append(0)
download[i].append(0)
drop_a[i].append(0)
drop_b[i].append(0)
+ avg_rtt[i].append(0)
else:
upload[i].append(throughput[key][i][1])
download[i].append(throughput[key][i][0])
drop_a[i].append(throughput[key][i][2])
drop_b[i].append(throughput[key][i][3])
+ avg_rtt[i].append(throughput[key][i][5])
individual_df_data = []
upload_throughput = [float(f"{(sum(i) / 1000000) / len(i): .2f}") for i in upload]
@@ -1108,9 +1207,9 @@ def monitor(self, iteration, individual_df, device_names, incremental_capacity_l
signal_list, channel_list, mode_list, link_speed_list, rx_rate_list = self.get_signal_and_channel_data(self.input_devices_list)
signal_list = [int(i) if i != "" else 0 for i in signal_list]
- # Storing individual device throughput data(download, upload, Rx % drop A, Rx % drop B) to dataframe after test stopped
+ # Storing individual device throughput data(download, upload, Rx % drop , Tx % drop) to dataframe after test stopped
for i in range(len(download_throughput)):
- individual_df_data.extend([download_throughput[i], upload_throughput[i], drop_a_per[i], drop_b_per[i], int(signal_list[i]), link_speed_list[i], rx_rate_list[i]])
+ individual_df_data.extend([download_throughput[i], upload_throughput[i], drop_a_per[i], drop_b_per[i], avg_rtt[i][0], int(signal_list[i]), link_speed_list[i], rx_rate_list[i]])
timestamp = datetime.now().strftime("%d/%m %I:%M:%S %p")
# If it's the last iteration, append final metrics and 'Stopped' status
@@ -1150,7 +1249,7 @@ def monitor(self, iteration, individual_df, device_names, incremental_capacity_l
individual_df_for_webui.to_csv('{}/overall_throughput.csv'.format(runtime_dir), index=False)
individual_df.to_csv('overall_throughput.csv', index=False)
else:
- individual_df_for_webui.to_csv('{}/throughput_data.csv'.format(runtime_dir), index=False)
+ individual_df.to_csv('{}/throughput_data.csv'.format(runtime_dir), index=False)
individual_df.to_csv('throughput_data.csv', index=False)
else:
individual_df.to_csv('throughput_data.csv', index=False)
@@ -1401,6 +1500,12 @@ def build_line_graph(self, data_set, xaxis_name, yaxis_name, xaxis_categories, l
logger.debug("{}.csv".format(graph_image_name))
return f"{graph_image_name}.png"
+
+ def convert_to_table(self,configured_devices_check):
+ return {
+ "Username": list(configured_devices_check.keys()),
+ "Configuration Status": ["Pass" if status else "Fail" for status in configured_devices_check.values()]
+ }
def generate_report(self, iterations_before_test_stopped_by_user, incremental_capacity_list, data=None, data1=None, report_path='', result_dir_name='Throughput_Test_report',
selected_real_clients_names=None):
@@ -1433,8 +1538,8 @@ def generate_report(self, iterations_before_test_stopped_by_user, incremental_ca
# objective title and description
report.set_obj_html(_obj_title="Objective",
- _obj="The Candela Client Capacity test is designed to measure an Access Point’s client capacity and performance when handling different amounts of Real clients like android, Linux,"
- " windows, and IOS. The test allows the user to increase the number of clients in user-defined steps for each test iteration and measure the per client and the overall throughput for"
+ _obj="The Candela Client Capacity test is designed to measure an Access Point’s client capacity and performance when handling different amounts of Real clients like Android, Linux,"
+ " Windows, MacOS and IOS. The test allows the user to increase the number of clients in user-defined steps for each test iteration and measure the per client and the overall throughput for"
" this test, we aim to assess the capacity of network to handle high volumes of traffic while"
" each trial. Along with throughput other measurements made are client connection times, Station 4-Way Handshake time, DHCP times, and more. The expected behavior is for the"
" AP to be able to handle several stations (within the limitations of the AP specs) and make sure all Clients get a fair amount of airtime both upstream and downstream. An AP that"
@@ -1558,6 +1663,7 @@ def generate_report(self, iterations_before_test_stopped_by_user, incremental_ca
upload_list, download_list = [], []
rssi_data = []
data_iter = data[data['Iteration'] == i + 1]
+ avg_rtt_data = []
# for sig in self.signal_list[0:int(incremental_capacity_list[i])]:
# signal_data.append(int(sig)*(-1))
@@ -1583,13 +1689,14 @@ def generate_report(self, iterations_before_test_stopped_by_user, incremental_ca
download_data.append(round((sum(filtered_df[[col for col in filtered_df.columns if "Download" in col][0]].values.tolist()[1:dl_len]) / (dl_len - 1)), 2))
upload_data.append(round((sum(filtered_df[[col for col in filtered_df.columns if "Upload" in col][0]].values.tolist()[1:ul_len]) / (ul_len - 1)), 2))
# Append average upload and download drop from filtered dataframe
- upload_drop.append(round((sum(filtered_df[[col for col in filtered_df.columns if "Rx % Drop B" in col][0]].values.tolist()[1:ul_len]) / (ul_len - 1)), 2))
- download_drop.append(round((sum(filtered_df[[col for col in filtered_df.columns if "Rx % Drop A" in col][0]].values.tolist()[1:dl_len]) / (dl_len - 1)), 2))
+ upload_drop.append(round((sum(filtered_df[[col for col in filtered_df.columns if "Tx % Drop" in col][0]].values.tolist()[1:ul_len]) / (ul_len - 1)), 2))
+ download_drop.append(round((sum(filtered_df[[col for col in filtered_df.columns if "Rx % Drop " in col][0]].values.tolist()[1:dl_len]) / (dl_len - 1)), 2))
+ avg_rtt_data.append(filtered_df[[col for col in filtered_df.columns if "Average RTT " in col][0]].values.tolist()[-1])
rssi_data.append(int(round(sum(filtered_df[[col for col in filtered_df.columns if "RSSI" in col][0]].values.tolist()) /
len(filtered_df[[col for col in filtered_df.columns if "RSSI" in col][0]].values.tolist()), 2)) * -1)
# Calculate and append upload and download throughput to lists
- upload_list.append(str(round((int(self.cx_profile.side_a_min_bps) / 1000000) / int(incremental_capacity_list[i]), 2)) + "Mbps")
- download_list.append(str(round((int(self.cx_profile.side_b_min_bps) / 1000000) / int(incremental_capacity_list[i]), 2)) + "Mbps")
+ upload_list.append(str(round((int(self.cx_profile.side_a_min_bps) / 1000000) / int(incremental_capacity_list[i]), 2)))
+ download_list.append(str(round((int(self.cx_profile.side_b_min_bps) / 1000000) / int(incremental_capacity_list[i]), 2)))
if self.cx_profile.side_a_min_pdu == -1:
packet_size_in_table.append('AUTO')
else:
@@ -1608,11 +1715,11 @@ def generate_report(self, iterations_before_test_stopped_by_user, incremental_ca
len(filtered_df[[col for col in filtered_df.columns if "RSSI" in col][0]].values.tolist()), 2)) * -1)
# Calculate and append upload and download throughput to lists
- upload_list.append(str(round((int(self.cx_profile.side_a_min_bps) / 1000000) / int(incremental_capacity_list[i]), 2)) + "Mbps")
- download_list.append(str(round((int(self.cx_profile.side_b_min_bps) / 1000000) / int(incremental_capacity_list[i]), 2)) + "Mbps")
+ upload_list.append(str(round((int(self.cx_profile.side_a_min_bps) / 1000000) / int(incremental_capacity_list[i]), 2)))
+ download_list.append(str(round((int(self.cx_profile.side_b_min_bps) / 1000000) / int(incremental_capacity_list[i]), 2)))
+ avg_rtt_data.append(filtered_df[[col for col in filtered_df.columns if "Average RTT " in col][0]].values.tolist()[-1])
# Append average download drop data from filtered dataframe
-
- download_drop.append(round((sum(filtered_df[[col for col in filtered_df.columns if "Rx % Drop A" in col][0]].values.tolist()[1:dl_len]) / (dl_len - 1)), 2))
+ download_drop.append(round((sum(filtered_df[[col for col in filtered_df.columns if "Rx % Drop " in col][0]].values.tolist()[1:dl_len]) / (dl_len - 1)), 2))
if self.cx_profile.side_a_min_pdu == -1:
packet_size_in_table.append('AUTO')
else:
@@ -1622,8 +1729,8 @@ def generate_report(self, iterations_before_test_stopped_by_user, incremental_ca
elif self.direction == 'Upload':
# Calculate and append upload and download throughput to lists
- upload_list.append(str(round((int(self.cx_profile.side_a_min_bps) / 1000000) / int(incremental_capacity_list[i]), 2)) + "Mbps")
- download_list.append(str(round((int(self.cx_profile.side_b_min_bps) / 1000000) / int(incremental_capacity_list[i]), 2)) + "Mbps")
+ upload_list.append(str(round((int(self.cx_profile.side_a_min_bps) / 1000000) / int(incremental_capacity_list[i]), 2)))
+ download_list.append(str(round((int(self.cx_profile.side_b_min_bps) / 1000000) / int(incremental_capacity_list[i]), 2)))
rssi_data.append(int(round(sum(filtered_df[[col for col in filtered_df.columns if "RSSI" in col][0]].values.tolist()) /
len(filtered_df[[col for col in filtered_df.columns if "RSSI" in col][0]].values.tolist()), 2)) * -1)
@@ -1633,8 +1740,8 @@ def generate_report(self, iterations_before_test_stopped_by_user, incremental_ca
# Append 0 for download data
download_data.append(0)
# Append average upload drop data from filtered dataframe
- upload_drop.append(round((sum(filtered_df[[col for col in filtered_df.columns if "Rx % Drop B" in col][0]].values.tolist()[1:ul_len]) / (ul_len - 1)), 2))
-
+ upload_drop.append(round((sum(filtered_df[[col for col in filtered_df.columns if "Tx % Drop" in col][0]].values.tolist()[1:ul_len]) / (ul_len - 1)), 2))
+ avg_rtt_data.append(filtered_df[[col for col in filtered_df.columns if "Average RTT " in col][0]].values.tolist()[-1])
if self.cx_profile.side_a_min_pdu == -1:
packet_size_in_table.append('AUTO')
else:
@@ -1648,15 +1755,15 @@ def generate_report(self, iterations_before_test_stopped_by_user, incremental_ca
download_data.append(round((sum(filtered_df[[col for col in filtered_df.columns if "Download" in col][0]].values.tolist()[1:dl_len]) / (dl_len - 1)), 2))
upload_data.append(round((sum(filtered_df[[col for col in filtered_df.columns if "Upload" in col][0]].values.tolist()[1:ul_len]) / (ul_len - 1)), 2))
# Append average download and upload drop data from filtered dataframe
- upload_drop.append(round((sum(filtered_df[[col for col in filtered_df.columns if "Rx % Drop B" in col][0]].values.tolist()[1:ul_len]) / (ul_len - 1)), 2))
- download_drop.append(round((sum(filtered_df[[col for col in filtered_df.columns if "Rx % Drop A" in col][0]].values.tolist()[1:dl_len]) / (dl_len - 1)), 2))
+ upload_drop.append(round((sum(filtered_df[[col for col in filtered_df.columns if "Tx % Drop" in col][0]].values.tolist()[1:ul_len]) / (ul_len - 1)), 2))
+ download_drop.append(round((sum(filtered_df[[col for col in filtered_df.columns if "Rx % Drop " in col][0]].values.tolist()[1:dl_len]) / (dl_len - 1)), 2))
# upload_data.append(filtered_df[[col for col in filtered_df.columns if "Upload" in col][0]].values.tolist()[-1])
rssi_data.append(int(round(sum(filtered_df[[col for col in filtered_df.columns if "RSSI" in col][0]].values.tolist()) /
len(filtered_df[[col for col in filtered_df.columns if "RSSI" in col][0]].values.tolist()), 2)) * -1)
-
+ avg_rtt_data.append(filtered_df[[col for col in filtered_df.columns if "Average RTT " in col][0]].values.tolist()[-1])
# Calculate and append upload and download throughput to lists
- upload_list.append(str(round(int(self.cx_profile.side_a_min_bps) / 1000000, 2)) + "Mbps")
- download_list.append(str(round(int(self.cx_profile.side_b_min_bps) / 1000000, 2)) + "Mbps")
+ upload_list.append(str(round(int(self.cx_profile.side_a_min_bps) / 1000000, 2)))
+ download_list.append(str(round(int(self.cx_profile.side_b_min_bps) / 1000000, 2)))
if self.cx_profile.side_a_min_pdu == -1:
packet_size_in_table.append('AUTO')
@@ -1671,12 +1778,12 @@ def generate_report(self, iterations_before_test_stopped_by_user, incremental_ca
upload_data.append(0)
rssi_data.append(int(round(sum(filtered_df[[col for col in filtered_df.columns if "RSSI" in col][0]].values.tolist()) /
len(filtered_df[[col for col in filtered_df.columns if "RSSI" in col][0]].values.tolist()), 2)) * -1)
-
+ avg_rtt_data.append(filtered_df[[col for col in filtered_df.columns if "Average RTT " in col][0]].values.tolist()[-1])
# Calculate and append upload and download throughput to lists
- upload_list.append(str(round(int(self.cx_profile.side_a_min_bps) / 1000000, 2)) + "Mbps")
- download_list.append(str(round(int(self.cx_profile.side_b_min_bps) / 1000000, 2)) + "Mbps")
+ upload_list.append(str(round(int(self.cx_profile.side_a_min_bps) / 1000000, 2)))
+ download_list.append(str(round(int(self.cx_profile.side_b_min_bps) / 1000000, 2)))
# Append average download drop data from filtered dataframe
- download_drop.append(round((sum(filtered_df[[col for col in filtered_df.columns if "Rx % Drop A" in col][0]].values.tolist()[1:dl_len]) / (dl_len - 1)), 2))
+ download_drop.append(round((sum(filtered_df[[col for col in filtered_df.columns if "Rx % Drop " in col][0]].values.tolist()[1:dl_len]) / (dl_len - 1)), 2))
if self.cx_profile.side_a_min_pdu == -1:
packet_size_in_table.append('AUTO')
else:
@@ -1685,15 +1792,15 @@ def generate_report(self, iterations_before_test_stopped_by_user, incremental_ca
elif self.direction == 'Upload':
# Calculate and append upload and download throughput to lists
- upload_list.append(str(round(int(self.cx_profile.side_a_min_bps) / 1000000, 2)) + "Mbps")
- download_list.append(str(round(int(self.cx_profile.side_b_min_bps) / 1000000, 2)) + "Mbps")
+ upload_list.append(str(round(int(self.cx_profile.side_a_min_bps) / 1000000, 2)))
+ download_list.append(str(round(int(self.cx_profile.side_b_min_bps) / 1000000, 2)))
rssi_data.append(int(round(sum(filtered_df[[col for col in filtered_df.columns if "RSSI" in col][0]].values.tolist()) /
len(filtered_df[[col for col in filtered_df.columns if "RSSI" in col][0]].values.tolist()), 2)) * -1)
-
+ avg_rtt_data.append(filtered_df[[col for col in filtered_df.columns if "Average RTT " in col][0]].values.tolist()[-1])
# Append average upload data from filtered dataframe
upload_data.append(round((sum(filtered_df[[col for col in filtered_df.columns if "Upload" in col][0]].values.tolist()[1:ul_len]) / (ul_len - 1)), 2))
# Append average upload drop data from filtered dataframe
- upload_drop.append(round((sum(filtered_df[[col for col in filtered_df.columns if "Rx % Drop B" in col][0]].values.tolist()[1:ul_len]) / (ul_len - 1)), 2))
+ upload_drop.append(round((sum(filtered_df[[col for col in filtered_df.columns if "Tx % Drop" in col][0]].values.tolist()[1:ul_len]) / (ul_len - 1)), 2))
# Append 0 for download data
download_data.append(0)
@@ -1803,6 +1910,9 @@ def generate_report(self, iterations_before_test_stopped_by_user, incremental_ca
report.set_graph_image(graph_png)
report.move_graph_image()
report.build_graph()
+ if(self.dowebgui and self.get_live_view):
+ self.add_live_view_images_to_report(report)
+
if self.group_name:
report.set_obj_html(
_obj_title="Detailed Result Table For Groups ",
@@ -1829,6 +1939,7 @@ def generate_report(self, iterations_before_test_stopped_by_user, incremental_ca
self.mode_list[0:int(incremental_capacity_list[i])],
direction_in_table[0:int(incremental_capacity_list[i])],
download_list[0:int(incremental_capacity_list[i])],
+ [str(n) for n in avg_rtt_data[0:int(incremental_capacity_list[i])]],
[str(n) + " Mbps" for n in download_data[0:int(incremental_capacity_list[i])]],
upload_list[0:int(incremental_capacity_list[i])],
[str(n) + " Mbps" for n in upload_data[0:int(incremental_capacity_list[i])]],
@@ -1850,6 +1961,7 @@ def generate_report(self, iterations_before_test_stopped_by_user, incremental_ca
self.mode_list[0:int(incremental_capacity_list[i])],
direction_in_table[0:int(incremental_capacity_list[i])],
download_list[0:int(incremental_capacity_list[i])],
+ [str(n) for n in avg_rtt_data[0:int(incremental_capacity_list[i])]],
[str(n) + " Mbps" for n in download_data[0:int(incremental_capacity_list[i])]],
upload_list[0:int(incremental_capacity_list[i])],
[str(n) + " Mbps" for n in upload_data[0:int(incremental_capacity_list[i])]],
@@ -1875,26 +1987,27 @@ def generate_report(self, iterations_before_test_stopped_by_user, incremental_ca
" Channel ": self.channel_list[0:int(incremental_capacity_list[i])],
" Mode": self.mode_list[0:int(incremental_capacity_list[i])],
# " Direction":direction_in_table[0:int(incremental_capacity_list[i])],
- " Offered download rate ": download_list[0:int(incremental_capacity_list[i])],
- " Observed Average download rate ": [str(n) + " Mbps" for n in download_data[0:int(incremental_capacity_list[i])]],
- " Offered upload rate ": upload_list[0:int(incremental_capacity_list[i])],
- " Observed Average upload rate ": [str(n) + " Mbps" for n in upload_data[0:int(incremental_capacity_list[i])]],
- " RSSI ": ['' if n == 0 else '-' + str(n) + " dbm" for n in rssi_data[0:int(incremental_capacity_list[i])]],
+ " Offered download rate (Mbps) ": download_list[0:int(incremental_capacity_list[i])],
+ " Observed Average download rate (Mbps) ": [str(n) for n in download_data[0:int(incremental_capacity_list[i])]],
+ " Offered upload rate (Mbps) ": upload_list[0:int(incremental_capacity_list[i])],
+ " Observed Average upload rate (Mbps) ": [str(n) for n in upload_data[0:int(incremental_capacity_list[i])]],
+ " RSSI (dBm) ": ['' if n == 0 else '-' + str(n) for n in rssi_data[0:int(incremental_capacity_list[i])]],
# " Link Speed ":self.link_speed_list[0:int(incremental_capacity_list[i])],
+ " Average RTT (ms)" : avg_rtt_data[0:int(incremental_capacity_list[i])],
" Packet Size(Bytes) ": [str(n) for n in packet_size_in_table[0:int(incremental_capacity_list[i])]],
}
if self.direction == "Bi-direction":
- bk_dataframe[" Average Rx Drop B% "] = upload_drop
- bk_dataframe[" Average Rx Drop A% "] = download_drop
+ bk_dataframe[" Average Tx Drop % "] = upload_drop
+ bk_dataframe[" Average Rx Drop % "] = download_drop
elif self.direction == 'Download':
- bk_dataframe[" Average Rx Drop A% "] = download_drop
+ bk_dataframe[" Average Rx Drop % "] = download_drop
# adding rx drop while uploading as 0
- bk_dataframe[" Average Rx Drop B% "] = [0.0] * len(download_drop)
+ bk_dataframe[" Average Tx Drop % "] = [0.0] * len(download_drop)
else:
- bk_dataframe[" Average Rx Drop B% "] = upload_drop
+ bk_dataframe[" Average Tx Drop % "] = upload_drop
# adding rx drop while downloading as 0
- bk_dataframe[" Average Rx Drop A% "] = [0.0] * len(upload_drop)
+ bk_dataframe[" Average Rx Drop % "] = [0.0] * len(upload_drop)
if self.expected_passfail_value or self.device_csv_name:
bk_dataframe[" Expected " + self.direction + " rate "] = [str(n) + " Mbps" for n in test_input_list]
bk_dataframe[" Status "] = pass_fail_list
@@ -1922,7 +2035,7 @@ def generate_report(self, iterations_before_test_stopped_by_user, incremental_ca
# objective title and description
report.set_obj_html(_obj_title="Objective",
_obj="The Candela Interoperability test is designed to measure an Access Point’s client performance when handling different amounts of Real clients"
- " like android, Linux, windows, and IOS. The test allows the user to increase the number of clients in user-defined steps for each test iteration and"
+ " like Android, Linux, Windows, MacOS and IOS. The test allows the user to increase the number of clients in user-defined steps for each test iteration and"
" measure the per-client throughput for each trial. Along with throughput other measurements made are client connection times, Station 4-Way"
" Handshake time, DHCP times, and more. The expected behavior is for the AP to be able to handle several stations (within the limitations of the"
" AP specs) and make sure all Clients get a fair amount of airtime both upstream and downstream. An AP that scales well will not show a"
@@ -1987,6 +2100,17 @@ def generate_report(self, iterations_before_test_stopped_by_user, incremental_ca
}
report.test_setup_table(test_setup_data=test_setup_info, value="Test Configuration")
+ if(not self.default_config):
+
+ report.set_obj_html(_obj_title="Configuration Status of Devices",
+ _obj="The table below shows the configuration status of each device (except iOS) with respect to the SSID connection.")
+ report.build_objective()
+
+ configured_dataframe = self.convert_to_table(self.configured_devices_check)
+ dataframe1 = pd.DataFrame(configured_dataframe)
+ report.set_table_dataframe(dataframe1)
+ report.build_table()
+
# Loop through iterations and build graphs, tables for each device
for i in range(len(iterations_before_test_stopped_by_user)):
rssi_signal_data = []
@@ -2002,10 +2126,14 @@ def generate_report(self, iterations_before_test_stopped_by_user, incremental_ca
upload_list, download_list = [], []
rssi_data = []
data_iter = data[data['Iteration'] == i + 1]
+ avg_rtt_data = []
# Fetch devices_on_running from real_client_list
devices_on_running.append(self.real_client_list[data1[i][-1] - 1].split(" ")[-1])
+ if not self.default_config and devices_on_running[-1] in self.configured_devices_check and not self.configured_devices_check[devices_on_running[-1]]:
+ continue
+
for k in devices_on_running:
# individual_device_data=[]
@@ -2019,14 +2147,14 @@ def generate_report(self, iterations_before_test_stopped_by_user, incremental_ca
# Append download and upload data from filtered dataframe
download_data.append(round((sum(filtered_df[[col for col in filtered_df.columns if "Download" in col][0]].values.tolist()[1:dl_len]) / (dl_len - 1)), 2))
upload_data.append(round((sum(filtered_df[[col for col in filtered_df.columns if "Upload" in col][0]].values.tolist()[1:ul_len]) / (ul_len - 1)), 2))
- upload_drop.append(round((sum(filtered_df[[col for col in filtered_df.columns if "Rx % Drop B" in col][0]].values.tolist()[1:ul_len]) / (ul_len - 1)), 2))
- download_drop.append(round((sum(filtered_df[[col for col in filtered_df.columns if "Rx % Drop A" in col][0]].values.tolist()[1:dl_len]) / (dl_len - 1)), 2))
+ upload_drop.append(round((sum(filtered_df[[col for col in filtered_df.columns if "Tx % Drop" in col][0]].values.tolist()[1:ul_len]) / (ul_len - 1)), 2))
+ download_drop.append(round((sum(filtered_df[[col for col in filtered_df.columns if "Rx % Drop " in col][0]].values.tolist()[1:dl_len]) / (dl_len - 1)), 2))
rssi_data.append(int(round(sum(filtered_df[[col for col in filtered_df.columns if "RSSI" in col][0]].values.tolist()) /
len(filtered_df[[col for col in filtered_df.columns if "RSSI" in col][0]].values.tolist()), 2)) * -1)
-
+ avg_rtt_data.append(filtered_df[[col for col in filtered_df.columns if "Average RTT " in col][0]].values.tolist()[-1])
# Calculate and append upload and download throughput to lists
- upload_list.append(str(round(int(self.cx_profile.side_a_min_bps) / 1000000, 2)) + "Mbps")
- download_list.append(str(round(int(self.cx_profile.side_b_min_bps) / 1000000, 2)) + "Mbps")
+ upload_list.append(str(round(int(self.cx_profile.side_a_min_bps) / 1000000, 2)))
+ download_list.append(str(round(int(self.cx_profile.side_b_min_bps) / 1000000, 2)))
direction_in_table.append(self.direction)
elif self.direction == 'Download':
@@ -2038,22 +2166,22 @@ def generate_report(self, iterations_before_test_stopped_by_user, incremental_ca
upload_data.append(0)
rssi_data.append(int(round(sum(filtered_df[[col for col in filtered_df.columns if "RSSI" in col][0]].values.tolist()) /
len(filtered_df[[col for col in filtered_df.columns if "RSSI" in col][0]].values.tolist()), 2)) * -1)
- download_drop.append(round((sum(filtered_df[[col for col in filtered_df.columns if "Rx % Drop A" in col][0]].values.tolist()[1:dl_len]) / (dl_len - 1)), 2))
-
+ download_drop.append(round((sum(filtered_df[[col for col in filtered_df.columns if "Rx % Drop " in col][0]].values.tolist()[1:dl_len]) / (dl_len - 1)), 2))
+ avg_rtt_data.append(filtered_df[[col for col in filtered_df.columns if "Average RTT " in col][0]].values.tolist()[-1])
# Calculate and append upload and download throughput to lists
- upload_list.append(str(round(int(self.cx_profile.side_a_min_bps) / 1000000, 2)) + "Mbps")
- download_list.append(str(round(int(self.cx_profile.side_b_min_bps) / 1000000, 2)) + "Mbps")
+ upload_list.append(str(round(int(self.cx_profile.side_a_min_bps) / 1000000, 2)))
+ download_list.append(str(round(int(self.cx_profile.side_b_min_bps) / 1000000, 2)))
direction_in_table.append(self.direction)
elif self.direction == 'Upload':
# Calculate and append upload and download throughput to lists
- upload_list.append(str(round(int(self.cx_profile.side_a_min_bps) / 1000000, 2)) + "Mbps")
- download_list.append(str(round(int(self.cx_profile.side_b_min_bps) / 1000000, 2)) + "Mbps")
+ upload_list.append(str(round(int(self.cx_profile.side_a_min_bps) / 1000000, 2)))
+ download_list.append(str(round(int(self.cx_profile.side_b_min_bps) / 1000000, 2)))
rssi_data.append(int(round(sum(filtered_df[[col for col in filtered_df.columns if "RSSI" in col][0]].values.tolist()) /
len(filtered_df[[col for col in filtered_df.columns if "RSSI" in col][0]].values.tolist()), 2)) * -1)
- upload_drop.append(round((sum(filtered_df[[col for col in filtered_df.columns if "Rx % Drop B" in col][0]].values.tolist()[1:ul_len]) / (ul_len - 1)), 2))
-
+ upload_drop.append(round((sum(filtered_df[[col for col in filtered_df.columns if "Tx % Drop" in col][0]].values.tolist()[1:ul_len]) / (ul_len - 1)), 2))
+ avg_rtt_data.append(filtered_df[[col for col in filtered_df.columns if "Average RTT " in col][0]].values.tolist()[-1])
# Append upload data from filtered dataframe
upload_data.append(round((sum(filtered_df[[col for col in filtered_df.columns if "Upload" in col][0]].values.tolist()[1:ul_len]) / (ul_len - 1)), 2))
@@ -2202,21 +2330,21 @@ def generate_report(self, iterations_before_test_stopped_by_user, incremental_ca
bk_dataframe[" MAC "] = self.mac_id_list[int(incremental_capacity_list[i]) - 1]
bk_dataframe[" Channel "] = self.channel_list[int(incremental_capacity_list[i]) - 1]
bk_dataframe[" Mode"] = self.mode_list[int(incremental_capacity_list[i]) - 1]
- bk_dataframe[" Offered download rate "] = download_list[-1]
- bk_dataframe[" Observed Average download rate "] = [str(download_data[-1]) + " Mbps"]
- bk_dataframe[" Offered upload rate "] = upload_list[-1]
- bk_dataframe[" Observed Average upload rate "] = [str(upload_data[-1]) + " Mbps"]
- bk_dataframe[" RSSI "] = ['' if rssi_data[-1] == 0 else '-' + str(rssi_data[-1]) + " dbm"]
-
+ bk_dataframe[" Offered download rate (Mbps)"] = download_list[-1]
+ bk_dataframe[" Observed Average download rate (Mbps)"] = [str(download_data[-1])]
+ bk_dataframe[" Offered upload rate (Mbps)"] = upload_list[-1]
+ bk_dataframe[" Observed Average upload rate (Mbps)"] = [str(upload_data[-1])]
+ bk_dataframe[" Average RTT (ms) "] = avg_rtt_data[-1]
+ bk_dataframe[" RSSI (dBm)"] = ['' if rssi_data[-1] == 0 else '-' + str(rssi_data[-1])]
if self.direction == "Bi-direction":
- bk_dataframe[" Average Rx Drop B% "] = upload_drop
- bk_dataframe[" Average Rx Drop A% "] = download_drop
+ bk_dataframe[" Average Tx Drop % "] = upload_drop
+ bk_dataframe[" Average Rx Drop % "] = download_drop
elif self.direction == 'Download':
- bk_dataframe[" Average Rx Drop A% "] = download_drop
- bk_dataframe[" Average Rx Drop B% "] = [0.0] * len(download_drop)
+ bk_dataframe[" Average Rx Drop % "] = download_drop
+ bk_dataframe[" Average Tx Drop % "] = [0.0] * len(download_drop)
else:
- bk_dataframe[" Average Rx Drop B% "] = upload_drop
- bk_dataframe[" Average Rx Drop A% "] = [0.0] * len(upload_drop)
+ bk_dataframe[" Average Tx Drop % "] = upload_drop
+ bk_dataframe[" Average Rx Drop % "] = [0.0] * len(upload_drop)
# When pass fail criteria is specified
if self.expected_passfail_value or self.device_csv_name:
bk_dataframe[" Expected " + self.direction + " rate "] = test_input_list
@@ -2228,13 +2356,17 @@ def generate_report(self, iterations_before_test_stopped_by_user, incremental_ca
report.set_custom_html('
')
report.build_custom()
+ if(self.dowebgui and self.get_live_view and self.do_interopability):
+ self.add_live_view_images_to_report(report)
+
+
# report.build_custom()
report.build_footer()
report.write_html()
report.write_pdf(_orientation="Landscape")
# Creates a separate DataFrame for each group of devices.
- def generate_dataframe(self, groupdevlist, typeofdevice, devusername, devssid, devmac, devchannel, devmode, devdirection, devofdownload, devobsdownload,
+ def generate_dataframe(self, groupdevlist, typeofdevice, devusername, devssid, devmac, devchannel, devmode, devdirection, devofdownload, devrtt, devobsdownload,
devoffupload, devobsupload, devrssi, devExpected, devlinkspeed, devpacketsize, devstatus, upload_drop, download_drop):
"""
Creates a separate DataFrame for each group of devices.
@@ -2262,6 +2394,7 @@ def generate_dataframe(self, groupdevlist, typeofdevice, devusername, devssid, d
statuslist = []
avg_updrop = []
avg_dndrop = []
+ avgrtt = []
interop_tab_data = self.json_get('/adb/')["devices"]
for i in range(len(typeofdevice)):
for j in groupdevlist:
@@ -2279,6 +2412,7 @@ def generate_dataframe(self, groupdevlist, typeofdevice, devusername, devssid, d
obsupload.append(devobsupload[i])
rssi.append(devrssi[i])
linkspeed.append(devlinkspeed[i])
+ avgrtt.append(devrtt[i])
if len(upload_drop) != 0:
avg_updrop.append(upload_drop[i])
if len(download_drop) != 0:
@@ -2305,7 +2439,7 @@ def generate_dataframe(self, groupdevlist, typeofdevice, devusername, devssid, d
offupload.append(devoffupload[i])
obsupload.append(devobsupload[i])
rssi.append(devrssi[i])
-
+ avgrtt.append(devrtt[i])
linkspeed.append(devlinkspeed[i])
if len(upload_drop) != 0:
avg_updrop.append(upload_drop[i])
@@ -2333,6 +2467,7 @@ def generate_dataframe(self, groupdevlist, typeofdevice, devusername, devssid, d
" RSSI ": rssi,
" Link Speed ": linkspeed,
" Packet Size(Bytes) ": packetsize,
+ " RTT ": avgrtt
}
if self.direction == "Bi-direction":
@@ -2368,6 +2503,8 @@ def generate_dataframe(self, groupdevlist, typeofdevice, devusername, devssid, d
" Observed upload rate ": obsupload,
" RSSI ": rssi,
" Link Speed ": linkspeed,
+ " RTT ": avgrtt
+
}
if self.direction == "Bi-direction":
dataframe[" Average Rx Drop B% "] = avg_updrop
@@ -2522,6 +2659,42 @@ def change_port_to_ip(self, upstream_port):
return upstream_port
+ def add_live_view_images_to_report(self,report):
+ """
+ This function looks for throughput and RSSI images for each floor
+ in the 'live_view_images' folder within `self.result_dir`.
+ It waits up to **60 seconds** for each image. If an image is found,
+ it's added to the `report` on a new page; otherwise, it's skipped.
+
+ **Args:**
+ self: An object containing `total_floors`, `result_dir`, and `test_name`.
+ report: An object with `set_custom_html()` and `build_custom()` methods.
+ """
+ for floor in range(0,int(self.total_floors)):
+ throughput_image_path = os.path.join(self.result_dir, "live_view_images", f"{self.test_name}_throughput_{floor+1}.png")
+ rssi_image_path = os.path.join(self.result_dir, "live_view_images", f"{self.test_name}_rssi_{floor+1}.png")
+ timeout = 60 # seconds
+ start_time = time.time()
+
+ while not (os.path.exists(throughput_image_path) and os.path.exists(rssi_image_path)):
+ if time.time() - start_time > timeout:
+ logger.warning("Timeout: live view images not found within 60 seconds.")
+ break
+ time.sleep(1)
+ # The wait loop above has either found both images or timed out;
+ # the checks below add whichever images are actually present, so
+ # no further (potentially unbounded) busy-waiting is needed here.
+ if os.path.exists(throughput_image_path):
+ report.set_custom_html('')
+ report.build_custom()
+ report.set_custom_html(f'
')
+ report.build_custom()
+
+ if os.path.exists(rssi_image_path):
+ report.set_custom_html('')
+ report.build_custom()
+ report.set_custom_html(f'
')
+ report.build_custom()
# To validate the input args
def validate_args(args):
@@ -2642,6 +2815,10 @@ def main():
EXAMPLE-4:
Command Line Interface to run the test with postcleanup
python3 lf_interop_throughput.py --mgr 192.168.214.219 --mgr_port 8080 --upstream_port eth1 --test_duration 1m --download 1000000 --traffic_type lf_udp --do_interopability --postcleanup
+
+EXAMPLE-5:
+Command Line Interface to run the test without configuration
+python3 lf_interop_throughput.py --mgr 192.168.204.74 --mgr_port 8080 --upstream_port eth0 --test_duration 30s --traffic_type lf_udp --ssid NETGEAR_2G_wpa2 --passwd Password@123 --security wpa2 --do_interopability --device_list 1.15,1.400 --download 10000000 --default_config
SCRIPT_CLASSIFICATION : Test
SCRIPT_CATEGORIES: Performance, Functional, Report Generation
@@ -2684,7 +2861,7 @@ def main():
required.add_argument('--upload', help='--upload traffic load per connection (upload rate)', default='2560')
required.add_argument('--download', help='--download traffic load per connection (download rate)', default='2560')
required.add_argument('--test_duration', help='--test_duration sets the duration of the test', default="")
- required.add_argument('--report_timer', help='--duration to collect data', default="5s")
+ required.add_argument('--report_timer', help='--duration to collect data', default="1s")
required.add_argument('--ap_name', help="AP Model Name", default="Test-AP")
required.add_argument('--dowebgui', help="If true will execute script for webgui", action='store_true')
required.add_argument('--tos', default="Best_Efforts")
@@ -2703,6 +2880,8 @@ def main():
optional.add_argument('--security', help='WiFi Security protocol: < open | wep | wpa | wpa2 | wpa3 >', default="open")
optional.add_argument('--test_name', help='Specify test name to store the runtime csv results', default=None)
optional.add_argument('--result_dir', help='Specify the result dir to store the runtime logs', default='')
+ optional.add_argument('--get_live_view', help="If true, a heatmap will be generated from the testhouse automation WebGUI", action='store_true')
+ optional.add_argument('--total_floors', help="Total floors from testhouse automation WebGui ", default="0")
optional.add_argument("--expected_passfail_value", help="Specify the expected number of urls", default=None)
optional.add_argument("--device_csv_name", type=str, help='Specify the csv name to store expected url values', default=None)
optional.add_argument("--eap_method", type=str, default='DEFAULT', help="Specify the EAP method for authentication.")
@@ -2727,6 +2906,8 @@ def main():
optional.add_argument('--profile_name', type=str, help='Specify the profile name to apply configurations to the devices.')
optional.add_argument("--wait_time", type=int, help='Specify the maximum time to wait for Configuration', default=60)
optional.add_argument("--config", action="store_true", help="Specify for configuring the devices")
+ optional.add_argument("--default_config", action="store_true", help="To stop configuring the devices in interoperability")
+ optional.add_argument("--thpt_mbps", action="store_true", help="Interpret rated download and upload values as Mbps instead of bytes")
parser.add_argument('--help_summary', help='Show summary of what this script does', action="store_true")
args = parser.parse_args()
@@ -2743,6 +2924,14 @@ def main():
logger_config = lf_logger_config.lf_logger_config()
+ if(args.thpt_mbps):
+ # Rates were supplied in Mbps; convert each to bps independently.
+ # The default ('2560') and an explicit '0' are left untouched so that
+ # omitted or disabled directions are never accidentally scaled.
+ if args.download != '2560' and args.download != '0':
+ args.download = str(int(args.download) * 1000000)
+ if args.upload != '2560' and args.upload != '0':
+ args.upload = str(int(args.upload) * 1000000)
loads = {}
iterations_before_test_stopped_by_user = []
gave_incremental = False
@@ -2836,6 +3025,8 @@ def main():
do_interopability=args.do_interopability,
incremental=args.incremental,
precleanup=args.precleanup,
+ get_live_view= args.get_live_view,
+ total_floors = args.total_floors,
csv_direction=csv_direction,
expected_passfail_value=args.expected_passfail_value,
device_csv_name=args.device_csv_name,
@@ -2860,7 +3051,8 @@ def main():
pk_passwd=args.pk_passwd,
pac_file=args.pac_file,
wait_time=args.wait_time,
- config=args.config
+ config=args.config,
+ default_config = args.default_config
)
if gave_incremental:
@@ -2892,10 +3084,10 @@ def main():
for i in range(len(clients_to_run)):
# Extend individual_dataframe_column with dynamically generated column names
- individual_dataframe_column.extend([f'Download{clients_to_run[i]}', f'Upload{clients_to_run[i]}', f'Rx % Drop A {clients_to_run[i]}',
- f'Rx % Drop B{clients_to_run[i]}', f'RSSI {clients_to_run[i]} ', f'Tx-Rate {clients_to_run[i]} ', f'Rx-Rate {clients_to_run[i]} '])
+ individual_dataframe_column.extend([f'Download{clients_to_run[i]}', f'Upload{clients_to_run[i]}', f'Rx % Drop {clients_to_run[i]}',
+ f'Tx % Drop{clients_to_run[i]}', f'Average RTT {clients_to_run[i]} ', f'RSSI {clients_to_run[i]} ', f'Tx-Rate {clients_to_run[i]} ', f'Rx-Rate {clients_to_run[i]} '])
- individual_dataframe_column.extend(['Overall Download', 'Overall Upload', 'Overall Rx % Drop A', 'Overall Rx % Drop B', 'Iteration',
+ individual_dataframe_column.extend(['Overall Download', 'Overall Upload', 'Overall Rx % Drop ', 'Overall Tx % Drop', 'Iteration',
'TIMESTAMP', 'Start_time', 'End_time', 'Remaining_Time', 'Incremental_list', 'status'])
individual_df = pd.DataFrame(columns=individual_dataframe_column)
@@ -2903,6 +3095,11 @@ def main():
overall_end_time = overall_start_time + timedelta(seconds=int(args.test_duration) * len(incremental_capacity_list))
for i in range(len(to_run_cxs)):
+ is_device_configured = True
+ if args.do_interopability:
+ # To get resource of device under test in interopability
+ device_to_run_resource = throughput.extract_digits_until_alpha(to_run_cxs[i][0])
+
# Check the load type specified by the user
if args.load_type == "wc_intended_load":
# Perform intended load for the current iteration
@@ -2918,14 +3115,23 @@ def main():
if (args.do_interopability and i != 0):
throughput.stop_specific(to_run_cxs[i - 1])
time.sleep(5)
- throughput.start_specific(to_run_cxs[i])
+ if not args.default_config:
+ if (args.do_interopability and i == 0):
+ throughput.disconnect_all_devices()
+ if args.do_interopability and "iOS" not in to_run_cxs[i][0]:
+ logger.info("Configuring device of resource{}".format(to_run_cxs[i][0]))
+ is_device_configured = throughput.configure_specific([device_to_run_resource])
+ if is_device_configured:
+ throughput.start_specific(to_run_cxs[i])
# Determine device names based on the current iteration
device_names = created_cx_lists_keys[:to_run_cxs_len[i][-1]]
# Monitor throughput and capture all dataframes and test stop status
- all_dataframes, test_stopped_by_user = throughput.monitor(i, individual_df, device_names, incremental_capacity_list, overall_start_time, overall_end_time)
-
+ all_dataframes, test_stopped_by_user = throughput.monitor(i, individual_df, device_names, incremental_capacity_list, overall_start_time, overall_end_time, is_device_configured)
+ if args.do_interopability and "iOS" not in to_run_cxs[i][0] and not args.default_config:
+ # logger.info("Disconnecting device of resource{}".format(to_run_cxs[i][0]))
+ throughput.disconnect_all_devices([device_to_run_resource])
# Check if the test was stopped by the user
if test_stopped_by_user == False:
@@ -2949,4 +3155,4 @@ def main():
if __name__ == "__main__":
- main()
+ main()
\ No newline at end of file
diff --git a/py-scripts/lf_interop_video_streaming.py b/py-scripts/lf_interop_video_streaming.py
index f54e71d7e..591911517 100755
--- a/py-scripts/lf_interop_video_streaming.py
+++ b/py-scripts/lf_interop_video_streaming.py
@@ -125,7 +125,7 @@
class VideoStreamingTest(Realm):
def __init__(self, host, ssid, passwd, encryp, media_source, media_quality, suporrted_release=None, max_speed=None, url=None,
urls_per_tenm=None, duration=None, resource_ids=None, dowebgui=False, result_dir="", test_name=None, incremental=None, postcleanup=False, precleanup=False,
- pass_fail_val=None, csv_name=None, groups=None, profiles=None, config=None, file_name=None):
+ pass_fail_val=None, csv_name=None, groups=None, profiles=None, config=None, file_name=None, floors=None, get_live_view=None ):
super().__init__(lfclient_host=host, lfclient_port=8080)
self.adb_device_list = None
self.host = host
@@ -183,6 +183,8 @@ def __init__(self, host, ssid, passwd, encryp, media_source, media_quality, supo
self.selected_profiles = profiles
self.config = config
self.file_name = file_name
+ self.floors = floors
+ self.get_live_view = get_live_view
@property
def run(self):
@@ -462,10 +464,10 @@ def create_real(self, ports=None, sleep_time=.5, debug_=False, suppress_related_
if (url is None) or (url == ""):
raise ValueError("HTTPProfile::create: url unset")
if ftp:
- cx_name = name + "_ftp"
+ cx_name = 'vs_' + name + "_ftp"
else:
- cx_name = name + "_http"
+ cx_name = 'vs_' + name + "_http"
if interop is None:
if upload_name is None:
@@ -1349,7 +1351,7 @@ def generate_report(self, date, iterations_before_test_stopped_by_user, test_set
_xaxis_categories=self.trim_data(len(realtime_dataset['timestamp'][realtime_dataset['iteration'] == iter + 1].values.tolist()),
realtime_dataset['timestamp'][realtime_dataset['iteration'] == iter + 1].values.tolist()),
_label=['Rate'],
- _graph_image_name=f"line_graph{iter}"
+ _graph_image_name=f"vs_line_graph{iter}"
)
graph_png = graph.build_line_graph()
logger.info("graph name {}".format(graph_png))
@@ -1436,6 +1438,34 @@ def generate_report(self, date, iterations_before_test_stopped_by_user, test_set
report.move_graph_image()
report.build_graph()
+ if self.dowebgui and self.get_live_view:
+ script_dir = os.path.dirname(os.path.abspath(__file__))
+
+ report.set_custom_html("No of Buffers and Wait Time %
")
+ report.build_custom()
+
+ for floor in range(int(self.floors)):
+ # Construct expected image paths
+ vs_buffer_image = os.path.join(script_dir, "heatmap_images", f"{self.test_name}_vs_buffer_{floor+1}.png")
+ vs_wait_time_image = os.path.join(script_dir, "heatmap_images", f"{self.test_name}_vs_wait_time_{floor+1}.png")
+
+
+ # Wait for all required images to be generated (up to timeout)
+ timeout = 60 # seconds
+ start_time = time.time()
+
+ while not (os.path.exists(vs_buffer_image) and os.path.exists(vs_wait_time_image)):
+ if time.time() - start_time > timeout:
+ print(f"Timeout: Heatmap images for floor {floor + 1} not found within {timeout} seconds.")
+ break
+ time.sleep(1)
+
+ # Generate report sections for each image if it exists
+ for image_path in [vs_buffer_image, vs_wait_time_image,]:
+ if os.path.exists(image_path):
+ report.set_custom_html(f'
')
+ report.build_custom()
+
# Table 1
report.set_obj_html("Overall - Detailed Result Table", "The below tables provides detailed information for the Video Streaming test.")
report.build_objective()
@@ -1762,6 +1792,14 @@ def main():
parser.add_argument("--wait_time", type=int, help="Specify the time for configuration", default=60)
parser.add_argument('--config', action='store_true', help='specify this flag whether to config devices or not')
parser.add_argument("--device_csv_name", type=str, help="Specify the device csv name for pass/fail", default=None)
+ parser.add_argument('--get_live_view',
+ action="store_true",
+ help='specify this flag to get the liveview of the devices')
+
+ parser.add_argument('--floors',
+ type=int,
+ default=0,
+ help='specify the Number of floors there in the house')
args = parser.parse_args()
if args.help_summary:
@@ -1826,7 +1864,9 @@ def main():
groups=args.group_name,
profiles=args.profile_name,
config=args.config,
- file_name=args.file_name
+ file_name=args.file_name,
+ floors=args.floors,
+ get_live_view=args.get_live_view
)
args.upstream_port = obj.change_port_to_ip(args.upstream_port)
obj.validate_args()
diff --git a/py-scripts/lf_mixed_traffic.py b/py-scripts/lf_mixed_traffic.py
index 17437a332..103c237f2 100755
--- a/py-scripts/lf_mixed_traffic.py
+++ b/py-scripts/lf_mixed_traffic.py
@@ -209,6 +209,8 @@ def __init__(self,
result_dir=None,
test_name=None,
device_list=None,
+ get_live_view=False,
+ total_floors=0,
debug=False):
super().__init__(lfclient_host=host,
lfclient_port=port)
@@ -343,6 +345,8 @@ def __init__(self,
self.http_dev = []
self.http_mac = []
self.rx_rate = []
+ self.get_live_view = get_live_view
+ self.total_floors = total_floors
if self.dowebgui:
self.stopped = False
self.ping_execution = False
@@ -1368,7 +1372,7 @@ def http_test(self, ssid, password, security, http_file_size, target_per_ten, ht
uc_avg_val = self.http_obj.data['uc_avg']
url_times = self.http_obj.data['url_data']
rx_bytes_val = self.http_obj.data['bytes_rd']
- rx_rate_val = self.http_obj.data['rx_rate']
+ rx_rate_val = self.http_obj.data['rx rate (1m)']
else:
uc_avg_val = self.http_obj.my_monitor('uc-avg')
url_times = self.http_obj.my_monitor('total-urls')
@@ -1868,6 +1872,39 @@ def generate_all_report(self):
self.lf_report_mt.set_csv_filename(graph_png)
self.lf_report_mt.move_csv_file()
self.lf_report_mt.build_graph()
+ if self.dowebgui and self.get_live_view:
+ # test_name = os.path.basename(self.result_dir)
+ script_dir = os.path.dirname(os.path.abspath(__file__))
+
+ for floor in range(int(self.total_floors)):
+ # Construct expected image paths
+ packet_sent_image = os.path.join(script_dir, "heatmap_images", f"{self.test_name}_ping_packet_sent_{floor+1}.png")
+ packet_recv_image = os.path.join(script_dir, "heatmap_images", f"{self.test_name}_ping_packet_recv_{floor+1}.png")
+ packet_loss_image = os.path.join(script_dir, "heatmap_images", f"{self.test_name}_ping_packet_loss_{floor+1}.png")
+
+ # Wait for all required images to be generated (up to timeout)
+ timeout = 120 # seconds
+ start_time = time.time()
+
+ while not (os.path.exists(packet_sent_image) and os.path.exists(packet_recv_image) and os.path.exists(packet_loss_image)):
+ if time.time() - start_time > timeout:
+ print(f"Timeout: Heatmap images for floor {floor + 1} not found within {timeout} seconds.")
+ break
+ time.sleep(1)
+
+ self.lf_report_mt.set_custom_html("Ping Packet Sent vs Recevied vs Lost:
")
+ self.lf_report_mt.build_custom()
+
+ # Generate report sections for each image if it exists
+ for image_path in [packet_sent_image, packet_recv_image, packet_loss_image]:
+ if os.path.exists(image_path):
+ # report.set_custom_html('''
+ #
+ #

+ #
+ # '''.format(image_path))
+ self.lf_report_mt.set_custom_html(f'
')
+ self.lf_report_mt.build_custom()
dataframe1 = pd.DataFrame({
'Wireless Client': self.ping_test_obj.device_names,
'MAC': self.ping_test_obj.device_mac,
@@ -1980,7 +2017,7 @@ def generate_all_report(self):
self.lf_report_mt.set_csv_filename(graph_png)
self.lf_report_mt.move_csv_file()
self.lf_report_mt.build_graph()
- qos_obj.generate_individual_graph(self.res, self.lf_report_mt, qos_obj.connections_download_avg, qos_obj.connections_upload_avg, qos_obj.avg_drop_a, qos_obj.avg_drop_b)
+ qos_obj.generate_individual_graph(self.res, self.lf_report_mt, qos_obj.connections_download_avg, qos_obj.connections_upload_avg, qos_obj.avg_drop_a, qos_obj.avg_drop_b,self.total_floors,multicast_exists=True if "5" in self.tests and self.get_live_view else False)
if "3" in self.tests and self.ftp_test_status:
# 3.FTP test reporting in mixed traffic
self.lf_report_mt.set_obj_html(_obj_title="3. File Transfer Protocol (FTP) Test", _obj="")
@@ -2060,6 +2097,31 @@ def generate_all_report(self):
self.lf_report_mt.set_csv_filename(ftp_graph2)
self.lf_report_mt.move_csv_file()
self.lf_report_mt.build_graph()
+ if(self.dowebgui and self.get_live_view):
+ for floor in range(0,int(self.total_floors)):
+ script_dir = os.path.dirname(os.path.abspath(__file__))
+ throughput_image_path = os.path.join(script_dir, "heatmap_images", f"ftp_{self.test_name}_{floor+1}.png")
+ # rssi_image_path = os.path.join(script_dir, "heatmap_images", f"{self.test_name}_rssi_{floor+1}.png")
+ timeout = 60 # seconds
+ start_time = time.time()
+
+ while not (os.path.exists(throughput_image_path)):
+ if time.time() - start_time > timeout:
+ print("Timeout: Images not found within 60 seconds.")
+ break
+ time.sleep(1)
+ while not os.path.exists(throughput_image_path):
+ if os.path.exists(throughput_image_path):
+ break
+ # time.sleep(10)
+ if os.path.exists(throughput_image_path):
+ self.lf_report_mt.set_custom_html('')
+ self.lf_report_mt.build_custom()
+ #self.lf_report_mt.set_custom_html("Average Throughput Heatmap:
")
+ #self.lf_report_mt.build_custom()
+ self.lf_report_mt.set_custom_html(f'
')
+ self.lf_report_mt.build_custom()
+ # os.remove(throughput_image_path)
self.lf_report_mt.set_table_title("Overall Results")
self.lf_report_mt.build_table_title()
dataframe = {
@@ -2110,6 +2172,33 @@ def generate_all_report(self):
self.lf_report_mt.move_csv_file()
self.lf_report_mt.move_graph_image()
self.lf_report_mt.build_graph()
+ if(self.dowebgui and self.get_live_view):
+ print('total floors',self.total_floors)
+ for floor in range(0,int(self.total_floors)):
+ script_dir = os.path.dirname(os.path.abspath(__file__))
+ throughput_image_path = os.path.join(script_dir, "heatmap_images", f"http_{self.test_name}_{floor+1}.png")
+ print('image_path',f"{self.test_name}_{floor+1}.png")
+ # rssi_image_path = os.path.join(script_dir, "heatmap_images", f"{self.test_name}_rssi_{floor+1}.png")
+ timeout = 60 # seconds
+ start_time = time.time()
+
+ while not (os.path.exists(throughput_image_path)):
+ if time.time() - start_time > timeout:
+ print("Timeout: Images not found within 60 seconds.")
+ break
+ time.sleep(1)
+ while not os.path.exists(throughput_image_path):
+ if os.path.exists(throughput_image_path):
+ break
+ # time.sleep(10)
+ if os.path.exists(throughput_image_path):
+ self.lf_report_mt.set_custom_html('')
+ self.lf_report_mt.build_custom()
+ # self.lf_report_mt.set_custom_html("Average Throughput Heatmap:
")
+ # self.lf_report_mt.build_custom()
+ self.lf_report_mt.set_custom_html(f'
')
+ self.lf_report_mt.build_custom()
+ # os.remove(throughput_image_path)
self.lf_report_mt.set_table_title("Overall Results")
self.lf_report_mt.build_table_title()
dataframe = {
@@ -2222,6 +2311,40 @@ def generate_all_report(self):
self.lf_report_mt.set_graph_image(graph_png)
self.lf_report_mt.move_graph_image()
self.lf_report_mt.build_graph()
+ if(self.dowebgui and self.get_live_view):
+ for floor in range(0,int(self.total_floors)):
+ script_dir = os.path.dirname(os.path.abspath(__file__))
+ throughput_image_path = os.path.join(script_dir, "heatmap_images", f"{self.test_name}_throughput_{floor+1}.png")
+ rssi_image_path = os.path.join(script_dir, "heatmap_images", f"{self.test_name}_rssi_{floor+1}.png")
+ timeout = 60 # seconds
+ start_time = time.time()
+
+ while not (os.path.exists(throughput_image_path) and os.path.exists(rssi_image_path)):
+ if time.time() - start_time > timeout:
+ print("Timeout: Images not found within 60 seconds.")
+ break
+ time.sleep(1)
+ while not os.path.exists(throughput_image_path) and not os.path.exists(rssi_image_path):
+ if os.path.exists(throughput_image_path) and os.path.exists(rssi_image_path):
+ break
+ # time.sleep(10)
+ if os.path.exists(throughput_image_path):
+ self.lf_report_mt.set_custom_html('')
+ self.lf_report_mt.build_custom()
+ # self.lf_report_mt.set_custom_html("Average Throughput Heatmap:
")
+ # self.lf_report_mt.build_custom()
+ self.lf_report_mt.set_custom_html(f'
')
+ self.lf_report_mt.build_custom()
+ # os.remove(throughput_image_path)
+
+ if os.path.exists(rssi_image_path):
+ self.lf_report_mt.set_custom_html('')
+ self.lf_report_mt.build_custom()
+ # self.lf_report_mt.set_custom_html("Average RSSI Heatmap:
")
+ # self.lf_report_mt.build_custom()
+ self.lf_report_mt.set_custom_html(f'
')
+ self.lf_report_mt.build_custom()
+ # os.remove(rssi_image_path)
tos_dataframe_A = {
" Client Name ": client_names,
" Endp Name": endp_names,
@@ -2245,7 +2368,8 @@ def generate_all_report(self):
self.lf_report_mt.build_table()
overall_setup_info = {"contact": "support@candelatech.com"}
self.lf_report_mt.test_setup_table(test_setup_data=overall_setup_info, value="Overall Info")
- self.lf_report_mt.build_custom()
+ if not self.get_live_view:
+ self.lf_report_mt.build_custom()
self.lf_report_mt.build_footer()
self.lf_report_mt.write_html()
self.lf_report_mt.write_pdf_with_timestamp(_page_size='A4', _orientation='Portrait')
@@ -2515,6 +2639,8 @@ def main():
parser.add_argument('--help_summary', help='Show summary of what this script does', default=None,
action="store_true")
+ optional.add_argument('--get_live_view', help="If true will heatmap will be generated from testhouse automation WebGui ", action='store_true')
+ optional.add_argument('--total_floors', help="Total floors from testhouse automation WebGui ", default="0")
args = parser.parse_args()
@@ -2671,6 +2797,8 @@ def main():
device_list=args.device_list,
test_name=args.test_name,
result_dir=args.result_dir,
+ get_live_view= args.get_live_view,
+ total_floors = args.total_floors,
# path=path
)
# pre-cleaning & creating / selecting clients for both real and virtual
@@ -3240,7 +3368,6 @@ def __init__(self, client_dict_A, client_dict_B):
side_a_pdu=args.side_a_min_pdu, side_b_pdu=args.side_b_min_pdu,
all_bands=True)
# generating overall report
- mixed_obj.generate_all_report()
if mixed_obj.dowebgui:
try:
overall_status["status"] = "completed"
@@ -3261,6 +3388,8 @@ def __init__(self, client_dict_A, client_dict_B):
except Exception as e:
logging.info("Error while wrinting status file for webui", e)
+ mixed_obj.generate_all_report()
+ if mixed_obj.dowebgui:
# copying to home directory i.e home/user_name
mixed_obj.copy_reports_to_home_dir()
else:
diff --git a/py-scripts/lf_report.py b/py-scripts/lf_report.py
index 726698e3b..da13fa783 100755
--- a/py-scripts/lf_report.py
+++ b/py-scripts/lf_report.py
@@ -992,6 +992,15 @@ def build_banner_cover(self):
date=self.date,
)
self.html += self.banner_html
+
+ def insert_table_at_marker(self, df, marker_id="for_table"):
+ html_extra = df.to_html(index=False, justify='center')
+ marker = f""
+ if marker not in self.html:
+ raise ValueError(f"Marker div with id '{marker_id}' not found in HTML")
+
+ # Replace marker with table HTML
+ self.html = self.html.replace(marker, html_extra)
# Unit Test
diff --git a/py-scripts/lf_webpage.py b/py-scripts/lf_webpage.py
index 433c08b6b..1d1e18aa4 100755
--- a/py-scripts/lf_webpage.py
+++ b/py-scripts/lf_webpage.py
@@ -90,7 +90,7 @@
import shutil
import json
from lf_graph import lf_bar_graph_horizontal
-
+import traceback
import asyncio
from typing import List, Optional
import csv
@@ -116,10 +116,10 @@
class HttpDownload(Realm):
def __init__(self, lfclient_host, lfclient_port, upstream, num_sta, security, ssid, password, ap_name,
target_per_ten, file_size, bands, start_id=0, twog_radio=None, fiveg_radio=None, sixg_radio=None, _debug_on=False, _exit_on_error=False,
- test_name=None, _exit_on_fail=False, client_type="", port_list=[], devices_list=[], macid_list=[], lf_username="lanforge", lf_password="lanforge", result_dir="", dowebgui=False,
- device_list=[], get_url_from_file=None, file_path=None, device_csv_name='', expected_passfail_value=None, file_name=None, group_name=None, profile_name=None, eap_method=None,
+ test_name=None, _exit_on_fail=False, client_type="", port_list=None, devices_list=None, macid_list=None, lf_username="lanforge", lf_password="lanforge", result_dir="", dowebgui=False,
+ device_list=None, get_url_from_file=None, file_path=None, device_csv_name='', expected_passfail_value=None, file_name=None, group_name=None, profile_name=None, eap_method=None,
eap_identity=None, ieee80211=None, ieee80211u=None, ieee80211w=None, enable_pkc=None, bss_transition=None, power_save=None, disable_ofdma=None, roam_ft_ds=None, key_management=None,
- pairwise=None, private_key=None, ca_cert=None, client_cert=None, pk_passwd=None, pac_file=None, config=False, wait_time=60):
+ pairwise=None, private_key=None, ca_cert=None, client_cert=None, pk_passwd=None, pac_file=None, config=False, wait_time=60, get_live_view=False, total_floors=0,):
# super().__init__(lfclient_host=lfclient_host,
# lfclient_port=lfclient_port)
self.ssid_list = []
@@ -165,6 +165,8 @@ def __init__(self, lfclient_host, lfclient_port, upstream, num_sta, security, ss
self.created_cx = {}
self.station_list = []
self.radio = []
+ self.failed_cx = []
+ self.tracking_map = {}
self.get_url_from_file = get_url_from_file
self.file_path = file_path
self.file_name = file_name
@@ -195,6 +197,8 @@ def __init__(self, lfclient_host, lfclient_port, upstream, num_sta, security, ss
self.api_url = 'http://{}:{}'.format(self.host, self.port)
self.group_device_map = {}
self.individual_device_csv_names = []
+ self.get_live_view = get_live_view
+ self.total_floors = total_floors
# The 'phantom_check' will be handled within the 'get_real_client_list' function
def get_real_client_list(self):
@@ -630,6 +634,53 @@ def stop(self):
df1 = pd.DataFrame(self.data)
df1.to_csv("http_datavalues.csv", index=False)
+ def get_layer4_data(self):
+ cx_list = list(self.http_profile.created_cx.keys())
+ try:
+ l4_data = self.local_realm.json_get('layer4/{}/list?fields=uc-avg,uc-max,uc-min,total-urls,rx rate (1m),bytes-rd,total-err'.format(','.join(cx_list)))['endpoint']
+ except:
+ logger.error("l4 DATA not found")
+ exit(1)
+ l4_dict = {
+ 'uc_avg_data': [],
+ 'uc_max_data':[],
+ 'uc_min_data':[],
+ 'url_times':[],
+ 'rx_rate':[],
+ 'bytes_rd':[],
+ 'total_err':[]
+ }
+ if type(l4_data) != list:
+ l4_data = [{l4_data['name']:l4_data}]
+ idx = 0
+ for cx in cx_list:
+ cx_found = False
+ for i in l4_data:
+ for cx_name,value in i.items():
+ if cx == cx_name:
+ l4_dict['uc_avg_data'].append(value['uc-avg'])
+ l4_dict['uc_max_data'].append(value['uc-max'])
+ l4_dict['uc_min_data'].append(value['uc-min'])
+ l4_dict['url_times'].append(value['total-urls'])
+ l4_dict['rx_rate'].append(value['rx rate (1m)'])
+ l4_dict['bytes_rd'].append(value['bytes-rd'])
+ l4_dict['total_err'].append(value['total-err'])
+ cx_found = True
+ if not cx_found:
+ print(f'apending default for http {cx}')
+ self.failed_cx.append(cx)
+ l4_dict['uc_avg_data'].append(0 if not self.tracking_map else self.tracking_map['uc_avg_data'][idx])
+ l4_dict['uc_max_data'].append(0 if not self.tracking_map else self.tracking_map['uc_max_data'][idx])
+ l4_dict['uc_min_data'].append(0 if not self.tracking_map else self.tracking_map['uc_min_data'][idx])
+ l4_dict['url_times'].append(0 if not self.tracking_map else self.tracking_map['url_times'][idx])
+ l4_dict['rx_rate'].append(0 if not self.tracking_map else self.tracking_map['rx_rate'][idx])
+ l4_dict['bytes_rd'].append(0 if not self.tracking_map else self.tracking_map['bytes_rd'][idx])
+ l4_dict['total_err'].append(0 if not self.tracking_map else self.tracking_map['total_err'][idx])
+ idx += 1
+ self.tracking_map = l4_dict.copy()
+
+ return l4_dict
+
def monitor_for_runtime_csv(self, duration):
time_now = datetime.now()
@@ -661,12 +712,25 @@ def monitor_for_runtime_csv(self, duration):
# uc_min_data = self.json_get("layer4/list?fields=uc-min")
# total_url_data = self.json_get("layer4/list?fields=total-urls")
# bytes_rd = self.json_get("layer4/list?fields=bytes-rd")
- uc_avg_data = self.my_monitor('uc-avg')
- uc_max_data = self.my_monitor('uc-max')
- uc_min_data = self.my_monitor('uc-min')
- url_times = self.my_monitor('total-urls')
- rx_rate = self.my_monitor('rx rate (1m)')
- bytes_rd = self.my_monitor('bytes-rd')
+ # uc_avg_data = self.my_monitor('uc-avg')
+ # uc_max_data = self.my_monitor('uc-max')
+ # uc_min_data = self.my_monitor('uc-min')
+ # url_times = self.my_monitor('total-urls')
+ # rx_rate = self.my_monitor('rx rate (1m)')
+ # bytes_rd = self.my_monitor('bytes-rd')
+ # total_err = self.my_monitor('total-err')
+ l4_dict = self.get_layer4_data()
+ uc_avg_data = l4_dict['uc_avg_data']
+ uc_max_data = l4_dict['uc_max_data']
+ uc_min_data = l4_dict['uc_min_data']
+ url_times = l4_dict['url_times']
+ rx_rate = l4_dict['rx_rate']
+ bytes_rd = l4_dict['bytes_rd']
+ total_err = l4_dict['total_err']
+ urls_downloaded = []
+ for i in range(len(total_err)):
+ urls_downloaded.append(url_times[i]-total_err[i])
+ url_times = list(urls_downloaded)
self.data["MAC"] = self.macid_list
self.data["SSID"] = self.ssid_list
self.data["Channel"] = self.channel_list
@@ -675,9 +739,16 @@ def monitor_for_runtime_csv(self, duration):
individual_rx_data = []
individual_rx_data.extend([current_time])
for i, port in enumerate(self.port_list):
- row_data = [current_time, bytes_rd[i], url_times[i], rx_rate[i], rx_rate_list[i], tx_rate_list[i], rssi_list[i]]
- individual_device_data[port].loc[len(individual_device_data[port])] = row_data
-
+ # logger.info(f"row data HTTP",row_data)
+
+ try:
+ row_data = [current_time, bytes_rd[i], url_times[i], rx_rate[i], rx_rate_list[i], tx_rate_list[i], rssi_list[i]]
+ individual_device_data[port].loc[len(individual_device_data[port])] = row_data
+ except:
+ logger.info(f'http0 iii {i}')
+ logger.info(f"row data FTP0: {current_time}, {bytes_rd}, {url_times}, {rx_rate}, {rx_rate_list}, {tx_rate_list}, {rssi_list}")
+ traceback.print_exc()
+ exit(1)
if len(max_bytes_rd) == 0:
max_bytes_rd = list(bytes_rd)
for i in range(len(max_bytes_rd)):
@@ -706,6 +777,7 @@ def monitor_for_runtime_csv(self, duration):
self.data["uc_avg"] = uc_avg_data
self.data["bytes_rd"] = bytes_rd
self.data["rx rate (1m)"] = rx_rate
+ self.data["total_err"] = total_err
else:
self.data["status"] = ["RUNNING"] * len(self.devices_list)
self.data["url_data"] = [0] * len(self.devices_list)
@@ -714,6 +786,7 @@ def monitor_for_runtime_csv(self, duration):
self.data["uc_min"] = [0] * len(self.devices_list)
self.data["bytes_rd"] = [0] * len(self.devices_list)
self.data["rx rate (1m)"] = [0] * len(self.devices_list)
+ self.data["total_err"] = [0] * len(self.devices_list)
time_difference = abs(end_time - datetime.now())
total_hours = time_difference.total_seconds() / 3600
remaining_minutes = (total_hours % 1) * 60
@@ -722,7 +795,12 @@ def monitor_for_runtime_csv(self, duration):
self.data["remaining_time"] = [[str(int(total_hours)) + " hr and " + str(
int(remaining_minutes)) + " min" if int(total_hours) != 0 or int(remaining_minutes) != 0 else '<1 min'][
0]] * len(self.devices_list)
- df1 = pd.DataFrame(self.data)
+ try:
+ df1 = pd.DataFrame(self.data)
+ except:
+ logger.info(f'error error http data {self.data}')
+ traceback.print_exc()
+ exit(1)
if self.dowebgui:
df1.to_csv('{}/http_datavalues.csv'.format(self.result_dir), index=False)
elif self.client_type == 'Real':
@@ -744,7 +822,58 @@ def monitor_for_runtime_csv(self, duration):
for port, df in individual_device_data.items():
df.to_csv(f"{endtime}-http-{port}.csv", index=False)
individual_device_csv_names.append(f'{endtime}-http-{port}')
- self.individual_device_csv_names = individual_device_csv_names
+ self.individual_device_csv_names = individual_device_csv_names.copy()
+ try:
+ all_l4_data = self.get_all_l4_data()
+ df = pd.DataFrame(all_l4_data)
+ df.to_csv("all_l4_data.csv", index=False)
+ except:
+ logger.error("All l4 data not found")
+
+
+ def get_all_l4_data(self):
+ # List of all fields to collect
+ fields = [
+ "name", "eid", "type", "status", "total-urls", "urls/s", "bytes-rd", "bytes-wr",
+ "total-buffers", "total-rebuffers", "total-wait-time", "video-format-bitrate",
+ "audio-format-bitrate", "frame-rate", "video-quality", "tx rate", "tx-rate-1m",
+ "rx rate", "rx rate (1m)", "fb-min", "fb-avg", "fb-max", "uc-min", "uc-avg",
+ "uc-max", "dns-min", "dns-avg", "dns-max", "total-err", "bad-proto", "bad-url",
+ "rslv-p", "rslv-h", "!conn", "timeout", "nf (4xx)", "http-r", "http-p", "http-t",
+ "acc. denied", "ftp-host", "ftp-stor", "ftp-port", "write", "read", "redir",
+ "login-denied", "other-err", "elapsed", "rpt timer", "time-stamp"
+ ]
+
+ # Fetch all data in one go
+ data = self.local_realm.json_get(f"layer4/list?fields={','.join(fields)}")
+
+ # Initialize result dict
+ result = {field: [] for field in fields}
+
+ # Access 'endpoint' field
+ endpoint = data.get("endpoint", {})
+ cx_list = self.http_profile.created_cx.keys()
+ if isinstance(endpoint, dict):
+ # Single endpoint format
+ for field in fields:
+ result[field].append(endpoint.get(field, None))
+ else:
+ # Multiple endpoints
+ for created_cx in cx_list:
+ for cx in endpoint:
+ if created_cx in cx:
+ for field in fields:
+ result[field].append(cx[created_cx].get(field, None))
+ break
+
+ # Example transformation for specific fields (e.g., bytes-rd in MB)
+ if "bytes-rd" in result:
+ result["bytes-rd"] = [
+ float(f"{int(x) / 1_000_000:.4f}") if x is not None else None
+ for x in result["bytes-rd"]
+ ]
+
+ return result
def my_monitor(self, data_mon):
# data in json format
@@ -957,7 +1086,7 @@ def summary_calculation(self, result_data, bands, threshold_5g, threshold_2g, th
def check_station_ip(self):
pass
- def generate_graph(self, dataset, lis, bands):
+ def generate_graph(self, dataset, lis, bands,graph_no=''):
bands = ['Download']
if self.client_type == "Real":
lis = self.devices_list
@@ -991,14 +1120,14 @@ def generate_graph(self, dataset, lis, bands):
_color_name=['steelblue'],
_show_bar_value=True,
_enable_csv=True,
- _graph_image_name="ucg-avg", _color_edge=['black'],
+ _graph_image_name=f"ucg-avg_http{graph_no}", _color_edge=['black'],
_color=['steelblue'],
_label=bands)
graph_png = graph.build_bar_graph_horizontal()
print("graph name {}".format(graph_png))
return graph_png
- def graph_2(self, dataset2, lis, bands):
+ def graph_2(self, dataset2, lis, bands,graph_no=''):
bands = ['Download']
if self.client_type == "Real":
lis = self.devices_list
@@ -1023,7 +1152,7 @@ def graph_2(self, dataset2, lis, bands):
_color_name=['orange'],
_show_bar_value=True,
_enable_csv=True,
- _graph_image_name="Total-url", _color_edge=['black'],
+ _graph_image_name=f"Total-url_http{graph_no}", _color_edge=['black'],
_color=['orange'],
_label=bands)
graph_png = graph_2.build_bar_graph_horizontal()
@@ -1046,10 +1175,13 @@ def generate_report(self, date, num_stations, duration, test_setup_info, dataset
result_data, test_rig, rx_rate,
test_tag, dut_hw_version, dut_sw_version, dut_model_num, dut_serial_num, test_id,
test_input_infor, csv_outfile, _results_dir_name='webpage_test', report_path=''):
+ print("Current working directory:", os.getcwd())
if self.dowebgui == "True" and report_path == '':
+ print("SCOP1")
report = lf_report.lf_report(_results_dir_name="webpage_test", _output_html="Webpage.html",
_output_pdf="Webpage.pdf", _path=self.result_dir)
else:
+ print('scope2')
report = lf_report.lf_report(_results_dir_name="webpage_test", _output_html="Webpage.html",
_output_pdf="Webpage.pdf", _path=report_path)
@@ -1058,7 +1190,12 @@ def generate_report(self, date, num_stations, duration, test_setup_info, dataset
# It ensures no blocker for virtual clients
if self.client_type == 'Real':
shutil.move('http_datavalues.csv', report_path_date_time)
+ try:
+ shutil.move('all_l4_data.csv', report_path_date_time)
+ except:
+ logging.info("failed to generate all l4 data")
# Moving individual CSVs to report directory
+ print('where is the path',os.getcwd())
for csv_name in self.individual_device_csv_names:
shutil.move(f"{csv_name}.csv", report_path_date_time)
if bands == "Both":
@@ -1096,6 +1233,33 @@ def generate_report(self, date, num_stations, duration, test_setup_info, dataset
report.move_csv_file()
report.move_graph_image()
report.build_graph()
+ if(self.dowebgui and self.get_live_view):
+ print('total floors',self.total_floors)
+ for floor in range(0,int(self.total_floors)):
+ script_dir = os.path.dirname(os.path.abspath(__file__))
+ throughput_image_path = os.path.join(script_dir, "heatmap_images", f"http_{self.test_name}_{floor+1}.png")
+ print('image_path',f"{self.test_name}_{floor+1}.png")
+ # rssi_image_path = os.path.join(script_dir, "heatmap_images", f"{self.test_name}_rssi_{floor+1}.png")
+ timeout = 60 # seconds
+ start_time = time.time()
+
+ while not (os.path.exists(throughput_image_path)):
+ if time.time() - start_time > timeout:
+ print("Timeout: Images not found within 60 seconds.")
+ break
+ time.sleep(1)
+ while not os.path.exists(throughput_image_path):
+ if os.path.exists(throughput_image_path):
+ break
+ # time.sleep(10)
+ if os.path.exists(throughput_image_path):
+ report.set_custom_html('')
+ report.build_custom()
+ # report.set_custom_html("Average Throughput Heatmap:
")
+ # report.build_custom()
+ report.set_custom_html(f'
')
+ report.build_custom()
+ # os.remove(throughput_image_path)
# report.set_obj_html("Summary Table Description", "This Table shows you the summary "
# "result of Webpage Download Test as PASS or FAIL criteria. If the average time taken by " +
@@ -1289,7 +1453,8 @@ def generate_report(self, date, num_stations, duration, test_setup_info, dataset
" No of times File downloaded ": dataset2,
" Average time taken to Download file (ms)": dataset,
" Bytes-rd (Mega Bytes) ": dataset1,
- "Rx Rate (Mbps)": rx_rate
+ "Rx Rate (Mbps)": rx_rate,
+ "Failed url's": self.data["total_err"]
}
if self.expected_passfail_value or self.device_csv_name:
dataframe[" Expected value of no of times file downloaded"] = test_input_list
@@ -1317,6 +1482,12 @@ def generate_report(self, date, num_stations, duration, test_setup_info, dataset
print("returned file {}".format(html_file))
print(html_file)
report.write_pdf()
+ # if(self.get_live_view):
+ # folder_path = os.path.join(script_dir, "heatmap_images")
+ # for f in os.listdir(folder_path):
+ # file_path = os.path.join(folder_path, f)
+ # if os.path.isfile(file_path):
+ # os.remove(file_path)
def copy_reports_to_home_dir(self):
curr_path = self.result_dir
@@ -1521,6 +1692,59 @@ def get_signal_and_link_speed_data(self):
rx_rate_list.append('-')
return signal_list, link_speed_list, rx_rate_list
+ def monitor_cx(self):
+ """
+ This function waits for up to 20 iterations to allow all CXs (connections) to be created.
+
+ If some CXs are still not created after 20 iterations, then the CXs related to that device are removed,
+ along with their associated client and MAC entries from all relevant lists.
+ """
+ max_retry = 20
+ current_retry = 0
+ failed_cx = []
+ flag = 0
+ idx_list = []
+ del_device_list, del_mac_list, del_port_list, del_device_list1 = [], [], [], []
+ while current_retry < max_retry:
+ failed_cx.clear()
+ idx_list.clear()
+ del_device_list.clear()
+ del_mac_list.clear()
+ del_port_list.clear()
+ del_device_list1.clear()
+ created_cx_list = list(self.http_profile.created_cx.keys())
+ for i, created_cxs in enumerate(created_cx_list):
+ try:
+ _ = self.local_realm.json_get("layer4/%s/list?fields=%s" %
+ (created_cxs, 'status'))['endpoint']['status']
+ except BaseException:
+ logger.error(f'cx not created for {self.port_list[i]}')
+ failed_cx.append(created_cxs)
+ del_device_list.append(self.device_list[i])
+ del_mac_list.append(self.macid_list[i])
+ del_port_list.append(self.port_list[i])
+ del_device_list1.append(self.devices_list[i])
+ if len(failed_cx) == 0:
+ flag = 1
+ break
+ logger.info(f'Try {current_retry} out of 20: Waiting for the cross-connections to be created.')
+ time.sleep(2)
+ current_retry += 1
+
+ if flag:
+ logger.info('cross connections found for all devices')
+ return
+ for cx in failed_cx:
+ del self.http_profile.created_cx[cx]
+ for i in range(len(del_port_list)):
+ self.port_list.remove(del_port_list[i])
+ self.macid_list.remove(del_mac_list[i])
+ self.device_list.remove(del_device_list[i])
+ self.devices_list.remove(del_device_list1[i])
+ if len(self.port_list) == 0:
+ logger.error('No cross connections created, aborting test')
+ exit(1)
+
def validate_args(args):
if args.expected_passfail_value and args.device_csv_name:
@@ -1735,6 +1959,8 @@ def main():
optional.add_argument("--wait_time", type=int, help='Specify the maximum time to wait for Configuration', default=60)
optional.add_argument("--config", action="store_true", help="Specify for configuring the devices")
+ optional.add_argument('--get_live_view', help="If true will heatmap will be generated from testhouse automation WebGui ", action='store_true')
+ optional.add_argument('--total_floors', help="Total floors from testhouse automation WebGui ", default="0")
help_summary = '''\
lf_webpage.py will verify that N clients are connected on a specified band and can download
some amount of file data from the HTTP server while measuring the time taken by clients to download the file and number of
@@ -1862,7 +2088,9 @@ def main():
expected_passfail_value=args.expected_passfail_value,
device_csv_name=args.device_csv_name,
wait_time=args.wait_time,
- config=args.config
+ config=args.config,
+ get_live_view= args.get_live_view,
+ total_floors = args.total_floors
)
if args.client_type == "Real":
if not isinstance(args.device_list, list):
@@ -1886,38 +2114,6 @@ def main():
"configuration_status": "configured"
}
http.updating_webui_runningjson(obj)
- android_devices, windows_devices, linux_devices, mac_devices = 0, 0, 0, 0
- all_devices_names = []
- device_type = []
- total_devices = ""
- for i in device_list:
- split_device_name = i.split(" ")
- if 'android' in split_device_name:
- all_devices_names.append(split_device_name[2] + ("(Android)"))
- device_type.append("Android")
- android_devices += 1
- elif 'Win' in split_device_name:
- all_devices_names.append(split_device_name[2] + ("(Windows)"))
- device_type.append("Windows")
- windows_devices += 1
- elif 'Lin' in split_device_name:
- all_devices_names.append(split_device_name[2] + ("(Linux)"))
- device_type.append("Linux")
- linux_devices += 1
- elif 'Mac' in split_device_name:
- all_devices_names.append(split_device_name[2] + ("(Mac)"))
- device_type.append("Mac")
- mac_devices += 1
-
- # Build total_devices string based on counts
- if android_devices > 0:
- total_devices += f" Android({android_devices})"
- if windows_devices > 0:
- total_devices += f" Windows({windows_devices})"
- if linux_devices > 0:
- total_devices += f" Linux({linux_devices})"
- if mac_devices > 0:
- total_devices += f" Mac({mac_devices})"
args.num_stations = len(port_list)
if not args.get_url_from_file:
http.file_create(ssh_port=args.ssh_port)
@@ -1928,6 +2124,9 @@ def main():
http.set_values()
http.precleanup()
http.build()
+ if args.client_type == 'Real':
+ http.monitor_cx()
+ logger.info(f'Test started on the devices : {http.port_list}')
test_time = datetime.now()
# Solution For Leap Year conflict changed it to %Y
test_time = test_time.strftime("%Y %d %H:%M:%S")
@@ -1947,7 +2146,8 @@ def main():
uc_avg_val = http.data['uc_avg']
url_times = http.data['url_data']
rx_bytes_val = http.data['bytes_rd']
- rx_rate_val = http.data['rx rate (1m)']
+ # print('rx_rate_Val',http.data['rx rate (1m)'])
+ rx_rate_val = list(http.data['rx rate (1m)'])
else:
uc_avg_val = http.my_monitor('uc-avg')
url_times = http.my_monitor('total-urls')
@@ -2080,6 +2280,38 @@ def main():
if int(duration == 3600) or (int(duration) > 3600):
duration = str(duration / 3600) + "h"
+ android_devices, windows_devices, linux_devices, mac_devices = 0, 0, 0, 0
+ all_devices_names = []
+ device_type = []
+ total_devices = ""
+ for i in http.devices_list:
+ split_device_name = i.split(" ")
+ if 'android' in split_device_name:
+ all_devices_names.append(split_device_name[2] + ("(Android)"))
+ device_type.append("Android")
+ android_devices += 1
+ elif 'Win' in split_device_name:
+ all_devices_names.append(split_device_name[2] + ("(Windows)"))
+ device_type.append("Windows")
+ windows_devices += 1
+ elif 'Lin' in split_device_name:
+ all_devices_names.append(split_device_name[2] + ("(Linux)"))
+ device_type.append("Linux")
+ linux_devices += 1
+ elif 'Mac' in split_device_name:
+ all_devices_names.append(split_device_name[2] + ("(Mac)"))
+ device_type.append("Mac")
+ mac_devices += 1
+
+ # Build total_devices string based on counts
+ if android_devices > 0:
+ total_devices += f" Android({android_devices})"
+ if windows_devices > 0:
+ total_devices += f" Windows({windows_devices})"
+ if linux_devices > 0:
+ total_devices += f" Linux({linux_devices})"
+ if mac_devices > 0:
+ total_devices += f" Mac({mac_devices})"
if args.client_type == "Real":
if args.group_name:
group_names = ', '.join(configuration.keys())
@@ -2089,7 +2321,7 @@ def main():
"AP name": args.ap_name,
"Configuration": configmap,
"Configured Devices": ", ".join(all_devices_names),
- "No of Devices": "Total" + f"({args.num_stations})" + total_devices,
+ "No of Devices": "Total" + f"({len(all_devices_names)})" + total_devices,
"Traffic Direction": "Download",
"Traffic Duration ": duration
}
@@ -2099,7 +2331,7 @@ def main():
"SSID": ssid,
"Device List": ", ".join(all_devices_names),
"Security": security,
- "No of Devices": "Total" + f"({args.num_stations})" + total_devices,
+ "No of Devices": "Total" + f"({len(all_devices_names)})" + total_devices,
"Traffic Direction": "Download",
"Traffic Duration ": duration
}
@@ -2128,6 +2360,8 @@ def main():
test_input_infor["File size"] = args.file_size
else:
test_setup_info["File location (URLs from the File)"] = args.file_path
+ if args.client_type == "Real":
+ test_setup_info["failed_cx's"] = http.failed_cx if http.failed_cx else "NONE"
# dataset = http.download_time_in_sec(result_data=result_data)
rx_rate = []
for i in result_data:
@@ -2158,6 +2392,16 @@ def main():
# "": args.bands,
# "PASS/FAIL": data
# }
+ if args.dowebgui:
+ http.data_for_webui["status"] = ["STOPPED"] * len(http.devices_list)
+ http.data_for_webui['rx rate (1m)'] = http.data['rx rate (1m)']
+ http.data_for_webui['total_err'] = http.data['total_err']
+ http.data_for_webui["start_time"] = http.data["start_time"]
+ http.data_for_webui["end_time"] = http.data["end_time"]
+ http.data_for_webui["remaining_time"] = http.data["remaining_time"]
+ df1 = pd.DataFrame(http.data_for_webui)
+ df1.to_csv('{}/http_datavalues.csv'.format(http.result_dir), index=False)
+
http.generate_report(date, num_stations=args.num_stations,
duration=args.duration, test_setup_info=test_setup_info, dataset=dataset, lis=lis,
bands=args.bands, threshold_2g=args.threshold_2g, threshold_5g=args.threshold_5g,
@@ -2171,13 +2415,6 @@ def main():
http.postcleanup()
# FOR WEBGUI, filling csv at the end to get the last terminal logs
if args.dowebgui:
- http.data_for_webui["status"] = ["STOPPED"] * len(http.devices_list)
- http.data_for_webui["start_time"] = http.data["start_time"]
- http.data_for_webui["end_time"] = http.data["end_time"]
- http.data_for_webui["remaining_time"] = http.data["remaining_time"]
- df1 = pd.DataFrame(http.data_for_webui)
- df1.to_csv('{}/http_datavalues.csv'.format(http.result_dir), index=False)
-
http.copy_reports_to_home_dir()
diff --git a/py-scripts/real_application_tests/real_browser/lf_interop_real_browser_test.py b/py-scripts/real_application_tests/real_browser/lf_interop_real_browser_test.py
index 41f6176f2..b402fc658 100644
--- a/py-scripts/real_application_tests/real_browser/lf_interop_real_browser_test.py
+++ b/py-scripts/real_application_tests/real_browser/lf_interop_real_browser_test.py
@@ -162,7 +162,9 @@ def __init__(self,
wait_time=60,
config=None,
selected_groups=None,
- selected_profiles=None):
+ selected_profiles=None,
+ no_browser_precleanup = True,
+ no_browser_postcleanup=True):
super().__init__(lfclient_host=host, lfclient_port=8080)
# Initialize attributes with provided parameters
self.host = host
@@ -184,7 +186,8 @@ def __init__(self,
self.no_precleanup = no_precleanup
self.direction = "dl"
self.dest = "/dev/null"
-
+ self.no_browser_precleanup = no_browser_precleanup
+ self.no_browser_postcleanup = no_browser_postcleanup
self.app = Flask(__name__)
self.app.logger.setLevel(logging.WARNING)
self.laptop_stats = {}
@@ -249,6 +252,7 @@ def __init__(self,
self.device_csv_name = device_csv_name
self.wait_time = wait_time
self.config = config
+ self.cx_order_list = []
self.selected_groups = selected_groups
self.selected_profiles = selected_profiles
self.config_obj = None
@@ -331,12 +335,16 @@ def build(self):
for i in range(0, len(self.laptop_os_types)):
if self.laptop_os_types[i] == 'windows':
cmd = "real_browser.bat --url %s --server %s --duration %s" % (self.url, self.upstream_port, self.duration)
+ if self.no_browser_precleanup:
+ cmd+=" --no_precleanup"
+ if self.no_browser_postcleanup:
+ cmd+=" --no_postcleanup"
self.generic_endps_profile.set_cmd(self.generic_endps_profile.created_endp[i], cmd)
elif self.laptop_os_types[i] == 'linux':
- cmd = "su -l lanforge ctrb.bash %s %s %s %s" % (self.new_port_list[i], self.url, self.upstream_port, self.duration)
+ cmd = "su -l lanforge ctrb.bash %s %s %s %s %s %s" % (self.new_port_list[i], self.url, self.upstream_port, self.duration,str(self.no_browser_precleanup).lower(),str(self.no_browser_postcleanup).lower())
self.generic_endps_profile.set_cmd(self.generic_endps_profile.created_endp[i], cmd)
elif self.laptop_os_types[i] == 'macos':
- cmd = "sudo bash ctrb.bash --url %s --server %s --duration %s" % (self.url, self.upstream_port, self.duration)
+ cmd = "sudo bash ctrb.bash --url %s --server %s --duration %s --no_precleanup=%s --no_postcleanup=%s" % (self.url, self.upstream_port, self.duration,str(self.no_browser_precleanup).lower(),str(self.no_browser_postcleanup).lower())
self.generic_endps_profile.set_cmd(self.generic_endps_profile.created_endp[i], cmd)
if len(self.phone_data) != 0:
@@ -460,10 +468,10 @@ def create_real(self, ports=None, sleep_time=.5, debug_=False, suppress_related_
if (url is None) or (url == ""):
raise ValueError("HTTPProfile::create: url unset")
if ftp:
- cx_name = name + "_ftp"
+ cx_name = 'rb_' + name + "_ftp"
else:
- cx_name = name + "_http"
+ cx_name = 'rb_' + name + "_http"
if interop is None:
if upload_name is None:
@@ -958,7 +966,7 @@ def run_test(self, available_resources):
sys.exit(1)
cx_order_list = self.calculate_cx_order_list()
-
+ self.cx_order_list = cx_order_list.copy()
for i, cx_batch in enumerate(cx_order_list):
self.start_specific(cx_batch)
logging.info(f"Test started on Devices with resource Ids : {cx_batch}")
@@ -1126,12 +1134,15 @@ def process_resources(self, config_dict):
_ = asyncio.run(self.config_obj.connectivity(device_list=device_list, wifi_config=config_dict))
self.devices = self.devices.get_devices()
+ print(f"self.devices{self.devices}")
resource_ids = sorted(set(int(item.split('.')[1]) for item in device_list if '.' in item))
+ print(f'resource IDs {resource_ids}')
# obj.resource_ids = ','.join(map(str, resource_ids))
available_resources = [res_id for res_id in resource_ids if any(
int(device.split('.')[1]) == res_id for device in self.devices if '.' in device
)]
+ print(f"availbe res {available_resources}")
return available_resources
@@ -1687,6 +1698,7 @@ def create_report(self):
x_fig_size = 18
y_fig_size = len(device_type_data) * 1 + 4
+ print('DEVICE NAMES',device_names)
bar_graph_horizontal = lf_bar_graph_horizontal(
_data_set=[total_urls],
_xaxis_name="URL",
@@ -1702,6 +1714,7 @@ def create_report(self):
_graph_image_name=f"{self.csv_file_names[i]}_urls_per_device",
_label=["URLs"]
)
+ # print('yaxssss)
graph_image = bar_graph_horizontal.build_bar_graph_horizontal()
report.set_graph_image(graph_image)
report.move_graph_image()
@@ -1798,6 +1811,7 @@ def create_report(self):
"Link Speed": tx_rate_data,
}
+ logger.info(f"dataframe realbrowser {final_test_results}")
test_results_df = pd.DataFrame(final_test_results)
report.set_table_dataframe(test_results_df)
report.build_table()
@@ -1812,6 +1826,8 @@ def create_report(self):
report.write_pdf()
except Exception as e:
logging.error(f"Error in create_report function {e}", exc_info=True)
+ logger.info("HIIIIIIIIIIIIIIII")
+ logger.info(f"REALBBBB {final_test_results}")
finally:
if not self.dowebgui:
source_dir = "."
@@ -1830,6 +1846,8 @@ def extract_device_data(self, file_path):
# Load the CSV file
data = pd.read_csv(file_path)
+
+ print("Absolute path:", os.path.abspath(file_path))
# Initialize lists to store data
final_eid_data = []
mac_data = []
diff --git a/py-scripts/real_application_tests/youtube/lf_interop_youtube.py b/py-scripts/real_application_tests/youtube/lf_interop_youtube.py
index 30ff9a0e3..3c3101847 100644
--- a/py-scripts/real_application_tests/youtube/lf_interop_youtube.py
+++ b/py-scripts/real_application_tests/youtube/lf_interop_youtube.py
@@ -129,8 +129,9 @@ def __init__(self,
upstream_port=None,
config=None,
selected_groups=None,
- selected_profiles=None
-
+ selected_profiles=None,
+ no_browser_precleanup = False,
+ no_browser_postcleanup = False
):
"""
@@ -197,7 +198,8 @@ def __init__(self,
self.config = config
self.selected_groups = selected_groups
self.selected_profiles = selected_profiles
-
+ self.no_browser_precleanup = no_browser_precleanup
+ self.no_browser_postcleanup = no_browser_postcleanup
def stop(self):
self.stop_signal = True
@@ -412,13 +414,17 @@ def create_generic_endp(self, query_resources):
for i in range(0, len(self.real_sta_os_types)):
if self.real_sta_os_types[i] == 'windows':
cmd = "youtube_stream.bat --url %s --host %s --device_name %s --duration %s --res %s" % (self.url, self.upstream_port, self.real_sta_hostname[i], self.duration, self.resolution)
+ if self.no_browser_precleanup:
+ cmd += " --no_precleanup"
+ if self.no_browser_precleanup:
+ cmd += " --no_postcleanup"
self.generic_endps_profile.set_cmd(self.generic_endps_profile.created_endp[i], cmd)
elif self.real_sta_os_types[i] == 'linux':
- cmd = "su -l lanforge ctyt.bash %s %s %s %s %s %s" % (self.new_port_list[i], self.url, self.upstream_port, self.real_sta_hostname[i], self.duration, self.resolution)
+ cmd = "su -l lanforge ctyt.bash %s %s %s %s %s %s %s %s" % (self.new_port_list[i], self.url, self.upstream_port, self.real_sta_hostname[i], self.duration, self.resolution,str(self.no_browser_precleanup).lower(),str(self.no_browser_postcleanup).lower())
self.generic_endps_profile.set_cmd(self.generic_endps_profile.created_endp[i], cmd)
elif self.real_sta_os_types[i] == 'macos':
- cmd = "sudo bash ctyt.bash --url %s --host %s --device_name %s --duration %s --res %s" % (self.url, self.upstream_port, self.real_sta_hostname[i], self.duration, self.resolution)
+ cmd = "sudo bash ctyt.bash --url %s --host %s --device_name %s --duration %s --res %s --no_precleanup=%s --no_postcleanup=%s" % (self.url, self.upstream_port, self.real_sta_hostname[i], self.duration, self.resolution,str(self.no_browser_precleanup).lower(), str(self.no_browser_postcleanup).lower())
self.generic_endps_profile.set_cmd(self.generic_endps_profile.created_endp[i], cmd)
def select_real_devices(self, real_devices, real_sta_list=None, base_interop_obj=None):
@@ -708,7 +714,11 @@ def move_files(self, source_file, dest_dir):
try:
filename = os.path.basename(source_file)
dest_file = os.path.join(dest_dir, filename)
- shutil.move(source_file, dest_file)
+ if self.do_webUI:
+ shutil.copy(source_file, dest_file)
+ else:
+ shutil.move(source_file, dest_file)
+
logging.info(f"Successfully moved '{source_file}' to '{dest_file}'.")
diff --git a/py-scripts/real_application_tests/zoom_automation/lf_interop_zoom.py b/py-scripts/real_application_tests/zoom_automation/lf_interop_zoom.py
index 519896a4d..fe953d4d8 100644
--- a/py-scripts/real_application_tests/zoom_automation/lf_interop_zoom.py
+++ b/py-scripts/real_application_tests/zoom_automation/lf_interop_zoom.py
@@ -91,7 +91,7 @@
class ZoomAutomation(Realm):
def __init__(self, ssid="SSID", band="5G", security="wpa2", apname="AP Name", audio=True, video=True, lanforge_ip=None,
- upstream_port='0.0.0.0', wait_time=30, devices=None, testname=None, config=None, selected_groups=None, selected_profiles=None):
+ upstream_port='0.0.0.0', wait_time=30, devices=None, testname=None, config=None, selected_groups=None, selected_profiles=None,no_browser_precleanup=False,no_browser_postcleanup=False):
super().__init__(lfclient_host=lanforge_ip)
self.upstream_port = upstream_port
@@ -130,7 +130,8 @@ def __init__(self, ssid="SSID", band="5G", security="wpa2", apname="AP Name", au
self.zoom_host = None
self.testname = testname
self.stop_signal = False
-
+ self.no_browser_precleanup = no_browser_precleanup
+ self.no_browser_postcleanup = no_browser_postcleanup
# self.path = "/home/lanforge/lanforge-scripts/py-scripts/zoom_automation/test_results"
self.path = os.path.join(os.getcwd(), "zoom_test_results")
if not os.path.exists(self.path):
@@ -469,11 +470,11 @@ def run(self, duration, upstream_port, signin_email, signin_passwd, participants
self.generic_endps_profile.set_cmd(self.generic_endps_profile.created_endp[0], cmd)
elif self.real_sta_os_type[0] == 'linux':
- cmd = "su -l lanforge ctzoom.bash %s %s %s" % (self.new_port_list[0], self.upstream_port, "host")
+ cmd = "su -l lanforge ctzoom.bash %s %s %s %s %s" % (self.new_port_list[0], self.upstream_port, "host",str(self.no_browser_precleanup).lower(),str(self.no_browser_postcleanup).lower())
self.generic_endps_profile.set_cmd(self.generic_endps_profile.created_endp[0], cmd)
elif self.real_sta_os_type[0] == 'macos':
- cmd = "sudo bash ctzoom.bash %s %s" % (self.upstream_port, "host")
+ cmd = "sudo bash ctzoom.bash %s %s --no_precleanup=%s --no_postcleanup=%s" % (self.upstream_port, "host",str(self.no_browser_precleanup).lower(),str(self.no_browser_postcleanup).lower())
self.generic_endps_profile.set_cmd(self.generic_endps_profile.created_endp[0], cmd)
self.generic_endps_profile.start_cx()
time.sleep(5)
@@ -504,10 +505,10 @@ def run(self, duration, upstream_port, signin_email, signin_passwd, participants
cmd = f"py zoom_client.py --ip {self.upstream_port}"
self.generic_endps_profile.set_cmd(self.generic_endps_profile.created_endp[i], cmd)
elif self.real_sta_os_type[i] == 'linux':
- cmd = "su -l lanforge ctzoom.bash %s %s %s" % (self.new_port_list[i], self.upstream_port, "client")
+ cmd = "su -l lanforge ctzoom.bash %s %s %s %s %s" % (self.new_port_list[i], self.upstream_port, "client",str(self.no_browser_precleanup).lower(),str(self.no_browser_postcleanup).lower())
self.generic_endps_profile.set_cmd(self.generic_endps_profile.created_endp[i], cmd)
elif self.real_sta_os_type[i] == 'macos':
- cmd = "sudo bash ctzoom.bash %s %s" % (self.upstream_port, "client")
+ cmd = "sudo bash ctzoom.bash %s %s %s %s" % (self.upstream_port, "client",str(self.no_browser_precleanup).lower(),str(self.no_browser_postcleanup).lower())
self.generic_endps_profile.set_cmd(self.generic_endps_profile.created_endp[i], cmd)
self.generic_endps_profile.start_cx()
@@ -669,7 +670,7 @@ def generate_report(self):
_results_dir_name="zoom_call_report",
_path=self.path)
report_path_date_time = report.get_path_date_time()
-
+ self.report_path_date_time = report_path_date_time
report.set_title("Zoom Call Automated Report")
report.build_banner()
diff --git a/py-scripts/requirements.txt b/py-scripts/requirements.txt
new file mode 100644
index 000000000..59b2ef346
--- /dev/null
+++ b/py-scripts/requirements.txt
@@ -0,0 +1,118 @@
+aiohappyeyeballs==2.6.1
+aiohttp==3.11.16
+aiosignal==1.3.2
+amqp==5.3.1
+appdirs==1.4.4
+asgiref==3.8.1
+attrs==25.3.0
+autopep8==2.3.2
+bcrypt==4.3.0
+beautifulsoup4==4.13.3
+billiard==4.2.1
+blinker==1.9.0
+bs4==0.0.2
+celery==5.5.1
+certifi==2025.1.31
+cffi==1.17.1
+cfgv==3.4.0
+charset-normalizer==3.4.1
+click==8.1.8
+click-didyoumean==0.3.1
+click-plugins==1.1.1
+click-repl==0.3.0
+contourpy==1.3.1
+cron-descriptor==1.4.5
+cryptography==44.0.2
+cycler==0.12.1
+distlib==0.3.9
+Django==5.1.8
+django-celery-beat==2.7.0
+django-enum-choices==2.1.4
+django-timezone-field==7.1
+django_celery_results==2.6.0
+filelock==3.18.0
+flake8==7.2.0
+flake8-bugbear==24.12.12
+Flask==3.1.0
+flask-cors==6.0.1
+flower==2.0.1
+fonttools==4.57.0
+frozenlist==1.5.0
+humanize==4.12.2
+identify==2.6.9
+idna==3.10
+iniconfig==2.1.0
+itsdangerous==2.2.0
+Jinja2==3.1.6
+jsonfield==3.1.0
+kaleido==0.2.1
+kiwisolver==1.4.8
+kombu==5.5.2
+lxml==5.3.2
+markdown-it-py==3.0.0
+MarkupSafe==3.0.2
+matplotlib==3.10.1
+mccabe==0.7.0
+mdurl==0.1.2
+multidict==6.4.3
+narwhals==1.34.1
+nodeenv==1.9.1
+numpy==2.2.4
+packaging==24.2
+pandas==2.2.3
+paramiko==3.5.1
+pdfkit==1.0.0
+pexpect==4.9.0
+pexpect-serial==0.1.0
+pillow==11.2.1
+pip_search==0.0.13
+platformdirs==4.3.7
+plotly==6.0.1
+pluggy==1.5.0
+pre_commit==4.2.0
+prometheus_client==0.21.1
+prompt_toolkit==3.0.50
+propcache==0.3.1
+psutil==7.0.0
+psycopg2-binary==2.9.10
+ptyprocess==0.7.0
+pycodestyle==2.13.0
+pycparser==2.22
+pyflakes==3.3.2
+Pygments==2.19.1
+PyNaCl==1.5.0
+pyparsing==3.2.3
+pyserial==3.5
+pyshark==0.6
+pytest==8.3.5
+pytest-html==4.1.1
+pytest-json==0.4.0
+pytest-json-report==1.5.0
+pytest-metadata==3.1.1
+python-crontab==3.2.0
+python-dateutil==2.9.0.post0
+pytz==2025.2
+PyYAML==6.0.2
+redis==5.2.1
+requests==2.32.3
+rich==14.0.0
+scipy==1.15.2
+scp==0.15.0
+setuptools==78.1.0
+simple-geometry==0.1.4
+six==1.17.0
+soupsieve==2.6
+sqlparse==0.5.3
+termcolor==3.0.1
+tornado==6.4.2
+typing_extensions==4.13.2
+tzdata==2025.2
+urllib3==2.4.0
+vine==5.1.0
+virtualenv==20.30.0
+wcwidth==0.2.13
+websocket-client==1.8.0
+Werkzeug==3.1.3
+wkhtmltopdf==0.2
+XlsxWriter==3.2.2
+yarl==1.19.0
diff --git a/py-scripts/test_l3.py b/py-scripts/test_l3.py
index 7f8fa668d..e479865eb 100755
--- a/py-scripts/test_l3.py
+++ b/py-scripts/test_l3.py
@@ -646,6 +646,7 @@
import shutil
import asyncio
+import copy
if sys.version_info[0] != 3:
print("This script requires Python 3")
exit(1)
@@ -751,6 +752,8 @@ def __init__(self,
dowebgui=False,
test_name="",
ip="",
+ get_live_view=False,
+ total_floors=0,
# for uniformity from webGUI result_dir as variable is used insead of local_lf_report_dir
result_dir="",
# wifi extra configuration
@@ -845,6 +848,8 @@ def __init__(self,
else:
self.dataplane = False
self.ssid_list = ssid_list
+ self.get_live_view = get_live_view
+ self.total_floors = total_floors
self.ssid_password_list = ssid_password_list
self.wifi_mode_list = wifi_mode_list
self.enable_flags_list = enable_flags_list
@@ -1585,7 +1590,8 @@ def get_endp_stats_for_port(self, port_eid, endps):
for endp in endps:
# pprint(endp)
if not self.dowebgui:
- logging.info(pformat(endp))
+ pass
+ # logging.info(pformat(endp))
eid_endp = endp["eid"].split(".")
logger.debug(
"Comparing eid:{eid} to endp-id {eid_endp}".format(eid=eid, eid_endp=eid_endp))
@@ -1607,7 +1613,11 @@ def get_endp_stats_for_port(self, port_eid, endps):
'Expected integer response for jitter, received non-numeric string instead. Replacing with 0')
jit += 0
else:
- jit += int(endp["jitter"])
+ try:
+ jit += int(endp["jitter"])
+ except:
+ jit += 0
+ logging.info(f"jitter was appended with zero")
# lat += int(endp["delay"])
# jit += int(endp["jitter"])
name = endp["name"]
@@ -2032,6 +2042,151 @@ def build(self, rebuild=False):
"PASS: Stations & CX build finished: created/updated: %s stations and %s connections." %
(self.station_count, self.cx_count))
+ def l3_endp_port_data(self,tos):
+ # Gather port data (only need SSID)
+ port_data = self.json_get('port/all?fields=signal,signal')
+ port_data.pop("handler", None)
+ port_data.pop("uri", None)
+ port_data.pop("warnings", None)
+ # logger.info("port_data type: {dtype} data: {data}".format(dtype=type(port_data), data=port_data))
+
+ # Gather resource data (only need hostname for alias)
+ resource_data = self.json_get('resource/all?fields=eid,hostname')
+ resource_data.pop("handler", None)
+ resource_data.pop("uri", None)
+ if not self.dowebgui:
+ logger.info("resource_data type: {dtype}".format(dtype=type(port_data)))
+
+ # Handle single resource case
+ if "resource" in resource_data.keys():
+ resource_data["resources"] = [{'1.1': resource_data['resource']}]
+ resource_data.pop("resource")
+
+ # Gather endpoint data (only need name, tx/rx rate, a/b, tos, eid, type)
+ endp_type_present = False
+ endp_data = self.json_get('endp/all?fields=name,tx+rate,rx+rate,a/b,tos,eid,type')
+ if endp_data is not None:
+ endp_type_present = True
+ else:
+ logger.info(
+ "Consider upgrading to 5.4.7 + endp field type not supported in LANforge GUI version results for Multicast reversed in graphs and tables")
+ endp_data = self.json_get('endp/all?fields=name,tx+rate,rx+rate,a/b,eid')
+ endp_type_present = False
+ endp_data.pop("handler", None)
+ endp_data.pop("uri", None)
+ logger.info("endpoint_data type: {dtype} data: {data}".format(
+ dtype=type(endp_data), data=endp_data))
+
+ # Initialize lists for the single TOS
+ clients_A = []
+ tos_ul_A = []
+ tos_dl_A = []
+ resource_alias_A = []
+ port_signal_A = []
+
+ clients_B = []
+ tos_ul_B = []
+ tos_dl_B = []
+ resource_alias_B = []
+ port_signal_B = []
+
+ for endp in endp_data['endpoint']:
+ endp_key = list(endp.keys())[0]
+ endp_info = endp[endp_key]
+ logger.info("endpoint_data key: {key} name: {name} a/b {ab} rx rate {rx_rate}".format(
+ key=endp_key, name=endp_info['name'], ab=endp_info['a/b'], rx_rate=endp_info['rx rate']))
+
+ # Process only if TOS matches or name contains TOS for non-Mcast types
+ if (endp_type_present and endp_info['type'] == 'Mcast' and endp_info['tos'] == tos) or \
+ (endp_type_present and endp_info['type'] in ['LF/TCP', 'LF/UDP'] and endp_info['tos'] == tos) or \
+ (not endp_type_present and tos in endp_info['name']):
+
+ # Resource lookup (for alias)
+ eid_tmp_resource = f"{self.name_to_eid(endp_info['eid'])[0]}.{self.name_to_eid(endp_info['eid'])[1]}"
+ # resource_found = False
+ alias = 'NA'
+ for res in resource_data['resources']:
+ res_key = list(res.keys())[0]
+ if res_key == eid_tmp_resource:
+ # resource_found = True
+ alias = self.create_resource_alias(
+ eid=res[res_key]['eid'],
+ host=res[res_key]['hostname'],
+ hw_version='',
+ kernel='')
+ break
+
+ # Port lookup (for signal)
+ eid_info = endp_info['name'].split('-')
+ eid_tmp_port = f"{eid_tmp_resource}.{eid_info[3 if endp_type_present and endp_info['type'] == 'Mcast' else 1]}"
+ # port_found = False
+ signal = 'NA'
+ for port in port_data['interfaces']:
+ port_key = list(port.keys())[0]
+ if port_key == eid_tmp_port:
+ signal = port[port_key]['signal']
+ # port_found = True
+ break
+
+ if endp_type_present and endp_info['type'] == 'Mcast':
+ if endp_info['a/b'] == "B":
+ clients_A.append(endp_info['name'])
+ tos_ul_A.append(endp_info["tx rate"])
+ tos_dl_A.append(endp_info["rx rate"])
+ resource_alias_A.append(alias)
+ port_signal_A.append(signal)
+ elif endp_info['a/b'] == "A":
+ clients_B.append(endp_info['name'])
+ tos_dl_B.append(endp_info["tx rate"])
+ tos_ul_B.append(endp_info["rx rate"])
+ resource_alias_B.append(alias)
+ port_signal_B.append(signal)
+ elif endp_type_present and endp_info['type'] in ['LF/TCP', 'LF/UDP']:
+ if endp_info['a/b'] == "A":
+ clients_A.append(endp_info['name'])
+ tos_ul_A.append(endp_info["tx rate"])
+ tos_dl_A.append(endp_info["rx rate"])
+ resource_alias_A.append(alias)
+ port_signal_A.append(signal)
+ elif endp_info['a/b'] == "B":
+ clients_B.append(endp_info['name'])
+ tos_dl_B.append(endp_info["tx rate"])
+ tos_ul_B.append(endp_info["rx rate"])
+ resource_alias_B.append(alias)
+ port_signal_B.append(signal)
+ else: # Non-Mcast, no type field
+ if endp_info['a/b'] == "A":
+ clients_A.append(endp_info['name'])
+ tos_ul_A.append(endp_info["tx rate"])
+ tos_dl_A.append(endp_info["rx rate"])
+ resource_alias_A.append(alias)
+ port_signal_A.append(signal)
+ elif endp_info['a/b'] == "B":
+ clients_B.append(endp_info['name'])
+ tos_dl_B.append(endp_info["tx rate"])
+ tos_ul_B.append(endp_info["rx rate"])
+ resource_alias_B.append(alias)
+ port_signal_B.append(signal)
+
+ # Construct the client dictionary for the single TOS
+ client_dict_A = {
+ tos: {
+ "clients_A": clients_A,
+ "ul_A": tos_ul_A,
+ "dl_A": tos_dl_A,
+ "resource_alias_A": resource_alias_A,
+ "port_signal_A": port_signal_A,
+ "clients_B": clients_B,
+ "ul_B": tos_ul_B,
+ "dl_B": tos_dl_B,
+ "resource_alias_B": resource_alias_B,
+ "port_signal_B": port_signal_B,
+ }
+ }
+
+ logger.info("printed the collected data")
+ return client_dict_A
+
def start(self, print_pass=False) -> int:
"""Run configured Layer-3 variable time test.
@@ -2165,7 +2320,7 @@ def start(self, print_pass=False) -> int:
total_ul_ll_bps = 0
reset_timer = 0
self.overall = []
-
+ individual_device_data = {}
# Monitor loop
while cur_time < end_time:
# interval_time = cur_time + datetime.timedelta(seconds=5)
@@ -2191,6 +2346,20 @@ def start(self, print_pass=False) -> int:
total_dl_bps=total_dl_bps, total_ul_bps=total_ul_bps, total_dl_ll_bps=total_dl_ll_bps)
# Added logic creating a csv file for webGUI to get runtime data
if self.dowebgui:
+ new_data_check = self.l3_endp_port_data(self.tos[0])
+ l3_port_data = new_data_check[self.tos[0]]
+ # print('new_data_check',new_data_check)
+ for name in l3_port_data['resource_alias_A']:
+ r_id = name.split('_')[0]
+ if r_id not in individual_device_data:
+ # individual_device_data[r_id]
+ columns = ['download_rate_A', 'upload_rate_A', 'RSSI']
+ individual_device_data[r_id] = pd.DataFrame(columns=columns)
+ for i in range(len(l3_port_data['resource_alias_A'])):
+ row_data = [l3_port_data['dl_A'][i],l3_port_data['ul_A'][i],l3_port_data['port_signal_A'][i]]
+ r_id = l3_port_data['resource_alias_A'][i].split('_')[0]
+ individual_device_data[r_id].loc[len(individual_device_data[r_id])] = row_data
+ individual_device_data[r_id].to_csv(f'{self.result_dir}/individual_device_data_{r_id}.csv',index=False)
time_difference = abs(end_time - datetime.datetime.now())
total_hours = time_difference.total_seconds() / 3600
remaining_minutes = (total_hours % 1) * 60
@@ -2894,12 +3063,12 @@ def evaluate_qos(self):
self.side_a_min_bps = self.cx_profile.side_a_min_bps
for endp_data in self.endp_data['endpoint']:
- logger.info("endp_data type {endp_type} endp_data {endp_data}".format(
- endp_type=type(endp_data), endp_data=endp_data))
+ # logger.info("endp_data type {endp_type} endp_data {endp_data}".format(
+ # endp_type=type(endp_data), endp_data=endp_data))
# The dictionary only has one key
endp_data_key = list(endp_data.keys())[0]
- logger.info("endpoint_data key: {key} name: {name} a/b {ab} rx rate {rx_rate}".format(
- key=endp_data_key, name=endp_data[endp_data_key]['name'], ab=endp_data[endp_data_key]['a/b'], rx_rate=endp_data[endp_data_key]['rx rate']))
+ # logger.info("endpoint_data key: {key} name: {name} a/b {ab} rx rate {rx_rate}".format(
+ # key=endp_data_key, name=endp_data[endp_data_key]['name'], ab=endp_data[endp_data_key]['a/b'], rx_rate=endp_data[endp_data_key]['rx rate']))
# Gather data for upload , download for the four data types BK, BE, VI, VO, place the
# the data_set will be the upload and download rates for each client
@@ -5678,6 +5847,148 @@ def set_dut_info(self,
self.dut_sw_version = dut_sw_version
self.dut_serial_num = dut_serial_num
+ def update_a(self):
+ tos_list = ['BK', 'BE', 'VI', 'VO']
+ for tos in tos_list:
+ if tos in self.client_dict_A and self.client_dict_A[tos]["ul_A"] and self.client_dict_A[tos]["dl_A"]:
+ min_bps_a = self.client_dict_A["min_bps_a"]
+ min_bps_b = self.client_dict_A["min_bps_b"]
+
+ clients_list = []
+ client_names = []
+ client_ul_A_data = []
+ client_dl_A_data = []
+ hw_versions = []
+ endp_names = []
+ port_names = []
+ modes = []
+ mac_list = []
+ ssid_list = []
+ channel_list = []
+ traffic_types = []
+ traffic_protocols = []
+ per_client_download_rate = []
+ download_rx_drop_percentages = []
+ resource_hosts = []
+ resource_eids = []
+ resource_kernels = []
+ offered_dl_rates = []
+ offered_ul_rates = []
+ total_clients = 0
+ # Process A side
+ for client_index in range(len(self.client_dict_A[tos]["clients_A"])):
+ if self.client_dict_A[tos]["clients_A"][client_index].startswith('MLT'):
+ total_clients += 1
+ clients_list.append(self.client_dict_A[tos]["clients_A"][client_index])
+ client_names.append(self.client_dict_A[tos]['resource_alias_A'][client_index])
+ client_ul_A_data.append(self.client_dict_A[tos]["ul_A"][client_index])
+ client_dl_A_data.append(self.client_dict_A[tos]["dl_A"][client_index])
+ hw_versions.append(self.client_dict_A[tos]['resource_hw_ver_A'][client_index])
+ endp_names.append(self.client_dict_A[tos]["clients_A"][client_index])
+ port_names.append(self.client_dict_A[tos]['port_A'][client_index])
+ modes.append(self.client_dict_A[tos]['mode_A'][client_index])
+ mac_list.append(self.client_dict_A[tos]['mac_A'][client_index])
+ ssid_list.append(self.client_dict_A[tos]['ssid_A'][client_index])
+ channel_list.append(self.client_dict_A[tos]['channel_A'][client_index])
+ traffic_types.append(self.client_dict_A[tos]['traffic_type_A'][client_index])
+ traffic_protocols.append(self.client_dict_A[tos]['traffic_protocol_A'][client_index])
+ per_client_download_rate.append(self.client_dict_A[tos]['dl_A'][client_index])
+ download_rx_drop_percentages.append(self.client_dict_A[tos]['download_rx_drop_percent_A'][client_index])
+ resource_hosts.append(self.client_dict_A[tos]['resource_host_A'][client_index])
+ resource_eids.append(self.client_dict_A[tos]['resource_eid_A'][client_index])
+ resource_kernels.append(self.client_dict_A[tos]['resource_kernel_A'][client_index])
+ offered_dl_rates.append(self.client_dict_A[tos]['offered_download_rate_A'][client_index])
+ offered_ul_rates.append(self.client_dict_A[tos]['offered_upload_rate_A'][client_index])
+
+ # Process B side
+ clients_list_B = []
+ client_names_B = []
+ client_ul_B_data = []
+ client_dl_B_data = []
+ hw_versions_B = []
+ endp_names_B = []
+ port_names_B = []
+ modes_B = []
+ mac_list_B = []
+ ssid_list_B = []
+ channel_list_B = []
+ traffic_types_B = []
+ traffic_protocols_B = []
+ per_client_download_rate_B = []
+ download_rx_drop_percentages_B = []
+ resource_hosts_B = []
+ resource_eids_B = []
+ resource_kernels_B = []
+ offered_dl_rates_B = []
+ offered_ul_rates_B = []
+
+ for client_index in range(len(self.client_dict_A[tos]["clients_B"])):
+ if self.client_dict_A[tos]["clients_B"][client_index].startswith('MLT'):
+ total_clients += 1
+ clients_list_B.append(self.client_dict_A[tos]["clients_B"][client_index])
+ client_names_B.append(self.client_dict_A[tos]['resource_alias_B'][client_index])
+ client_ul_B_data.append(self.client_dict_A[tos]["ul_B"][client_index])
+ client_dl_B_data.append(self.client_dict_A[tos]["dl_B"][client_index])
+ hw_versions_B.append(self.client_dict_A[tos]['resource_hw_ver_B'][client_index])
+ endp_names_B.append(self.client_dict_A[tos]["clients_B"][client_index])
+ port_names_B.append(self.client_dict_A[tos]['port_B'][client_index])
+ modes_B.append(self.client_dict_A[tos]['mode_B'][client_index])
+ mac_list_B.append(self.client_dict_A[tos]['mac_B'][client_index])
+ ssid_list_B.append(self.client_dict_A[tos]['ssid_B'][client_index])
+ channel_list_B.append(self.client_dict_A[tos]['channel_B'][client_index])
+ traffic_types_B.append(self.client_dict_A[tos]['traffic_type_B'][client_index])
+ traffic_protocols_B.append(self.client_dict_A[tos]['traffic_protocol_B'][client_index])
+ per_client_download_rate_B.append(self.client_dict_A[tos]['dl_B'][client_index])
+ download_rx_drop_percentages_B.append(self.client_dict_A[tos]['download_rx_drop_percent_B'][client_index])
+ resource_hosts_B.append(self.client_dict_A[tos]['resource_host_B'][client_index])
+ resource_eids_B.append(self.client_dict_A[tos]['resource_eid_B'][client_index])
+ resource_kernels_B.append(self.client_dict_A[tos]['resource_kernel_B'][client_index])
+ offered_dl_rates_B.append(self.client_dict_A[tos]['offered_download_rate_B'][client_index])
+ offered_ul_rates_B.append(self.client_dict_A[tos]['offered_upload_rate_B'][client_index])
+
+ # Update the dict with filtered A-side data
+ self.client_dict_A[tos]["clients_A"] = clients_list
+ self.client_dict_A[tos]["resource_alias_A"] = client_names
+ self.client_dict_A[tos]["ul_A"] = client_ul_A_data
+ self.client_dict_A[tos]["dl_A"] = client_dl_A_data
+ self.client_dict_A[tos]["resource_hw_ver_A"] = hw_versions
+ self.client_dict_A[tos]["port_A"] = port_names
+ self.client_dict_A[tos]["mode_A"] = modes
+ self.client_dict_A[tos]["mac_A"] = mac_list
+ self.client_dict_A[tos]["ssid_A"] = ssid_list
+ self.client_dict_A[tos]["channel_A"] = channel_list
+ self.client_dict_A[tos]["traffic_type_A"] = traffic_types
+ self.client_dict_A[tos]["traffic_protocol_A"] = traffic_protocols
+ self.client_dict_A[tos]["download_rx_drop_percent_A"] = download_rx_drop_percentages
+ self.client_dict_A[tos]["resource_host_A"] = resource_hosts
+ self.client_dict_A[tos]["resource_eid_A"] = resource_eids
+ self.client_dict_A[tos]["resource_kernel_A"] = resource_kernels
+ self.client_dict_A[tos]["offered_download_rate_A"] = offered_dl_rates
+ self.client_dict_A[tos]["offered_upload_rate_A"] = offered_ul_rates
+
+ # Update the dict with filtered B-side data
+ self.client_dict_A[tos]["clients_B"] = clients_list_B
+ self.client_dict_A[tos]["resource_alias_B"] = client_names_B
+ self.client_dict_A[tos]["ul_B"] = client_ul_B_data
+ self.client_dict_A[tos]["dl_B"] = client_dl_B_data
+ self.client_dict_A[tos]["resource_hw_ver_B"] = hw_versions_B
+ self.client_dict_A[tos]["port_B"] = port_names_B
+ self.client_dict_A[tos]["mode_B"] = modes_B
+ self.client_dict_A[tos]["mac_B"] = mac_list_B
+ self.client_dict_A[tos]["ssid_B"] = ssid_list_B
+ self.client_dict_A[tos]["channel_B"] = channel_list_B
+ self.client_dict_A[tos]["traffic_type_B"] = traffic_types_B
+ self.client_dict_A[tos]["traffic_protocol_B"] = traffic_protocols_B
+ self.client_dict_A[tos]["download_rx_drop_percent_B"] = download_rx_drop_percentages_B
+ self.client_dict_A[tos]["resource_host_B"] = resource_hosts_B
+ self.client_dict_A[tos]["resource_eid_B"] = resource_eids_B
+ self.client_dict_A[tos]["resource_kernel_B"] = resource_kernels_B
+ self.client_dict_A[tos]["offered_download_rate_B"] = offered_dl_rates_B
+ self.client_dict_A[tos]["offered_upload_rate_B"] = offered_ul_rates_B
+ self.client_dict_B = copy.deepcopy(self.client_dict_A)
+
+
+
def generate_report(self, config_devices=None, group_device_map=None):
self.report.set_obj_html("Objective", "The Layer 3 Traffic Generation Test is designed to test the performance of the "
"Access Point by running layer 3 Cross-Connect Traffic. Layer-3 Cross-Connects represent a stream "
@@ -5685,7 +5996,8 @@ def generate_report(self, config_devices=None, group_device_map=None):
"each of which is associated with a particular Port (physical or virtual interface).")
self.report.build_objective()
-
+ self.update_a()
+ # self.update_b()
test_setup_info = {
"DUT Name": self.dut_model_num,
"DUT Hardware Version": self.dut_hw_version,
@@ -5792,9 +6104,62 @@ def generate_report(self, config_devices=None, group_device_map=None):
# graph BK A
# try to do as a loop
+ logger.info(f"BEFORE REAL A {self.client_dict_A}")
tos_list = ['BK', 'BE', 'VI', 'VO']
-
+ if self.real:
+ tos_types = ['BE', 'BK', 'VI', 'VO']
+            logger.info("client_dict_B is client_dict_A: %s", self.client_dict_B is self.client_dict_A)
+ for tos_key in tos_types:
+ if tos_key in self.client_dict_A:
+ tos_data = self.client_dict_A[tos_key]
+
+ # Filter A side
+ traffic_proto_A = tos_data.get("traffic_protocol_A", [])
+ indices_to_keep_A = [i for i, proto in enumerate(traffic_proto_A) if proto == "Mcast"]
+
+ # Filter B side
+ traffic_proto_B = tos_data.get("traffic_protocol_B", [])
+ indices_to_keep_B = [i for i, proto in enumerate(traffic_proto_B) if proto == "Mcast"]
+
+ for key in list(tos_data.keys()):
+ if key in ["colors", "labels"]:
+ continue # Keep as-is
+
+ if key.endswith('_A'):
+ filtered_list = [tos_data[key][i] for i in indices_to_keep_A if i < len(tos_data[key])]
+ tos_data[key] = filtered_list
+
+ elif key.endswith('_B'):
+ filtered_list = [tos_data[key][i] for i in indices_to_keep_B if i < len(tos_data[key])]
+ tos_data[key] = filtered_list
+ for tos_key in tos_types:
+ if tos_key in self.client_dict_B:
+ tos_data = self.client_dict_B[tos_key]
+
+ # Filter A side
+ traffic_proto_A = tos_data.get("traffic_protocol_A", [])
+ indices_to_keep_A = [i for i, proto in enumerate(traffic_proto_A) if proto == "Mcast"]
+
+ # Filter B side
+ traffic_proto_B = tos_data.get("traffic_protocol_B", [])
+ indices_to_keep_B = [i for i, proto in enumerate(traffic_proto_B) if proto == "Mcast"]
+
+ for key in list(tos_data.keys()):
+ if key in ["colors", "labels"]:
+ continue # Keep as-is
+
+ if key.endswith('_A'):
+ filtered_list = [tos_data[key][i] for i in indices_to_keep_A if i < len(tos_data[key])]
+ tos_data[key] = filtered_list
+
+ elif key.endswith('_B'):
+ filtered_list = [tos_data[key][i] for i in indices_to_keep_B if i < len(tos_data[key])]
+ tos_data[key] = filtered_list
+ logger.info(f"AFTER REAL A {self.client_dict_A}")
for tos in tos_list:
+            logger.debug("Selected ToS values: %s", self.tos)
+ if tos not in self.tos:
+ continue
if (self.client_dict_A[tos]["ul_A"] and self.client_dict_A[tos]["dl_A"]):
min_bps_a = self.client_dict_A["min_bps_a"]
min_bps_b = self.client_dict_A["min_bps_b"]
@@ -5849,6 +6214,40 @@ def generate_report(self, config_devices=None, group_device_map=None):
self.report.build_graph()
self.report.set_csv_filename(graph_png)
self.report.move_csv_file()
+ if(self.dowebgui and self.get_live_view):
+ for floor in range(0,int(self.total_floors)):
+ script_dir = os.path.dirname(os.path.abspath(__file__))
+ throughput_image_path = os.path.join(script_dir, "heatmap_images", f"{self.test_name}_throughput_{floor+1}.png")
+ rssi_image_path = os.path.join(script_dir, "heatmap_images", f"{self.test_name}_rssi_{floor+1}.png")
+ timeout = 60 # seconds
+ start_time = time.time()
+
+ while not (os.path.exists(throughput_image_path) and os.path.exists(rssi_image_path)):
+ if time.time() - start_time > timeout:
+                        logger.warning("Timeout: Images not found within 60 seconds.")
+ break
+ time.sleep(1)
+                    # NOTE(review): removed a redundant second wait loop here. Its inner
+                    # break condition (both images exist) could never be true while the
+                    # loop condition (neither image exists) held, and with the sleep
+                    # commented out it would busy-spin forever if the images never appear.
+ if os.path.exists(throughput_image_path):
+ self.report.set_custom_html('')
+ self.report.build_custom()
+                            # self.report.set_custom_html("<h3>Average Throughput Heatmap:</h3>")
+                            # self.report.build_custom()
+                            self.report.set_custom_html(f'<img src="{throughput_image_path}">')  # NOTE(review): <img> tag reconstructed; original attributes lost in extraction — confirm
+ self.report.build_custom()
+ # os.remove(throughput_image_path)
+
+ if os.path.exists(rssi_image_path):
+ self.report.set_custom_html('')
+ self.report.build_custom()
+                            # self.report.set_custom_html("<h3>Average RSSI Heatmap:</h3>")
+                            # self.report.build_custom()
+                            self.report.set_custom_html(f'<img src="{rssi_image_path}">')  # NOTE(review): <img> tag reconstructed; original attributes lost in extraction — confirm
+ self.report.build_custom()
+ # os.remove(rssi_image_path)
# For real devices appending the required data for pass fail criteria
if self.real:
@@ -5858,7 +6257,7 @@ def generate_report(self, config_devices=None, group_device_map=None):
for i in self.client_dict_A[tos]['dl_A']:
down.append(int(i) / 1000000)
for i in self.client_dict_A[tos]['offered_upload_rate_A']:
- off_up.append(int(i) / 1000000)
+ off_up.append(int(i) / 1_000_000)
for i in self.client_dict_A[tos]['offered_download_rate_A']:
off_down.append(int(i) / 1000000)
# if either 'expected_passfail_value' or 'device_csv_name' is provided for pass/fail evaluation
@@ -6070,6 +6469,13 @@ def generate_report(self, config_devices=None, group_device_map=None):
self.report.build_table_title()
self.report.set_table_dataframe(last_row)
self.report.build_table()
+ # if(self.get_live_view):
+ # folder_path = os.path.join(script_dir, "heatmap_images")
+
+ # for f in os.listdir(folder_path):
+ # file_path = os.path.join(folder_path, f)
+ # if os.path.isfile(file_path):
+ # os.remove(file_path)
def write_report(self):
"""Write out HTML and PDF report as configured."""
@@ -6214,7 +6620,7 @@ def webgui_finalize(self):
df1 = pd.DataFrame(self.overall)
df1.to_csv('{}/overall_multicast_throughput.csv'.format(self.result_dir), index=False)
- self.copy_reports_to_home_dir()
+ # self.copy_reports_to_home_dir()
def get_pass_fail_list(self, tos, up, down):
res_list = []
@@ -6399,8 +6805,8 @@ def query_real_clients(args):
'pac_file': args.pac_file,
'server_ip': upstream_port_ip,
}
- if not args.expected_passfail_value and args.device_csv_name is None:
- config_obj.device_csv_file(csv_name="device.csv")
+ # if not args.expected_passfail_value and args.device_csv_name is None:
+ # config_obj.device_csv_file(csv_name="device.csv")
# Configuration of devices with groups and profiles
if args.group_name and args.file_name and args.profile_name:
selected_groups = args.group_name.split(',')
@@ -7576,6 +7982,8 @@ def parse_args():
test_l3_parser.add_argument("--config", action="store_true", help="Specify for configuring the devices")
test_l3_parser.add_argument("--wait_time", type=int, help='Specify the maximum time to wait for Configuration', default=60)
test_l3_parser.add_argument("--real", action="store_true", help='For testing on real devies')
+    test_l3_parser.add_argument('--get_live_view', help="If true, a heatmap will be generated from the testhouse automation WebGUI", action='store_true')
+    test_l3_parser.add_argument('--total_floors', help="Total number of floors from the testhouse automation WebGUI", default="0")
parser.add_argument('--help_summary',
default=None,
action="store_true",
@@ -8184,6 +8592,8 @@ def main():
test_name=test_name,
dowebgui=args.dowebgui,
ip=ip,
+ get_live_view= args.get_live_view,
+ total_floors = args.total_floors,
# for uniformity from webGUI result_dir as variable is used insead of local_lf_report_dir
result_dir=args.local_lf_report_dir,
@@ -8271,7 +8681,8 @@ def main():
dut_sw_version=args.dut_sw_version,
dut_serial_num=args.dut_serial_num)
ip_var_test.set_report_obj(report=report)
-
+ if args.dowebgui:
+ ip_var_test.webgui_finalize()
# Generate and write out test report
logger.info("Generating test report")
if args.real:
@@ -8304,7 +8715,7 @@ def main():
# Run WebGUI-specific post test logic
if args.dowebgui:
- ip_var_test.webgui_finalize()
+ ip_var_test.copy_reports_to_home_dir()
if test_passed:
ip_var_test.exit_success()
diff --git a/py-scripts/tools/ct_tests_json/ct_us_002/ct_port_reset/ct_port_reset_2g_5g_6g_002.json b/py-scripts/tools/ct_tests_json/ct_us_002/ct_port_reset/ct_port_reset_2g_5g_6g_002.json
index ac8f84cc1..94bf31e50 100644
--- a/py-scripts/tools/ct_tests_json/ct_us_002/ct_port_reset/ct_port_reset_2g_5g_6g_002.json
+++ b/py-scripts/tools/ct_tests_json/ct_us_002/ct_port_reset/ct_port_reset_2g_5g_6g_002.json
@@ -8,7 +8,7 @@
]
},
"test_suites":{
- "cv_tests":{
+ "cv_port_reset_test":{
"LOAD_PORT_REST_DB":{
"enabled":"TRUE",
"timeout":"600",
@@ -33,14 +33,12 @@
" --raw_lines_file ./tools/ct_tests_json/ct_us_002/ct_port_reset/port_reset_002.txt",
" --pull_report",
" --local_lf_report_dir REPORT_PATH",
- " --test_tag 'PORT_RESET'",
- " --test_rig TEST_RIG ",
" --set DUT_SET_NAME",
" --verbosity 11"
]
},
"lf_qa":{
- "enabled":"FALSE",
+ "enabled":"TRUE",
"timeout":"600",
"load_db":"skip",
"command":"./tools/lf_qa.py",
@@ -50,7 +48,7 @@
]
},
"lf_inspect":{
- "enabled":"FALSE",
+ "enabled":"TRUE",
"timeout":"600",
"load_db":"skip",
"command":"./tools/lf_inspect.py",
diff --git a/py-scripts/tools/ct_tests_json/ct_us_002/ct_port_reset/port_reset_002.txt b/py-scripts/tools/ct_tests_json/ct_us_002/ct_port_reset/port_reset_002.txt
index f1a6c863f..70feaa3bc 100644
--- a/py-scripts/tools/ct_tests_json/ct_us_002/ct_port_reset/port_reset_002.txt
+++ b/py-scripts/tools/ct_tests_json/ct_us_002/ct_port_reset/port_reset_002.txt
@@ -44,9 +44,9 @@ rpt_path_make_subdir: 1
bg: 0xE0ECF8
dut_info_override:
dut_info_cmd:
-test_rig:
-test_tag:
-rpt_name:
+test_rig:CT_US_002
+test_tag:PORT_RESET
+rpt_name:PORT_RESET
rpt_dir_prefix_textfield:
show_scan: 1
auto_helper: 1
diff --git a/py-scripts/tools/ct_tests_json/ct_us_002/ct_scale/ct_scale_002.json b/py-scripts/tools/ct_tests_json/ct_us_002/ct_scale/ct_scale_002.json
index cc8b925a6..f0cf28c1a 100644
--- a/py-scripts/tools/ct_tests_json/ct_us_002/ct_scale/ct_scale_002.json
+++ b/py-scripts/tools/ct_tests_json/ct_us_002/ct_scale/ct_scale_002.json
@@ -8,7 +8,7 @@
]
},
"test_suites":{
- "ct_scale":{
+ "cv_scale_test":{
"SCALE_2G_5G":{
"enabled":"TRUE",
"timeout":"600",
diff --git a/py-scripts/tools/ct_tests_json/ct_us_005/ct_ap_auto/ct_ap_auto_capacity_005.json b/py-scripts/tools/ct_tests_json/ct_us_005/ct_ap_auto/ct_ap_auto_capacity_005.json
index f1b407eeb..4e2a30cd1 100644
--- a/py-scripts/tools/ct_tests_json/ct_us_005/ct_ap_auto/ct_ap_auto_capacity_005.json
+++ b/py-scripts/tools/ct_tests_json/ct_us_005/ct_ap_auto/ct_ap_auto_capacity_005.json
@@ -92,6 +92,7 @@
" --raw_line 'reset_duration_min: 5000'",
" --raw_line 'reset_duration_max: 10000'",
" --raw_line 'minimum_port_up_duration_ms: 5000'",
+ " --raw_line 'cap_pf_percent_combo: 800000'",
" --pull_report",
" --local_lf_report_dir REPORT_PATH",
" --test_tag 'AP_AUTO_CAPACITY'",
diff --git a/py-scripts/tools/ct_us_002_cv.bash b/py-scripts/tools/ct_us_002_cv.bash
index e3306ed4e..853a8a846 100755
--- a/py-scripts/tools/ct_us_002_cv.bash
+++ b/py-scripts/tools/ct_us_002_cv.bash
@@ -10,8 +10,8 @@ echo "Running Port Reset"
--json_rig ./ct_rig_json/ct_us_002_rig.json \
--json_dut ./ct_dut_json/ct_002_AX16K_dut.json \
--json_test \
-./ct_tests_json/ct_us_002/ct_port_reset/ct_port_reset_2g_5g_6g_002.json:cv_tests,\
-./ct_tests_json/ct_us_002/ct_scale/ct_scale_002.json:ct_scale \
+./ct_tests_json/ct_us_002/ct_port_reset/ct_port_reset_2g_5g_6g_002.json:cv_port_reset_test,\
+./ct_tests_json/ct_us_002/ct_scale/ct_scale_002.json:cv_scale_test \
--path /home/lanforge/html-reports/ct_us_002 \
--log_level debug \
--new_test_run
diff --git a/py-scripts/tools/ct_us_002_func_wc_dp_run.bash b/py-scripts/tools/ct_us_002_func_wc_dp_run.bash
index 297277fc5..e3b097c0a 100755
--- a/py-scripts/tools/ct_us_002_func_wc_dp_run.bash
+++ b/py-scripts/tools/ct_us_002_func_wc_dp_run.bash
@@ -9,4 +9,7 @@ echo "Running Wifi Capacity Tests"
./ct_us_002_wc.bash
echo "Running Dataplane Tests"
-./ct_us_002_dp.bash
\ No newline at end of file
+./ct_us_002_dp.bash
+
+echo "Running ChamberView: Port Reset and Scale Tests"
+./ct_us_002_cv.bash
diff --git a/py-scripts/tools/lf_test_gen/lf_create_radio_frame.py b/py-scripts/tools/lf_test_gen/lf_create_radio_frame.py
index f4ca3f322..92cd574ef 100755
--- a/py-scripts/tools/lf_test_gen/lf_create_radio_frame.py
+++ b/py-scripts/tools/lf_test_gen/lf_create_radio_frame.py
@@ -310,6 +310,12 @@ def get_lanforge_radio_information(self):
self.use_radio_6g_var_dict[radio].set("Do Not Use")
elif '7921e' in radio_name_tmp:
self.use_radio_6g_var_dict[radio].set("Do Not Use")
+ elif 'mt7996' in radio_name_tmp:
+ if '802.11bgn' in radio_type or '802.11an' in radio_type:
+ self.use_radio_6g_var_dict[radio].set("Do Not Use")
+ else:
+ self.use_radio_6g_var_dict[radio].set("Use")
+ self.suite_test_name_6g_dict[radio] = self.radio_model_dict[radio]
else:
self.use_radio_6g_var_dict[radio].set("Use")
self.suite_test_name_6g_dict[radio] = self.radio_model_dict[radio]