-    '''
-
-  def _generate_head(self):
-    return f'''
-      [HTML <head> markup stripped during extraction; page title: Risk Assessment]
-    '''
-
-  def _generate_header(self):
+    """Returns the current risk profile in HTML format"""
+
+    high_risk_message = '''The device has been assessed to be high
+                           risk due to the nature of the answers provided
+                           about the device functionality.'''
+    limited_risk_message = '''The device has been assessed to be limited risk
+                              due to the nature of the answers provided about
+                              the device functionality.'''
     with open(test_run_img_file, 'rb') as f:
-      tr_img_b64 = base64.b64encode(f.read()).decode('utf-8')
-    header = f'''
-      [HTML header markup stripped; the heading interpolates
       {'high' if self.risk == 'High' else 'limited'} Risk
-      and the body shows the matching high/limited risk message]
-    '''
-
-    # Add the device information
-    manufacturer = (json_data['device']['manufacturer']
-                    if 'manufacturer' in json_data['device'] else 'Undefined')
-    model = (json_data['device']['model']
-             if 'model' in json_data['device'] else 'Undefined')
-    fw = (json_data['device']['firmware']
-          if 'firmware' in json_data['device'] else 'Undefined')
-    mac = (json_data['device']['mac_addr']
-           if 'mac_addr' in json_data['device'] else 'Undefined')
-
-    summary += '''[HTML summary markup stripped]'''
-    if trailing_space:
-      label += ''''''
-    return label
-
-  def generate_head(self):
-    return f'''
-      [HTML <head> markup stripped during extraction; page title: Testrun Report]
-    '''
-
- def generate_css(self):
- return '''
- /* Set some global variables */
- :root {
- --header-height: .75in;
- --header-width: 8.5in;
- --header-pos-x: 0in;
- --header-pos-y: 0in;
- --page-width: 8.5in;
- --summary-height: 2.8in;
- --vertical-line-height: calc(var(--summary-height)-.2in);
- --vertical-line-pos-x: 25%;
- }
-
- @font-face {
- font-family: 'Google Sans';
- font-style: normal;
- src: url(https://fonts.gstatic.com/s/googlesans/v58/4Ua_rENHsxJlGDuGo1OIlJfC6l_24rlCK1Yo_Iqcsih3SAyH6cAwhX9RFD48TE63OOYKtrwEIJllpyk.woff2) format('woff2');
- unicode-range: U+0000-00FF, U+0131, U+0152-0153, U+02BB-02BC, U+02C6, U+02DA, U+02DC, U+0304, U+0308, U+0329, U+2000-206F, U+2074, U+20AC, U+2122, U+2191, U+2193, U+2212, U+2215, U+FEFF, U+FFFD;
- }
-
- @font-face {
- font-family: 'Roboto Mono';
- font-style: normal;
- src: url(https://fonts.googleapis.com/css2?family=Roboto+Mono:ital,wght@0,100..700;1,100..700&display=swap) format('woff2');
- unicode-range: U+0000-00FF, U+0131, U+0152-0153, U+02BB-02BC, U+02C6, U+02DA, U+02DC, U+0304, U+0308, U+0329, U+2000-206F, U+2074, U+20AC, U+2122, U+2191, U+2193, U+2212, U+2215, U+FEFF, U+FFFD;
- }
-
- /* Define some common body formatting*/
- body {
- font-family: 'Google Sans', sans-serif;
- margin: 0px;
- padding: 0px;
- }
-
- /* Use this for various section breaks*/
- .gradient-line {
- position: relative;
- background-image: linear-gradient(to right, red, blue, green, yellow, orange);
- height: 1px;
- /* Adjust the height as needed */
- width: 100%;
- /* To span the entire width */
- display: block;
- /* Ensures it's a block-level element */
- }
-
- /* Sets proper page size during print to pdf for weasyprint */
- @page {
- size: Letter;
- width: 8.5in;
- height: 11in;
- }
-
- .page {
- position: relative;
- margin: 0 20px;
- width: 8.5in;
- height: 11in;
- }
-
- /* Define the header related css elements*/
- .header {
- position: relative;
- }
-
- h1 {
- margin: 0 0 8px 0;
- font-size: 20px;
- font-weight: 400;
- }
-
- h2 {
- margin: 0px;
- font-size: 48px;
- font-weight: 700;
- }
-
- h3 {
- font-size: 24px;
- }
-
- h4 {
- font-size: 12px;
- font-weight: 500;
- color: #5F6368;
- margin-bottom: 0;
- margin-top: 0;
- }
-
- .module-summary {
- background-color: #F8F9FA;
- width: 100%;
- margin-bottom: 25px;
- margin-top: 25px;
- }
-
- .module-summary thead tr th {
- text-align: left;
- padding-top: 15px;
- padding-left: 15px;
- font-weight: 500;
- color: #5F6368;
- font-size: 14px;
- }
-
- .module-summary tbody tr td {
- padding-bottom: 15px;
- padding-left: 15px;
- font-size: 24px;
- }
-
- .module-data {
- border: 1px solid #DADCE0;
- border-radius: 3px;
- border-spacing: 0;
- }
-
- .module-data thead tr th {
- text-align: left;
- padding: 12px 25px;
- color: #3C4043;
- font-size: 14px;
- font-weight: 700;
- }
-
- .module-data tbody tr td {
- text-align: left;
- padding: 12px 25px;
- color: #3C4043;
- font-size: 14px;
- font-weight: 400;
- border-top: 1px solid #DADCE0;
- font-family: 'Roboto Mono', monospace;
- }
-
- div.steps-to-resolve {
- background-color: #F8F9FA;
- margin-bottom: 30px;
- width: 756px;
- padding: 20px 30px;
- vertical-align: top;
- }
-
- .steps-to-resolve-row {
- vertical-align: top;
- }
-
- .steps-to-resolve-test-name {
- display: inline-block;
- margin-left: 70px;
- margin-bottom: 20px;
- width: 250px;
- vertical-align: top;
- }
-
- .steps-to-resolve-description {
- display: inline-block;
- }
-
- .steps-to-resolve.subtitle {
- text-align: left;
- padding-top: 15px;
- font-weight: 500;
- color: #5F6368;
- font-size: 14px;
- }
-
- .steps-to-resolve-index {
- font-size: 40px;
- position: absolute;
- }
-
- .callout-container.info {
- background-color: #e8f0fe;
- }
-
- .callout-container.info .icon {
- width: 22px;
- height: 22px;
- margin-right: 5px;
- background-size: contain;
- background-image: url('data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAEwAAABOCAYAAACKX/AgAAAABHNCSVQICAgIfAhkiAAACYVJREFUeF7tXGtsVEUUPi0t0NIHli5Uni1I5KVYiCgPtQV8BcSIBkVUjFI0GiNGhR9KiIEfIqIkRlSqRlBQAVEREx9AqwIqClV5imILCBT6gHZLW2gLnm+xZHM5d2fm7t1tN9kv2R+dO3fmzHfncV7TmNKTZ89RFNoMxGrXjFb0MRAlzHAiRAmLEmbIgGH16AyLEmbIgGH16AyLEmbIgGH1OMP6rlVvZH1518E62nO4jkrKz9CBstNU4W2kU6fP8q/J10+Hdm34F0udkuOol6cdZXnaUr+uCTSwZwLFxca4JotJQzHh1PS9dU307Y4q2rjTS0XFp6j2zFkTWS/UTWwbS9m9O9CYgck09spUSm7fxlE7Tl4KC2F/H6un/PVlVLC7mhoa3bXE4uNiKHdACk0f66E+Xdo74cDonZASdryqgV7/5jit23aCQm2xtuElOn5IR3rsps7UOTXeiASTyiEhDEvv3cJyWrG5nM40uDujVINrFxdLk0d1oody0ik5wf2l6jphW/+uoZnLD1FV7fmNWzVA6Xnzfh7MrOzYIY7mT+lOw/okSV04LnOVsI+3VNDLX5QSTkAdJPEJOLJfCg3JSvAtI08y/1LjKC3p/OFdWdNIZVX88zYQlve24lrastdLNXyS6gAn6bMTMmjS8E461bXquEJYQ9M5mv/5Ufrk50plpyBjzKAUyuETbljvJIrjTdsEjXxobP2nhgp3eWnDzmoCqSrcdU0azbz9UopvY9aX1G7QhFXz0ntq2UHazmpCIECfmnpDOt1/fTq1j3fHwKhjteT978tpGf+gvwXCUFZDXrm/J6UkBrevBUUYZtaj+SUBycJXnchf+JExHrrk/6UWaGBOnmGWLdlQRp/8VBlwOwBpb0zLDGqmBUXYvDVHAi7DjI7xtGhqL7q8a+j1IxC990gdzXjvIB3j/c4OWJ7PTexq91hZ7nhtfLS5IiBZV2Um0vIn+oSNLIwUZhP6HMx922E177Mrf6ywe6wsd0TYz3/V0MJ1pbaNTxh6CeXnZV047WwrhuAB7M63uW/IYIcFa0tp6/4au8cBy40Jg1I6a8Uh270Cgr4wqZvx6RdQSsOHOHkhgx1pUHtmLf+XMBZTGBP2TkG5rVKKpTA7iP0Bpx4Mcv8fypwCstgtz5OnGn3WiCmMNn1sphMW7BPNHWzw2D+alU5TQVB/xOzdZCUogT0TW+YOcNKc7x24jKa8tl88CGBGrZ3Z18j2NJphi78+LpIF1QGnYTBkOWZE8SL2tEUP9hT9Z6cbz9Jidg6YQJuwv0rrad32E2Lbd16bFtbTUBQiQCFOT8goYd32k7Sf3U+60CYsnxVDyUSEBj99tEe3vxarN50VZ8hqRRMPagn76nRxcQvCmzhNCtn5JwHmTqg0eKk/p2XYLh5gs0wCHJveer0TU4uwr36vEj2lEAK2YaQAskr7LLzA6/+o0hqGVhCkYJc8u8ZekeqaIQ1pgzkNdUaLExeeklVsc1qxgb0fdwyT9zn/usoZBgO7iP1QEnIGJEvFrboMbiUJRf+cslXGjQjbeaiW6hsuVh7h/Luarf9IA3xwkN0KKMsI+6lw8ZuWNxA3lDCqf0qLmj+STDplMJtG9JNnGbwdKigJO1Amu0qyMxNUbbfa50OzZG9GcdkZpcxKwko4Ii0BplCkwi4Mh+i7CspTEraYBE+K+4SNnbeXai2u5kTeb9Y/308SwXEZgi0S7MbqX1dJWHOeg7WDdJtOrfVM/gZZVuPb5H3duohMSVDFBfCOcklK+Q+IG6YlBRdMkAQOVxmUVymXxW5y+MulJOycEGKMiQk+XBUuctzuR0mYncFaWaNne7ktsBvtIcokOxLUq0aDMLmRco5GRyoQTZcgTQ5rPSVhcMBJKKuOYMJsPrbdWP3HryQskzP/JByz+UpS3dZWhjwNCchyVEFJWC+PrLMUlcgGuarD1vB8e7FsAmWmt1WKpySsfzfZBNq0x0vwZEQakMyyea/srrIbq/8YlYRd0T1R9HnBQ7mNXSKRBmT+SOlSyJtFsrEKSsJg3SPsL6GAnW6RBqRJScjO6iBGlqx1lYThhdHspZSwnjOiJV+ZVLc1lEFW5JRJGD1IdvlY62oRdsvgVNH3BQXwgx/Mo8dWIcL1N3LJpAQ8ZGLfyO52HWgRhuTaHHYYSljK4XaE3Vs7TvDHXfqd/HGRtq6bQKxFGMhAHrxksGIDXbJRP67XUsS+xXFVyRuBMeXx2HShTVjfjPY0boicQrT6x0rad1Q/eqwrnFv1/jxST2ts8m/Hc7bRZQYXIrQJg/C4NID1bgX0sRnvHRD3B2vdcP+NPWvG0gOiztg2PoYe5zGZwCh7Bw0v+rKUlvLmKQHqBxLpTDOjm9uC89CqCuPzIJ7oBFBS8/KL6Tcbq+TBHA89eWsXo6aNJXko12ObiQzB5n56xEgA/8ogBgqk/88pWWh3Lufg2pGVytnUuC1iCmPCkLY9/94ehLs9Etb+eoLmrDpM+LotBfQ9Z+VhWst3nCTgwsNLU3pon4z+bRgThpev7ZtET4/LkGTxlYE0LAVJ57F9yaUH6HMa921HFrp55rYMGnaZsys1jghDp7gANTFALgKWwn2c+RfO0xOnIbINf7fZsyD3nZx2fvcI51dpjDd9/4mge7HhruFpvhwyXJgKBaCUQhfExYZAHpQhbC++mdeCFxsweN2rM8hnmMqb7H3XuXd1BrYhzB1o8JJS6v9xQNarD7Tw1ZlmgfBVX/zsKK3ZenEakXVGIcSFNKlczqLBVRbTC1PY0H9ht1Lhbi/B+NfZJ7EMZ7WWy1n+hHy4qYIWsp6GNEgd4K72qP7JlM36WxcOriKajgBxc8wTkSkEWxA/KD3ZQEUldbRpT7Xoz5L6w2n49PgMumek8z3L2m5Qe5i1Mfz9E98SwcUHLFWngMpyjgOimryL3UDPgvpzDZ/obsJ1wiAcyHq3oIxW8IVTty/FqwYPc2fyiHR6ODdCrjD7DwjLCHnwX3K6ejCzRUUSnkOPHs/Ogcdu7szLWw7c6LSjqhOSGWbtFDn+SO0u5P3HbQsAzoAc9mflcVo5PCqhRlgIax4E0teRkb2R3cRQbJ26t3GjN5uT4nIHphC8wbrOPzfIDCth/gJjpu34t9b3r2SQ5YjEvfP/SqbJp1Mh3wVGOP6dDCLSCCgjRopQ2KAeicbqiBtkoY0WI8ytAYS7Hce2ZLgFbS39RQkz/BJRwqKEGTJgWD06w6KEGTJgWD06w6KEGTJgWP0/nqir/+GPk3oAAAAASUVORK5CYII=');
- }
-
- .callout-container {
- display: flex;
- box-sizing: border-box;
- height: auto;
- min-height: 48px;
- padding: 6px 24px;
- border-radius: 8px;
- align-items: center;
- gap: 10px;
- color: #3c4043;
- font-size: 14px;
- }
-
- .device-information {
- padding-top: 0.2in;
- padding-left: 0.2in;
- background-color: #F8F9FA;
- width: 250px;
- height: 100.4%;
- }
-
- /* Define the summary related css elements*/
- .summary-content {
- position: relative;
- width: var(--page-width);
- height: var(--summary-height);
- margin-top: 19px;
- margin-bottom: 19px;
- background-color: #E8EAED;
- padding-bottom: 20px;
- }
-
- .summary-item-label {
- position: relative;
- }
-
- .summary-item-value {
- position: relative;
- font-size: 20px;
- font-weight: 400;
- color: #202124;
- }
-
- .summary-item-space {
- position: relative;
- padding-bottom: 15px;
- margin: 0;
- }
-
- .summary-device-modules {
- position: absolute;
- left: 3.2in;
- top: .3in;
- }
-
- .summary-device-module-label {
- font-size: 16px;
- font-weight: 500;
- color: #202124;
- width: fit-content;
- margin-bottom: 0.1in;
- }
-
- .summary-vertical-line {
- width: 1px;
- height: var(--vertical-line-height);
- background-color: #80868B;
- position: absolute;
- top: .3in;
- bottom: .1in;
- left: 3in;
- }
-
- /* CSS for the color box */
- .summary-color-box {
- position: absolute;
- right: 0in;
- top: 0in;
- width: 2.6in;
- height: 100%;
- }
-
- .summary-box-compliant {
- background-color: rgb(24, 128, 56);
- }
-
- .summary-box-non-compliant {
- background-color: #b31412;
- }
-
- .summary-box-label {
- font-size: 14px;
- margin-top: 5px;
- color: #DADCE0;
- position: relative;
- top: 10px;
- left: 20px;
- font-weight: 500;
- }
-
- .summary-box-value {
- font-size: 18px;
- margin: 0 0 10px 0;
- color: #ffffff;
- position: relative;
- top: 10px;
- left: 20px;
- }
-
- .result-list-title {
- font-size: 24px;
- }
-
- .result-list {
- position: relative;
- margin-top: .2in;
- font-size: 18px;
- }
-
- .result-line {
- border: 1px solid #D3D3D3;
- /* Light Gray border*/
- height: .4in;
- width: 8.5in;
- }
-
- .result-line-result {
- border-top: 0px;
- }
-
- .result-list-header-label {
- font-weight: 500;
- position: absolute;
- font-size: 12px;
- font-weight: bold;
- height: 40px;
- display: flex;
- align-items: center;
- }
-
- .result-test-label {
- position: absolute;
- font-size: 12px;
- margin-top: 12px;
- max-width: 300px;
- font-weight: normal;
- align-items: center;
- text-overflow: ellipsis;
- white-space: nowrap;
- overflow: hidden;
- }
-
- .result-test-description {
- max-width: 380px;
- }
-
- .result-test-result-error {
- background-color: #FCE8E6;
- color: #C5221F;
- left: 7.3in;
- }
-
- .result-test-result-feature-not-detected {
- background-color: #e3e3e3;
- left: 6.85in;
- }
-
- .result-test-result-informational {
- background-color: #d9f0ff;
- color: #0b5c8d;
- left: 7.08in;
- }
-
- .result-test-result-non-compliant {
- background-color: #FCE8E6;
- color: #C5221F;
- left: 7.01in;
- }
-
- .result-test-result {
- position: absolute;
- font-size: 12px;
- width: fit-content;
- height: 12px;
- margin-top: 8px;
- padding: 4px 4px 7px 5px;
- border-radius: 2px;
- }
-
- .result-test-result-compliant {
- background-color: #E6F4EA;
- color: #137333;
- left: 7.16in;
- }
-
- .result-test-result-skipped {
- background-color: #e3e3e3;
- color: #393939;
- left: 7.24in;
- }
-
- /* CSS for the footer */
- .footer {
- position: absolute;
- height: 30px;
- width: 8.5in;
- bottom: 0in;
- border-top: 1px solid #D3D3D3;
- }
-
- .footer-label {
- color: #3C4043;
- position: absolute;
- top: 5px;
- font-size: 12px;
- }
-
- /*CSS for the markdown tables */
- .markdown-table {
- border-collapse: collapse;
- margin-left: 20px;
- background-color: #F8F9FA;
- }
-
- .markdown-table th, .markdown-table td {
- border: none;
- text-align: left;
- padding: 8px;
- }
-
- .markdown-header-h1 {
- margin-top:20px;
- margin-bottom:20px;
- margin-right:0px;
- font-size: 2em;
- }
-
- .markdown-header-h2 {
- margin-top:20px;
- margin-bottom:20px;
- margin-right:0px;
- font-size: 1.5em;
- }
-
- .module-page-content {
- /*Page height minus header(93px), footer(30px),
- and a 20px bottom padding.*/
- height: calc(11in - 93px - 30px - 20px);
-
- /* In case we mess something up in our calculations
- we'll cut off the content of the page so
- the header, footer and line break work
- as expected
- */
- overflow: hidden;
- }
-
- .module-page-content h1 {
- font-size: 32px;
- }
-
- @media print {
- @page {
- size: Letter;
- width: 8.5in;
- height: 11in;
- }
- }'''
+ reports.append(page_content)
+ return reports
diff --git a/framework/python/src/common/util.py b/framework/python/src/common/util.py
index 096aaf4df..ba1b23e81 100644
--- a/framework/python/src/common/util.py
+++ b/framework/python/src/common/util.py
@@ -12,18 +12,19 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-"""Provides basic utilities for the network orchestrator."""
+"""Provides basic utilities for Testrun."""
import getpass
import os
import subprocess
import shlex
-from common import logger
+import typing as t
import netifaces
+from common import logger
LOGGER = logger.get_logger('util')
-def run_command(cmd, output=True):
+def run_command(cmd, output=True, timeout=None):
"""Runs a process at the os level
By default, returns the standard output and error output
If the caller sets optional output parameter to False,
@@ -35,7 +36,7 @@ def run_command(cmd, output=True):
with subprocess.Popen(shlex.split(cmd),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE) as process:
- stdout, stderr = process.communicate()
+ stdout, stderr = process.communicate(timeout=timeout)
if process.returncode != 0 and output:
err_msg = f'{stderr.strip()}. Code: {process.returncode}'
@@ -51,12 +52,15 @@ def run_command(cmd, output=True):
def interface_exists(interface):
+ """Checks whether an interface is available"""
return interface in netifaces.interfaces()
def prettify(mac_string):
+ """Formats a MAC address with colons"""
return ':'.join([f'{ord(b):02x}' for b in mac_string])
def get_host_user():
+ """Returns the username of the host user"""
user = get_os_user()
# If primary method failed, try secondary
@@ -66,6 +70,7 @@ def get_host_user():
return user
def get_os_user():
+ """Attempts to get the username using os library"""
user = None
try:
user = os.getlogin()
@@ -78,6 +83,7 @@ def get_os_user():
return user
def get_user():
+ """Attempts to get the host user using the getpass library"""
user = None
try:
user = getpass.getuser()
@@ -96,9 +102,11 @@ def get_user():
return user
def set_file_owner(path, owner):
+ """Change the owner of a file"""
run_command(f'chown -R {owner} {path}')
def get_module_display_name(search):
+ """Returns the display name of a test module"""
modules = {
'ntp': 'NTP',
'dns': 'DNS',
@@ -113,3 +121,32 @@ def get_module_display_name(search):
return module[1]
return 'Unknown'
+
+
+def diff_dicts(d1: t.Dict[t.Any, t.Any], d2: t.Dict[t.Any, t.Any]) -> t.Dict:
+ """Compares two dictionaries by keys
+
+ Args:
+ d1 (t.Dict[t.Any, t.Any]): first dict to compare
+ d2 (t.Dict[t.Any, t.Any]): second dict to compare
+
+ Returns:
+ t.Dict[t.Any, t.Any]: Returns an empty dictionary
+ if the compared dictionaries are equal,
+ otherwise returns a dictionary that contains
+ the removed items (if available)
+ and the added items (if available).
+ """
+ diff = {}
+ if d1 != d2:
+ s1 = set(d1)
+ s2 = set(d2)
+ keys_removed = s1 - s2
+ keys_added = s2 - s1
+ items_removed = {k:d1[k] for k in keys_removed}
+ items_added = {k:d2[k] for k in keys_added}
+ if items_removed:
+ diff['items_removed'] = items_removed
+ if items_added:
+ diff['items_added'] = items_added
+ return diff
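
For illustration, a minimal standalone sketch of how the new `diff_dicts` helper behaves; the interface maps below are hypothetical, not real Testrun data:

```python
# Assumes diff_dicts is importable from the module patched above
from common.util import diff_dicts

# Hypothetical adapter maps: keys are interface names, values MAC addresses
old_ifaces = {'eth0': '00:11:22:33:44:55', 'eth1': 'aa:bb:cc:dd:ee:01'}
new_ifaces = {'eth0': '00:11:22:33:44:55', 'usb0': 'aa:bb:cc:dd:ee:02'}

print(diff_dicts(old_ifaces, new_ifaces))
# {'items_removed': {'eth1': 'aa:bb:cc:dd:ee:01'},
#  'items_added': {'usb0': 'aa:bb:cc:dd:ee:02'}}
```

Note the comparison is key-based only: a value that changes under an existing key yields an empty diff, which is sufficient for detecting adapters being added or removed.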
diff --git a/framework/python/src/core/docker/docker_module.py b/framework/python/src/core/docker/docker_module.py
new file mode 100644
index 000000000..21dabdc16
--- /dev/null
+++ b/framework/python/src/core/docker/docker_module.py
@@ -0,0 +1,163 @@
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Represents the base module."""
+import docker
+from docker.models.containers import Container
+import os
+from common import logger
+import json
+
+IMAGE_PREFIX = 'testrun/'
+CONTAINER_PREFIX = 'tr-ct'
+DEFAULT_NETWORK = 'bridge'
+
+
+class Module:
+ """Represents the base module."""
+
+ def __init__(self,
+ module_config_file,
+ session,
+ docker_network=DEFAULT_NETWORK,
+ extra_hosts=None):
+ self._session = session
+ self.extra_hosts = extra_hosts
+
+ # Read the config file into a json object
+ with open(module_config_file, encoding='UTF-8') as config_file:
+ module_json = json.load(config_file)
+
+ self.docker_network = docker_network
+ # General module information
+ self.name = module_json['config']['meta']['name']
+ self.display_name = module_json['config']['meta']['display_name']
+ self.description = module_json['config']['meta']['description']
+ self.enabled = module_json['config'].get('enabled', True)
+ self.depends_on = module_json['config']['docker'].get('depends_on', None)
+
+ # Absolute path
+ # Store the root directory of Testrun based on the expected location
+ # Testrun/modules/<module type>/<module name>/conf -> 5 levels
+ self.root_path = os.path.abspath(
+ os.path.join(module_config_file, '../../../../..'))
+ self.dir = os.path.dirname(os.path.dirname(module_config_file))
+ self.dir_name = os.path.basename(self.dir)
+
+ # Docker settings
+ self.build_file = self.dir_name + '.Dockerfile'
+ self.image_name = f'{IMAGE_PREFIX}{self.dir_name}'
+ self.container_name = f'{CONTAINER_PREFIX}-{self.dir_name}'
+ if 'tests' in module_json['config']:
+ # Append Test module
+ self.image_name += '-test'
+ self.container_name += '-test'
+ self.enable_container = module_json['config']['docker'].get(
+ 'enable_container', True)
+ self.container: Container = None
+
+ self._add_logger(log_name=self.name, module_name=self.name)
+ self.setup_module(module_json)
+
+ def _add_logger(self, log_name, module_name, log_dir=None):
+ self.logger = logger.get_logger(
+ name=f'{log_name}_module', # pylint: disable=E1123
+ log_file=f'{module_name}_module',
+ log_dir=log_dir)
+
+ def build(self):
+ self.logger.debug('Building module ' + self.dir_name)
+ client = docker.from_env()
+ client.images.build(
+ dockerfile=os.path.join(self.dir, self.build_file),
+ path=self.dir,
+ forcerm=True, # Cleans up intermediate containers during build
+ tag=self.image_name)
+
+ def get_container(self):
+ container = None
+ try:
+ client = docker.from_env()
+ container = client.containers.get(self.container_name)
+ except docker.errors.NotFound:
+ self.logger.debug('Container ' + self.container_name + ' not found')
+ except docker.errors.APIError as error:
+ self.logger.error('Failed to resolve container')
+ self.logger.error(error)
+ return container
+
+ def get_session(self):
+ return self._session
+
+ def get_status(self):
+ self.container = self.get_container()
+ if self.container is not None:
+ return self.container.status
+ return None
+
+ def get_network(self):
+ return self.docker_network
+
+ def get_mounts(self):
+ return []
+
+ def get_environment(self, device=None): # pylint: disable=W0613
+ return {}
+
+ def setup_module(self, module_json):
+ pass
+
+ def _setup_runtime(self, device=None):
+ pass
+
+ def start(self, device=None):
+ self._setup_runtime(device)
+
+ self.logger.debug('Starting module ' + self.display_name)
+ network = self.get_network()
+ self.logger.debug(f"""Network: {network}, image name: {self.image_name},
+ container name: {self.container_name}""")
+
+ try:
+ client = docker.from_env()
+ self.container = client.containers.run(
+ self.image_name,
+ auto_remove=True,
+ cap_add=['NET_ADMIN'],
+ name=self.container_name,
+ hostname=self.container_name,
+ network_mode=network,
+ privileged=True,
+ detach=True,
+ mounts=self.get_mounts(),
+ environment=self.get_environment(device),
+ extra_hosts=self.extra_hosts if self.extra_hosts is not None else {})
+ except docker.errors.ContainerError as error:
+ self.logger.error('Container run error')
+ self.logger.error(error)
+
+ def stop(self, kill=False):
+ self.logger.debug('Stopping module ' + self.container_name)
+ try:
+ container = self.get_container()
+ if container is not None:
+ if kill:
+ self.logger.debug('Killing container: ' + self.container_name)
+ container.kill()
+ else:
+ self.logger.debug('Stopping container: ' + self.container_name)
+ container.stop()
+ self.logger.debug('Container stopped: ' + self.container_name)
+ except Exception as error: # pylint: disable=W0703
+ self.logger.error('Container stop error')
+ self.logger.error(error)
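
As a reference for the container lifecycle that `start()`, `get_container()` and `stop()` wrap, here is a minimal docker-py sketch in isolation; the image and container names are placeholders, not Testrun's:

```python
import docker

client = docker.from_env()

# Start a detached container, mirroring the key flags used by Module.start()
container = client.containers.run('alpine:latest',
                                  command='sleep 60',
                                  name='tr-ct-example',
                                  auto_remove=True,  # removed once it exits
                                  detach=True)       # returns immediately

# Resolve it again by name, as get_container() does
same = client.containers.get('tr-ct-example')
print(same.status)  # e.g. 'running'

same.stop()  # SIGTERM, then SIGKILL after the default grace period
```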
diff --git a/framework/python/src/core/docker/network_docker_module.py b/framework/python/src/core/docker/network_docker_module.py
new file mode 100644
index 000000000..6c892092a
--- /dev/null
+++ b/framework/python/src/core/docker/network_docker_module.py
@@ -0,0 +1,98 @@
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Represents a test module."""
+from core.docker.docker_module import Module
+import os
+from docker.types import Mount
+
+RUNTIME_DIR = 'runtime'
+RUNTIME_TEST_DIR = os.path.join(RUNTIME_DIR, 'test')
+DEFAULT_TIMEOUT = 60 # time in seconds
+DEFAULT_DOCKER_NETWORK = 'none'
+
+
+class NetworkModule(Module):
+ """Represents a test module."""
+
+ def __init__(self, module_config_file, session):
+ super().__init__(module_config_file=module_config_file,
+ docker_network=DEFAULT_DOCKER_NETWORK,
+ session=session)
+
+ def setup_module(self, module_json):
+ self.template = module_json['config']['docker'].get('template', False)
+ self.net_config = NetworkModuleNetConfig()
+ if self.enable_container:
+ self.net_config.enable_wan = module_json['config']['network'].get(
+ 'enable_wan', False)
+ self.net_config.host = module_json['config']['network'].get('host', False)
+ # Override default network if host is requested
+ if self.net_config.host:
+ self.docker_network = 'host'
+
+ if not self.net_config.host:
+ self.net_config.ip_index = module_json['config']['network'].get(
+ 'ip_index')
+
+ self.net_config.ipv4_address = self.get_session().get_ipv4_subnet()[
+ self.net_config.ip_index]
+ self.net_config.ipv4_network = self.get_session().get_ipv4_subnet()
+
+ self.net_config.ipv6_address = self.get_session().get_ipv6_subnet()[
+ self.net_config.ip_index]
+
+ self.net_config.ipv6_network = self.get_session().get_ipv6_subnet()
+
+ self._mounts = []
+ if 'mounts' in module_json['config']['docker']:
+ for mount_point in module_json['config']['docker']['mounts']:
+ self._mounts.append(
+ Mount(target=mount_point['target'],
+ source=os.path.join(os.getcwd(), mount_point['source']),
+ type='bind'))
+
+ def _setup_runtime(self, device):
+ pass
+
+ def get_environment(self, device=None): # pylint: disable=W0613
+ environment = {
+ 'TZ': self.get_session().get_timezone(),
+ 'HOST_USER': self.get_session().get_host_user()
+ }
+ return environment
+
+ def get_mounts(self):
+ return self._mounts
+
+
+class NetworkModuleNetConfig:
+ """Define all the properties of the network config for a network module"""
+
+ def __init__(self):
+
+ self.enable_wan = False
+
+ self.ip_index = 0
+ self.ipv4_address = None
+ self.ipv4_network = None
+ self.ipv6_address = None
+ self.ipv6_network = None
+
+ self.host = False
+
+ def get_ipv4_addr_with_prefix(self):
+ return format(self.ipv4_address) + '/' + str(self.ipv4_network.prefixlen)
+
+ def get_ipv6_addr_with_prefix(self):
+ return format(self.ipv6_address) + '/' + str(self.ipv6_network.prefixlen)
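
The `ip_index` lookups above rely on the session's subnets being indexable; a short sketch with the standard `ipaddress` module, assuming `get_ipv4_subnet()` returns an `IPv4Network` (an assumption implied by the `[]` indexing and `.prefixlen` access):

```python
import ipaddress

ipv4_network = ipaddress.ip_network('10.10.10.0/24')  # placeholder subnet
ip_index = 2

ipv4_address = ipv4_network[ip_index]  # IPv4Address('10.10.10.2')

# Equivalent of get_ipv4_addr_with_prefix()
print(format(ipv4_address) + '/' + str(ipv4_network.prefixlen))  # 10.10.10.2/24
```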
diff --git a/framework/python/src/core/docker/test_docker_module.py b/framework/python/src/core/docker/test_docker_module.py
new file mode 100644
index 000000000..3198ef1ba
--- /dev/null
+++ b/framework/python/src/core/docker/test_docker_module.py
@@ -0,0 +1,157 @@
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Represents a test module."""
+from core.docker.docker_module import Module
+from test_orc.test_case import TestCase
+import os
+import json
+from common import util
+from docker.types import Mount
+
+RUNTIME_DIR = 'runtime'
+RUNTIME_TEST_DIR = os.path.join(RUNTIME_DIR, 'test')
+DEFAULT_TIMEOUT = 60 # time in seconds
+
+
+class TestModule(Module):
+ """Represents a test module."""
+
+ def __init__(self, module_config_file, test_orc, session, extra_hosts):
+ super().__init__(module_config_file=module_config_file,
+ session=session,
+ extra_hosts=extra_hosts)
+
+ self._test_orc = test_orc
+
+ # Set IP Index for all test modules
+ self.ip_index = 9
+
+ def setup_module(self, module_json):
+ # Set the defaults
+ self.network = True
+ self.total_tests = 0
+ self.tests: list = []
+
+ self.timeout = self._get_module_timeout(module_json)
+
+ # Determine if this module needs network access
+ if 'network' in module_json['config']:
+ self.network = module_json['config']['network']
+
+ # Load test cases
+ if 'tests' in module_json['config']:
+ self.total_tests = len(module_json['config']['tests'])
+ for test_case_json in module_json['config']['tests']:
+ try:
+ test_case = TestCase(
+ name=test_case_json['name'],
+ description=test_case_json['test_description'],
+ expected_behavior=test_case_json['expected_behavior'])
+
+ # Check if steps to resolve have been specified
+ if 'recommendations' in test_case_json:
+ test_case.recommendations = test_case_json['recommendations']
+
+ self.tests.append(test_case)
+ except Exception as error: # pylint: disable=W0718
+ self.logger.error('Failed to load test case. See error for details')
+ self.logger.error(error)
+
+ def _setup_runtime(self, device):
+ self.device_test_dir = os.path.join(self.root_path, RUNTIME_TEST_DIR,
+ device.mac_addr.replace(':', ''))
+
+ self.container_runtime_dir = os.path.join(self.device_test_dir, self.name)
+ os.makedirs(self.container_runtime_dir, exist_ok=True)
+
+ self.container_log_file = os.path.join(self.container_runtime_dir,
+ 'module.log')
+
+ self.config_file = os.path.join(self.root_path, 'local/system.json')
+ self.root_certs_dir = os.path.join(self.root_path, 'local/root_certs')
+
+ self.network_runtime_dir = os.path.join(self.root_path, 'runtime/network')
+
+ self.device_startup_capture = os.path.join(self.device_test_dir,
+ 'startup.pcap')
+ host_user = self.get_session().get_host_user()
+ util.run_command(f'chown -R {host_user} {self.device_startup_capture}')
+
+ self.device_monitor_capture = os.path.join(self.device_test_dir,
+ 'monitor.pcap')
+ util.run_command(f'chown -R {host_user} {self.device_monitor_capture}')
+
+ def get_environment(self, device):
+
+ # Obtain the test pack
+ test_pack = self._test_orc.get_test_pack(device.test_pack)
+
+ environment = {
+ 'TZ': self.get_session().get_timezone(),
+ 'HOST_USER': self.get_session().get_host_user(),
+ 'DEVICE_MAC': device.mac_addr,
+ 'IPV4_ADDR': device.ip_addr,
+ 'DEVICE_TEST_MODULES': json.dumps(device.test_modules),
+ 'DEVICE_TEST_PACK': json.dumps(test_pack.to_dict()),
+ 'IPV4_SUBNET': self.get_session().get_ipv4_subnet(),
+ 'IPV6_SUBNET': self.get_session().get_ipv6_subnet(),
+ 'DEV_IFACE': self.get_session().get_device_interface(),
+ 'DEV_IFACE_MAC': self.get_session().get_device_interface_mac_addr()
+ }
+ return environment
+
+ def get_mounts(self):
+ mounts = [
+ Mount(target='/testrun/system.json',
+ source=self.config_file,
+ type='bind',
+ read_only=True),
+ Mount(target='/testrun/root_certs',
+ source=self.root_certs_dir,
+ type='bind',
+ read_only=True),
+ Mount(target='/runtime/output',
+ source=self.container_runtime_dir,
+ type='bind'),
+ Mount(target='/runtime/network',
+ source=self.network_runtime_dir,
+ type='bind',
+ read_only=True),
+ Mount(target='/runtime/device/startup.pcap',
+ source=self.device_startup_capture,
+ type='bind',
+ read_only=True),
+ Mount(target='/runtime/device/monitor.pcap',
+ source=self.device_monitor_capture,
+ type='bind',
+ read_only=True)
+ ]
+ return mounts
+
+ def _get_module_timeout(self, module_json):
+ timeout = DEFAULT_TIMEOUT
+ try:
+ timeout = DEFAULT_TIMEOUT
+ test_modules = self.get_session().get_config().get('test_modules', {})
+ test_config = test_modules.get(self.name, {})
+ sys_timeout = test_config.get('timeout', None)
+
+ if sys_timeout is not None:
+ timeout = sys_timeout
+ elif 'timeout' in module_json['config']['docker']:
+ timeout = module_json['config']['docker']['timeout']
+ except Exception: # pylint: disable=W0718
+ # Ignore errors, just use default
+ timeout = DEFAULT_TIMEOUT
+ return timeout # pylint: disable=W0150
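
The timeout resolution above gives the system config priority over the module's own docker config, falling back to the default; an isolated sketch of that precedence:

```python
DEFAULT_TIMEOUT = 60  # time in seconds

def resolve_timeout(system_config: dict, module_json: dict, name: str) -> int:
  """Sketch of _get_module_timeout: system config > module JSON > default."""
  sys_timeout = (system_config.get('test_modules', {})
                 .get(name, {})
                 .get('timeout'))
  if sys_timeout is not None:
    return sys_timeout
  return module_json['config']['docker'].get('timeout', DEFAULT_TIMEOUT)

assert resolve_timeout({}, {'config': {'docker': {}}}, 'dns') == 60
assert resolve_timeout({'test_modules': {'dns': {'timeout': 300}}},
                       {'config': {'docker': {'timeout': 120}}}, 'dns') == 300
```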
diff --git a/framework/python/src/common/session.py b/framework/python/src/core/session.py
similarity index 55%
rename from framework/python/src/common/session.py
rename to framework/python/src/core/session.py
index f555a9732..f2e5466d3 100644
--- a/framework/python/src/common/session.py
+++ b/framework/python/src/core/session.py
@@ -17,8 +17,11 @@
import pytz
import json
import os
-from common import util, logger
+from fastapi.encoders import jsonable_encoder
+from common import util, logger, mqtt
from common.risk_profile import RiskProfile
+from common.statuses import TestrunStatus, TestResult
+from net_orc.ip_control import IPControl
# Certificate dependencies
from cryptography import x509
@@ -34,9 +37,13 @@
API_URL_KEY = 'api_url'
API_PORT_KEY = 'api_port'
MAX_DEVICE_REPORTS_KEY = 'max_device_reports'
+ORG_NAME_KEY = 'org_name'
+TEST_CONFIG_KEY = 'test_modules'
CERTS_PATH = 'local/root_certs'
CONFIG_FILE_PATH = 'local/system.json'
-SECONDS_IN_YEAR = 31536000
+STATUS_TOPIC = 'status'
+
+MAKE_CONTROL_DIR = 'make/DEBIAN/control'
PROFILE_FORMAT_PATH = 'resources/risk_assessment.json'
PROFILES_DIR = 'local/risk_profiles'
@@ -44,13 +51,41 @@
LOGGER = logger.get_logger('session')
+def session_tracker(method):
+ """Session changes tracker."""
+ def wrapper(self, *args, **kwargs):
+
+ result = method(self, *args, **kwargs)
+
+ if self.get_status() != TestrunStatus.IDLE:
+ self.get_mqtt_client().send_message(
+ STATUS_TOPIC,
+ jsonable_encoder(self.to_json())
+ )
+
+ return result
+ return wrapper
+
+def apply_session_tracker(cls):
+ """Applies tracker decorator to class methods"""
+ for attr in dir(cls):
+ if (callable(getattr(cls, attr))
+ and not attr.startswith('_')
+ and not attr.startswith('get')
+ and not attr == 'to_json'
+ ):
+ setattr(cls, attr, session_tracker(getattr(cls, attr)))
+ return cls
+
+@apply_session_tracker
class TestrunSession():
- """Represents the current session of Test Run."""
+ """Represents the current session of Testrun."""
def __init__(self, root_dir):
self._root_dir = root_dir
- self._status = 'Idle'
+ self._status = TestrunStatus.IDLE
+ self._description = None
# Target test device
self._device = None
@@ -93,11 +128,20 @@ def __init__(self, root_dir):
self._config_file = os.path.join(root_dir, CONFIG_FILE_PATH)
self._config = self._get_default_config()
+ # System network interfaces
+ self._ifaces = {}
# Loading methods
self._load_version()
self._load_config()
self._load_profiles()
+ # Network information
+ self._ipv4_subnet = None
+ self._ipv6_subnet = None
+
+ # Store host user for permissions use
+ self._host_user = util.get_host_user()
+
self._certs = []
self.load_certs()
@@ -107,9 +151,12 @@ def __init__(self, root_dir):
self._timezone = tz[0]
LOGGER.debug(f'System timezone is {self._timezone}')
+ # MQTT client
+ self._mqtt_client = mqtt.MQTT()
+
def start(self):
self.reset()
- self._status = 'Waiting for Device'
+ self._status = TestrunStatus.WAITING_FOR_DEVICE
self._started = datetime.datetime.now()
def get_started(self):
@@ -119,14 +166,14 @@ def get_finished(self):
return self._finished
def stop(self):
- self.set_status('Stopping')
+ self.set_status(TestrunStatus.STOPPING)
self.finish()
def finish(self):
# Set any in progress test results to Error
for test_result in self._results:
- if test_result.result == 'In Progress':
- test_result.result = 'Error'
+ if test_result.result == TestResult.IN_PROGRESS:
+ test_result.result = TestResult.ERROR
self._finished = datetime.datetime.now()
@@ -141,7 +188,9 @@ def _get_default_config(self):
'monitor_period': 30,
'max_device_reports': 0,
'api_url': 'http://localhost',
- 'api_port': 8000
+ 'api_port': 8000,
+ 'org_name': '',
+ 'single_intf': False,
}
def get_config(self):
@@ -161,7 +210,7 @@ def _load_config(self):
# Network interfaces
if (NETWORK_KEY in config_file_json
and DEVICE_INTF_KEY in config_file_json.get(NETWORK_KEY)
- and INTERNET_INTF_KEY in config_file_json.get(NETWORK_KEY)):
+ and INTERNET_INTF_KEY in config_file_json.get(NETWORK_KEY)):
self._config[NETWORK_KEY][DEVICE_INTF_KEY] = config_file_json.get(
NETWORK_KEY, {}).get(DEVICE_INTF_KEY)
self._config[NETWORK_KEY][INTERNET_INTF_KEY] = config_file_json.get(
@@ -188,13 +237,21 @@ def _load_config(self):
self._config[MAX_DEVICE_REPORTS_KEY] = config_file_json.get(
MAX_DEVICE_REPORTS_KEY)
- LOGGER.debug(self._config)
+ if ORG_NAME_KEY in config_file_json:
+ self._config[ORG_NAME_KEY] = config_file_json.get(
+ ORG_NAME_KEY
+ )
+
+ if TEST_CONFIG_KEY in config_file_json:
+ self._config[TEST_CONFIG_KEY] = config_file_json.get(
+ TEST_CONFIG_KEY
+ )
def _load_version(self):
version_cmd = util.run_command(
'dpkg-query --showformat=\'${Version}\' --show testrun')
# index 1 of response is the stderr byte stream so if
# it has any data in it, there was an error and we
# did not resolve the version and we'll use the fallback
if len(version_cmd[1]) == 0:
version = version_cmd[0]
@@ -202,14 +259,37 @@ def _load_version(self):
else:
LOGGER.debug('Failed getting the version from dpkg-query')
# Try getting the version from the make control file
+
+ # Check if MAKE_CONTROL_DIR exists
+ if not os.path.exists(MAKE_CONTROL_DIR):
+ LOGGER.error('make/DEBIAN/control file path not found')
+ self._version = 'Unknown'
+ return
+
try:
- version = util.run_command(
- '$(grep -R "Version: " $MAKE_CONTROL_DIR | awk "{print $2}"')
- except Exception as e:
+ # Run the grep command to find the version line
+ grep_cmd = util.run_command(f'grep -R "Version: " {MAKE_CONTROL_DIR}')
+
+ if grep_cmd[0] and len(grep_cmd[1]) == 0:
+ # Extract the version number from grep
+ version = grep_cmd[0].split()[1]
+ self._version = version
+ LOGGER.debug(f'Testrun version is: {self._version}')
+
+ else:
+ # Error handling if grep can't find the version line
+ self._version = 'Unknown'
+ LOGGER.debug(f'Testrun version is {self._version}')
+ raise Exception('Version line not found in make control file')
+
+ except Exception as e: # pylint: disable=W0703
LOGGER.debug('Failed getting the version from make control file')
LOGGER.error(e)
self._version = 'Unknown'
+ def get_host_user(self):
+ return self._host_user
+
def get_version(self):
return self._version
@@ -225,11 +305,17 @@ def get_runtime_params(self):
return self._runtime_params
def add_runtime_param(self, param):
+ if param == 'single_intf':
+ self._config['single_intf'] = True
self._runtime_params.append(param)
def get_device_interface(self):
return self._config.get(NETWORK_KEY, {}).get(DEVICE_INTF_KEY)
+ def get_device_interface_mac_addr(self):
+ iface = self.get_device_interface()
+ return IPControl.get_iface_mac_address(iface=iface)
+
def get_internet_interface(self):
return self._config.get(NETWORK_KEY, {}).get(INTERNET_INTF_KEY)
@@ -253,7 +339,7 @@ def set_config(self, config_json):
self._save_config()
# Update log level
- LOGGER.debug(f'Setting log level to {config_json["log_level"]}')
+ LOGGER.debug(f'Setting log level to {config_json["log_level"]}') # pylint: disable=W1405
logger.set_log_level(config_json['log_level'])
def set_target_device(self, device):
@@ -291,12 +377,21 @@ def get_device(self, mac_addr):
def remove_device(self, device):
self._device_repository.remove(device)
+ def get_ipv4_subnet(self):
+ return self._ipv4_subnet
+
+ def get_ipv6_subnet(self):
+ return self._ipv6_subnet
+
def get_status(self):
return self._status
def set_status(self, status):
self._status = status
+ def set_description(self, desc: str):
+ self._description = desc
+
def get_test_results(self):
return self._results
@@ -322,16 +417,51 @@ def add_test_result(self, result):
# result type is TestCase object
if test_result.name == result.name:
- # Just update the result and description
- test_result.result = result.result
- test_result.description = result.description
- test_result.recommendations = result.recommendations
+ # Just update the result, description and recommendations
+ if len(result.description) != 0:
+ test_result.description = result.description
+
+ # Add recommendations if provided
+ if result.recommendations is not None:
+ test_result.recommendations = result.recommendations
+
+ if len(result.recommendations) == 0:
+ test_result.recommendations = None
+
+ if result.result is not None:
+
+ # Any informational test should always report informational
+ if test_result.required_result == 'Informational':
+
+ # Set test result to informational
+ if result.result in [
+ TestResult.NON_COMPLIANT,
+ TestResult.COMPLIANT,
+ TestResult.INFORMATIONAL
+ ]:
+ test_result.result = TestResult.INFORMATIONAL
+ else:
+ test_result.result = result.result
+
+ # Copy any test recommendations to optional
+ test_result.optional_recommendations = result.recommendations
+
+ # Remove recommendations from informational tests
+ test_result.recommendations = None
+ else:
+ test_result.result = result.result
+
updated = True
if not updated:
- result.result = 'In Progress'
self._results.append(result)
+ def set_test_result_error(self, result):
+ """Set test result error"""
+ result.result = TestResult.ERROR
+ result.recommendations = None
+ self._results.append(result)
+
def add_module_report(self, module_report):
self._module_reports.append(module_report)
@@ -357,15 +487,18 @@ def get_report_url(self):
def set_report_url(self, url):
self._report_url = url
+ def set_subnets(self, ipv4_subnet, ipv6_subnet):
+ self._ipv4_subnet = ipv4_subnet
+ self._ipv6_subnet = ipv6_subnet
+
def _load_profiles(self):
# Load format of questionnaire
LOGGER.debug('Loading risk assessment format')
try:
- with open(os.path.join(
- self._root_dir, PROFILE_FORMAT_PATH
- ), encoding='utf-8') as profile_format_file:
+ with open(os.path.join(self._root_dir, PROFILE_FORMAT_PATH),
+ encoding='utf-8') as profile_format_file:
format_json = json.load(profile_format_file)
# Save original profile format for internal validation
self._profile_format = format_json
@@ -374,6 +507,10 @@ def _load_profiles(self):
'An error occurred whilst loading the risk assessment format')
LOGGER.debug(e)
+ # If the format JSON fails to load, skip loading profiles
+ LOGGER.error('Profiles will not be loaded')
+ return
+
profile_format_array = []
# Remove internal properties
@@ -398,21 +535,39 @@ def _load_profiles(self):
try:
for risk_profile_file in os.listdir(
- os.path.join(self._root_dir, PROFILES_DIR)):
+ os.path.join(self._root_dir, PROFILES_DIR)):
+
LOGGER.debug(f'Discovered profile {risk_profile_file}')
+ # Open the risk profile file
with open(os.path.join(self._root_dir, PROFILES_DIR, risk_profile_file),
encoding='utf-8') as f:
- json_data = json.load(f)
- risk_profile = RiskProfile()
- risk_profile.load(
- profile_json=json_data,
- profile_format=self._profile_format
- )
- risk_profile.status = self.check_profile_status(risk_profile)
+
+ # Parse risk profile json
+ json_data: dict = json.load(f)
+
+ # Validate profile JSON
+ if not self.validate_profile_json(json_data):
+ LOGGER.error('Profile failed validation')
+ continue
+
+ # Instantiate a new risk profile
+ risk_profile: RiskProfile = RiskProfile()
+
+ # Assign the profile questions
+ questions: list[dict] = json_data.get('questions')
+
+ # Pass only the valid questions to the risk profile
+ json_data['questions'] = self._remove_invalid_questions(questions)
+
+ # Pass JSON to populate risk profile
+ risk_profile.load(profile_json=json_data,
+ profile_format=self._profile_format)
+
+ # Add risk profile to session
self._profiles.append(risk_profile)
- except Exception as e:
+ except Exception as e: # pylint: disable=W0703
LOGGER.error('An error occurred whilst loading risk profiles')
LOGGER.debug(e)
@@ -428,25 +583,6 @@ def get_profile(self, name):
return profile
return None
- def validate_profile(self, profile_json):
-
- # Check name field is present
- if 'name' not in profile_json:
- return False
-
- # Check questions field is present
- if 'questions' not in profile_json:
- return False
-
- # Check all questions are present
- for format_q in self.get_profiles_format():
- if self._get_profile_question(profile_json,
- format_q.get('question')) is None:
- LOGGER.error('Missing question: ' + format_q.get('question'))
- return False
-
- return True
-
def _get_profile_question(self, profile_json, question):
for q in profile_json.get('questions'):
@@ -455,7 +591,14 @@ def _get_profile_question(self, profile_json, question):
return None
+ def get_profile_format_question(self, question):
+ for q in self.get_profiles_format():
+ if q.get('question') == question:
+ return q
+
def update_profile(self, profile_json):
+ """Update the risk profile with the provided JSON.
+ The content has already been validated in the API"""
profile_name = profile_json['name']
@@ -463,45 +606,19 @@ def update_profile(self, profile_json):
profile_json['version'] = self.get_version()
profile_json['created'] = datetime.datetime.now().strftime('%Y-%m-%d')
- if 'status' in profile_json and profile_json.get('status') == 'Valid':
- # Attempting to submit a risk profile, we need to check it
-
- # Check all questions have been answered
- all_questions_answered = True
-
- for question in self.get_profiles_format():
-
- # Check question is present
- profile_question = self._get_profile_question(profile_json,
- question.get('question'))
-
- if profile_question is not None:
-
- # Check answer is present
- if 'answer' not in profile_question:
- LOGGER.error('Missing answer for question: ' +
- question.get('question'))
- all_questions_answered = False
+ # Assign the profile questions
+ questions: list[dict] = profile_json.get('questions')
- else:
- LOGGER.error('Missing question: ' + question.get('question'))
- all_questions_answered = False
-
- if not all_questions_answered:
- LOGGER.error('Not all questions answered')
- return None
-
- else:
- profile_json['status'] = 'Draft'
+ # Pass only the valid questions to the risk profile
+ profile_json['questions'] = self._remove_invalid_questions(questions)
+ # Check if profile already exists
risk_profile = self.get_profile(profile_name)
-
if risk_profile is None:
# Create a new risk profile
- risk_profile = RiskProfile(
- profile_json=profile_json,
- profile_format=self._profile_format)
+ risk_profile = RiskProfile(profile_json=profile_json,
+ profile_format=self._profile_format)
self._profiles.append(risk_profile)
else:
@@ -524,19 +641,128 @@ def update_profile(self, profile_json):
return risk_profile
- def check_profile_status(self, profile):
+ def _remove_invalid_questions(self, questions):
+ """Remove unrecognised questions from the profile"""
+
+ # Store valid questions
+ valid_questions = []
+
+ # Remove any additional (outdated) questions from the profile
+ for question in questions:
- if profile.status == 'Valid':
+ # Check if question exists in the profile format
+ if self.get_profile_format_question(
+ question=question['question']) is not None:
- # Check expiry
- created_date = profile.created.timestamp()
+ # Add the question to the valid_questions
+ valid_questions.append(question)
- today = datetime.datetime.now().timestamp()
+ else:
+ LOGGER.debug(f'Removed unrecognised question: {question["question"]}')
- if created_date < (today - SECONDS_IN_YEAR):
- profile.status = 'Expired'
+ # Return the list of valid questions
+ return valid_questions
- return profile.status
+ def validate_profile_json(self, profile_json):
+ """Validate properties in profile update requests"""
+
+ # Get the status field
+ valid = False
+ if 'status' in profile_json and profile_json.get('status') == 'Valid':
+ valid = True
+
+ # Check if 'name' exists in profile
+ if 'name' not in profile_json:
+ LOGGER.error('Missing "name" in profile')
+ return False
+
+ # Check if 'name' field not empty
+ elif len(profile_json.get('name').strip()) == 0:
+ LOGGER.error('Name field left empty')
+ return False
+
+ # Error handling if 'questions' not in request
+ if 'questions' not in profile_json and valid:
+ LOGGER.error('Missing "questions" field in profile')
+ return False
+
+ # Validating the questions section
+ for question in profile_json.get('questions'):
+
+ # Check if the question field is present
+ if 'question' not in question:
+ LOGGER.error('The "question" field is missing')
+ return False
+
+ # Check if 'question' field not empty
+ elif len(question.get('question').strip()) == 0:
+ LOGGER.error('A question is missing from "question" field')
+ return False
+
+ # Check if question is a recognised question
+ format_q = self.get_profile_format_question(
+ question.get('question'))
+
+ if format_q is None:
+ LOGGER.error(f'Unrecognised question: {question.get("question")}')
+ # Just ignore additional questions
+ continue
+
+ # Error handling if 'answer' is missing
+ if 'answer' not in question and valid:
+ LOGGER.error('The answer field is missing')
+ return False
+
+ # If answer is present, check the validation rules
+ else:
+
+ # Extract the answer out of the profile
+ answer = question.get('answer')
+
+ # Get the validation rules
+ field_type = format_q.get('type')
+
+ # Check if type is string or single select, answer should be a string
+ if ((field_type in ['string', 'select'])
+ and not isinstance(answer, str)):
+ LOGGER.error(f'''Answer for question \
+{question.get('question')} is incorrect data type''')
+ return False
+
+ # Check if type is select, answer must be from list
+ if field_type == 'select' and valid:
+ possible_answers = format_q.get('options')
+ if answer not in possible_answers:
+ LOGGER.error(f'''Answer for question \
+{question.get('question')} is not valid''')
+ return False
+
+ # Validate select multiple field types
+ if field_type == 'select-multiple':
+
+ if not isinstance(answer, list):
+ LOGGER.error(f'''Answer for question \
+{question.get('question')} is incorrect data type''')
+ return False
+
+ question_options_len = len(format_q.get('options'))
+
+ # We know it is a list, now check the indexes
+ for index in answer:
+
+ # Check if the index is an integer
+ if not isinstance(index, int):
+ LOGGER.error(f'''Answer for question \
+{question.get('question')} is incorrect data type''')
+ return False
+
+ # Check if index is 0 or above and less than the num of options
+ if index < 0 or index >= question_options_len:
+ LOGGER.error(f'''Invalid index provided as answer for \
+question {question.get('question')}''')
+ return False
+
+ return True
def delete_profile(self, profile):
@@ -551,13 +777,14 @@ def delete_profile(self, profile):
return True
- except Exception as e:
+ except Exception as e: # pylint: disable=W0703
LOGGER.error('An error occurred whilst deleting a profile')
LOGGER.debug(e)
return False
def reset(self):
- self.set_status('Idle')
+ self.set_status(TestrunStatus.IDLE)
+ self.set_description(None)
self.set_target_device(None)
self._report_url = None
self._total_tests = 0
@@ -565,6 +792,7 @@ def reset(self):
self._results = []
self._started = None
self._finished = None
+ self._ifaces = IPControl.get_sys_interfaces()
def to_json(self):
@@ -589,6 +817,9 @@ def to_json(self):
if self._report_url is not None:
session_json['report'] = self.get_report_url()
+ if self._description is not None:
+ session_json['description'] = self._description
+
return session_json
def get_timezone(self):
@@ -601,17 +832,30 @@ def upload_cert(self, filename, content):
# Parse bytes into x509 object
cert = x509.load_pem_x509_certificate(content, default_backend())
- # Extract required properties
- common_name = cert.subject.get_attributes_for_oid(
- NameOID.COMMON_NAME)[0].value
+ # Retrieve the common name attributes from the subject
+ common_name_attr = cert.subject.get_attributes_for_oid(NameOID.COMMON_NAME)
+
+ # Raise an error if the common name attribute is missing
+ if not common_name_attr:
+ raise ValueError('Certificate is missing the common name')
+
+ # Extract the common name value
+ common_name = common_name_attr[0].value
# Check if any existing certificates have the same common name
for cur_cert in self._certs:
if common_name == cur_cert['name']:
raise ValueError('A certificate with that name already exists')
- issuer = cert.issuer.get_attributes_for_oid(
- NameOID.ORGANIZATION_NAME)[0].value
+ # Retrieve the organization name attributes from issuer
+ issuer_attr = cert.issuer.get_attributes_for_oid(NameOID.ORGANIZATION_NAME)
+
+ # Raise an error if the organization name attribute is missing
+ if not issuer_attr:
+ raise ValueError('Certificate is missing the organization name')
+
+ # Extract the organization name value
+ issuer = issuer_attr[0].value
status = 'Valid'
if now > cert.not_valid_after_utc:
@@ -650,6 +894,11 @@ def load_certs(self):
self._certs = []
for cert_file in os.listdir(CERTS_PATH):
+
+ # Ignore directories
+ if os.path.isdir(os.path.join(CERTS_PATH, cert_file)):
+ continue
+
LOGGER.debug(f'Loading certificate {cert_file}')
try:
@@ -685,7 +934,7 @@ def load_certs(self):
self._certs.append(cert_obj)
LOGGER.debug(f'Successfully loaded {cert_file}')
- except Exception as e:
+ except Exception as e: # pylint: disable=W0703
LOGGER.error(f'An error occurred whilst loading {cert_file}')
LOGGER.debug(e)
@@ -705,10 +954,32 @@ def delete_cert(self, filename):
self._certs.remove(cert)
return True
- except Exception as e:
+ except Exception as e: # pylint: disable=W0703
LOGGER.error('An error occurred whilst deleting the certificate')
LOGGER.debug(e)
return False
def get_certs(self):
return self._certs
+
+ def detect_network_adapters_change(self) -> dict:
+ adapters = {}
+ ifaces_new = IPControl.get_sys_interfaces()
+
+ # Difference between stored and newly received network interfaces
+ diff = util.diff_dicts(self._ifaces, ifaces_new)
+ if diff:
+ if 'items_added' in diff:
+ adapters['adapters_added'] = diff['items_added']
+ if 'items_removed' in diff:
+ adapters['adapters_removed'] = diff['items_removed']
+ # Save new network interfaces to session
+ LOGGER.debug(f'Network adapters change detected: {adapters}')
+ self._ifaces = ifaces_new
+ return adapters
+
+ def get_mqtt_client(self):
+ return self._mqtt_client
+
+ def get_ifaces(self):
+ return self._ifaces
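
The `session_tracker`/`apply_session_tracker` pair at the top of this file wraps every public, non-getter method so a status message is published after each state change. A minimal standalone sketch of the same pattern, with a `print` standing in for the MQTT publish:

```python
def tracked(method):
  def wrapper(self, *args, **kwargs):
    result = method(self, *args, **kwargs)
    print(f'state changed by {method.__name__}')  # stands in for MQTT publish
    return result
  return wrapper

def apply_tracker(cls):
  # Wrap public methods, skipping getters (same filter as above)
  for attr in dir(cls):
    if (callable(getattr(cls, attr)) and not attr.startswith('_')
        and not attr.startswith('get')):
      setattr(cls, attr, tracked(getattr(cls, attr)))
  return cls

@apply_tracker
class Session:
  def __init__(self):
    self.status = 'Idle'

  def set_status(self, status):  # wrapped
    self.status = status

  def get_status(self):  # skipped by the 'get' filter
    return self.status

Session().set_status('In Progress')  # -> state changed by set_status
```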
diff --git a/framework/python/src/core/tasks.py b/framework/python/src/core/tasks.py
new file mode 100644
index 000000000..5da0b40c9
--- /dev/null
+++ b/framework/python/src/core/tasks.py
@@ -0,0 +1,78 @@
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Periodic background tasks"""
+
+from contextlib import asynccontextmanager
+import datetime
+import logging
+
+from apscheduler.schedulers.asyncio import AsyncIOScheduler
+from fastapi import FastAPI
+
+from common import logger
+
+# Check adapters period seconds
+CHECK_NETWORK_ADAPTERS_PERIOD = 5
+CHECK_INTERNET_PERIOD = 2
+INTERNET_CONNECTION_TOPIC = 'events/internet'
+NETWORK_ADAPTERS_TOPIC = 'events/adapter'
+
+LOGGER = logger.get_logger('tasks')
+
+
+class PeriodicTasks:
+ """Background periodic tasks
+ """
+ def __init__(
+ self, testrun_obj,
+ ) -> None:
+ self._testrun = testrun_obj
+ self._mqtt_client = self._testrun.get_mqtt_client()
+ local_tz = datetime.datetime.now().astimezone().tzinfo
+ self._scheduler = AsyncIOScheduler(timezone=local_tz)
+ # Prevent scheduler warnings
+ self._scheduler._logger.setLevel(logging.ERROR)
+
+ self.adapters_checker_job = self._scheduler.add_job(
+ func=self._testrun.get_net_orc().network_adapters_checker,
+ kwargs={
+ 'mqtt_client': self._mqtt_client,
+ 'topic': NETWORK_ADAPTERS_TOPIC
+ },
+ trigger='interval',
+ seconds=CHECK_NETWORK_ADAPTERS_PERIOD,
+ )
+ # Add the internet connection checking job only when not in single-intf mode
+ if 'single_intf' not in self._testrun.get_session().get_runtime_params():
+ self.internet_checker = self._scheduler.add_job(
+ func=self._testrun.get_net_orc().internet_conn_checker,
+ kwargs={
+ 'mqtt_client': self._mqtt_client,
+ 'topic': INTERNET_CONNECTION_TOPIC
+ },
+ trigger='interval',
+ seconds=CHECK_INTERNET_PERIOD,
+ )
+
+ @asynccontextmanager
+ async def start(self, app: FastAPI): # pylint: disable=unused-argument
+ """Start background tasks
+
+ Args:
+ app (FastAPI): app instance
+ """
+ # Job that checks for changes in network adapters
+ self._scheduler.start()
+ yield
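
For context, `start` is shaped as an asynccontextmanager so it can serve as the FastAPI lifespan handler. A minimal standalone equivalent of that wiring (the app and job here are illustrative):

```python
from contextlib import asynccontextmanager

from apscheduler.schedulers.asyncio import AsyncIOScheduler
from fastapi import FastAPI

scheduler = AsyncIOScheduler()
scheduler.add_job(lambda: print('tick'), trigger='interval', seconds=5)

@asynccontextmanager
async def lifespan(app: FastAPI):
  scheduler.start()  # jobs run on the app's event loop
  yield
  scheduler.shutdown()

app = FastAPI(lifespan=lifespan)  # startup starts the scheduler, shutdown stops it
```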
diff --git a/framework/python/src/core/test_runner.py b/framework/python/src/core/test_runner.py
index 870e97752..0a8f69e43 100644
--- a/framework/python/src/core/test_runner.py
+++ b/framework/python/src/core/test_runner.py
@@ -11,7 +11,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-
"""Wrapper for the Testrun that simplifies
virtual testing procedure by allowing direct calling
from the command line.
@@ -25,6 +24,7 @@
from testrun import Testrun
from common import logger
import signal
+import io
LOGGER = logger.get_logger("runner")
@@ -37,13 +37,17 @@ def __init__(self,
validate=False,
net_only=False,
single_intf=False,
- no_ui=False):
+ no_ui=False,
+ target=None,
+ firmware=None):
self._register_exits()
- self.test_run = Testrun(config_file=config_file,
+ self._testrun = Testrun(config_file=config_file,
validate=validate,
net_only=net_only,
single_intf=single_intf,
- no_ui=no_ui)
+ no_ui=no_ui,
+ target_mac=target,
+ firmware=firmware)
def _register_exits(self):
signal.signal(signal.SIGINT, self._exit_handler)
@@ -62,7 +66,7 @@ def _exit_handler(self, signum, arg): # pylint: disable=unused-argument
sys.exit(1)
def stop(self):
- self.test_run.stop()
+ self._testrun.stop()
def parse_args():
@@ -73,8 +77,7 @@ def parse_args():
"-f",
"--config-file",
default=None,
- help="Define the configuration file for Testrun and Network Orchestrator"
- )
+ help="Define the configuration file for Testrun and Network Orchestrator")
parser.add_argument(
"--validate",
default=False,
@@ -91,7 +94,38 @@ def parse_args():
default=False,
action="store_true",
help="Do not launch the user interface")
+ parser.add_argument("--target",
+ default=None,
+ type=str,
+ help="MAC address of the target device")
+ parser.add_argument("-fw",
+ "--firmware",
+ default=None,
+ type=str,
+ help="Firmware version to be tested")
+
parsed_args = parser.parse_known_args()[0]
+
+ if (parsed_args.no_ui and not parsed_args.net_only
+ and (parsed_args.target is None or parsed_args.firmware is None)):
+ # Capture help text
+ help_text = io.StringIO()
+ parser.print_help(file=help_text)
+
+ # Get help text as lines and find where "Testrun" starts (skip usage)
+ help_lines = help_text.getvalue().splitlines()
+ start_index = next(
+ (i for i, line in enumerate(help_lines) if "Testrun" in line), 0)
+
+ # Join only lines starting from "Testrun" and print without extra newlines
+ help_message = "\n".join(line.rstrip() for line in help_lines[start_index:])
+ print(help_message)
+
+ print(
+ "Error: --target and --firmware are required when --no-ui is specified",
+ file=sys.stderr)
+ sys.exit(1)
+
return parsed_args
@@ -101,4 +135,6 @@ def parse_args():
validate=args.validate,
net_only=args.net_only,
single_intf=args.single_intf,
- no_ui=args.no_ui)
+ no_ui=args.no_ui,
+ target=args.target,
+ firmware=args.firmware)
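
With the new arguments in place, a headless run would be invoked roughly as follows (illustrative values; the script path is taken from the diff header above):

    python3 framework/python/src/core/test_runner.py --no-ui \
        --target 00:11:22:33:44:55 --firmware 1.0.1

Both --target and --firmware must now accompany --no-ui unless network-only mode is also selected; otherwise the parser prints the trimmed help text and exits with status 1.
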
diff --git a/framework/python/src/core/testrun.py b/framework/python/src/core/testrun.py
index 5b43cfd65..5d4e78e9c 100644
--- a/framework/python/src/core/testrun.py
+++ b/framework/python/src/core/testrun.py
@@ -11,14 +11,9 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-
-"""The overall control of the Test Run application.
-
+"""The overall control of the Testrun application.
This file provides the integration between all of the
-Test Run components, such as net_orc, test_orc and test_ui.
-
-Run using the provided command scripts in the cmd folder.
-E.g sudo cmd/start
+Testrun components, such as net_orc, test_orc and test_ui.
"""
import docker
import json
@@ -27,10 +22,11 @@
import signal
import sys
import time
-from common import logger, util
+from common import logger, util, mqtt
from common.device import Device
-from common.session import TestrunSession
from common.testreport import TestReport
+from common.statuses import TestrunStatus
+from session import TestrunSession
from api.api import Api
from net_orc.listener import NetworkEvent
from net_orc import network_orchestrator as net_orc
@@ -38,14 +34,7 @@
from docker.errors import ImageNotFound
-# Locate parent directory
-current_dir = os.path.dirname(os.path.realpath(__file__))
-
-# Locate the test-run root directory, 4 levels, src->python->framework->test-run
-root_dir = os.path.dirname(os.path.dirname(
- os.path.dirname(os.path.dirname(current_dir))))
-
-LOGGER = logger.get_logger('test_run')
+LOGGER = logger.get_logger('testrun')
DEFAULT_CONFIG_FILE = 'local/system.json'
EXAMPLE_CONFIG_FILE = 'local/system.json.example'
@@ -58,10 +47,16 @@
DEVICE_MODEL = 'model'
DEVICE_MAC_ADDR = 'mac_addr'
DEVICE_TEST_MODULES = 'test_modules'
+DEVICE_TYPE_KEY = 'type'
+DEVICE_TECHNOLOGY_KEY = 'technology'
+DEVICE_TEST_PACK_KEY = 'test_pack'
+DEVICE_ADDITIONAL_INFO_KEY = 'additional_info'
+
MAX_DEVICE_REPORTS_KEY = 'max_device_reports'
+
class Testrun: # pylint: disable=too-few-public-methods
- """Test Run controller.
+ """Testrun controller.
Creates an instance of the network orchestrator, test
orchestrator and user interface.
@@ -72,8 +67,19 @@ def __init__(self,
validate=False,
net_only=False,
single_intf=False,
- no_ui=False):
+ no_ui=False,
+ target_mac=None,
+ firmware=None):
+
+ # Locate parent directory
+ current_dir = os.path.dirname(os.path.realpath(__file__))
+ # Locate the test-run root directory, 4 levels,
+ # src->python->framework->test-run
+ self._root_dir = os.path.dirname(
+ os.path.dirname(os.path.dirname(os.path.dirname(current_dir))))
+
+ # Determine config file
if config_file is None:
self._config_file = self._get_config_abs(DEFAULT_CONFIG_FILE)
else:
@@ -81,13 +87,15 @@ def __init__(self,
self._net_only = net_only
self._single_intf = single_intf
- self._no_ui = no_ui
+ # Network only mode requires the UI to be disabled,
+ # so set no_ui whenever net_only is selected
+ self._no_ui = no_ui or net_only
# Catch any exit signals
self._register_exits()
# Create session
- self._session = TestrunSession(root_dir=root_dir)
+ self._session = TestrunSession(root_dir=self._root_dir)
# Register runtime parameters
if single_intf:
@@ -97,18 +105,35 @@ def __init__(self,
if validate:
self._session.add_runtime_param('validate')
- self._net_orc = net_orc.NetworkOrchestrator(
- session=self._session)
- self._test_orc = test_orc.TestOrchestrator(
- self._session,
- self._net_orc)
+ self._net_orc = net_orc.NetworkOrchestrator(session=self._session)
+ self._test_orc = test_orc.TestOrchestrator(self._session, self._net_orc)
# Load device repository
self.load_all_devices()
+ # If no_ui selected and not network only mode,
+ # load the target device into the session
+ if self._no_ui and not net_only:
+ target_device = self._session.get_device(target_mac)
+ if target_device is not None:
+ target_device.firmware = firmware
+ self._session.set_target_device(target_device)
+ else:
+ print(
+ f'Target device specified does not exist in device registry: '
+ f'{target_mac}',
+ file=sys.stderr)
+ sys.exit(1)
+
# Load test modules
self._test_orc.start()
+ # Start websockets server
+ self.start_ws()
+
+ # Init MQTT client
+ self._mqtt_client = mqtt.MQTT()
+
if self._no_ui:
# Check Testrun is able to start
@@ -131,6 +156,9 @@ def __init__(self,
while True:
time.sleep(1)
+ def get_root_dir(self):
+ return self._root_dir
+
def get_version(self):
return self.get_session().get_version()
@@ -150,25 +178,33 @@ def _load_devices(self, device_dir):
for device_folder in os.listdir(device_dir):
- device_config_file_path = os.path.join(device_dir,
- device_folder,
+ device_config_file_path = os.path.join(device_dir, device_folder,
DEVICE_CONFIG)
# Check if device config file exists before loading
if not os.path.exists(device_config_file_path):
LOGGER.error('Device configuration file missing ' +
- f'from device {device_folder}')
+ f'for device {device_folder}')
continue
# Open device config file
with open(device_config_file_path,
encoding='utf-8') as device_config_file:
- device_config_json = json.load(device_config_file)
+
+ try:
+ device_config_json = json.load(device_config_file)
+ except json.decoder.JSONDecodeError as e:
+ LOGGER.error('Invalid JSON found in ' +
+ f'device configuration {device_config_file_path}')
+ LOGGER.debug(e)
+ continue
device_manufacturer = device_config_json.get(DEVICE_MANUFACTURER)
device_model = device_config_json.get(DEVICE_MODEL)
mac_addr = device_config_json.get(DEVICE_MAC_ADDR)
test_modules = device_config_json.get(DEVICE_TEST_MODULES)
+
+ # Load max device reports
max_device_reports = None
if 'max_device_reports' in device_config_json:
max_device_reports = device_config_json.get(MAX_DEVICE_REPORTS_KEY)
@@ -183,6 +219,25 @@ def _load_devices(self, device_dir):
max_device_reports=max_device_reports,
device_folder=device_folder)
+ # Load in the additional fields
+ if DEVICE_TYPE_KEY in device_config_json:
+ device.type = device_config_json.get(DEVICE_TYPE_KEY)
+
+ if DEVICE_TECHNOLOGY_KEY in device_config_json:
+ device.technology = device_config_json.get(DEVICE_TECHNOLOGY_KEY)
+
+ if DEVICE_TEST_PACK_KEY in device_config_json:
+ device.test_pack = device_config_json.get(DEVICE_TEST_PACK_KEY)
+
+ if DEVICE_ADDITIONAL_INFO_KEY in device_config_json:
+ device.additional_info = device_config_json.get(
+ DEVICE_ADDITIONAL_INFO_KEY)
+
+ if None in [device.type, device.technology, device.test_pack]:
+ LOGGER.warning(
+ f'Device {device.manufacturer} {device.model} is outdated '
+ 'and requires further configuration')
+ device.status = 'Invalid'
+
self._load_test_reports(device)
# Add device to device repository
@@ -192,37 +247,35 @@ def _load_devices(self, device_dir):
def _load_test_reports(self, device):
- LOGGER.debug(f'Loading test reports for device {device.model}')
+ LOGGER.debug('Loading test reports for device ' +
+ f'{device.manufacturer} {device.model}')
# Remove the existing reports in memory
device.clear_reports()
# Locate reports folder
- reports_folder = os.path.join(root_dir,
- LOCAL_DEVICES_DIR,
- device.device_folder, 'reports')
+ reports_folder = self.get_reports_folder(device)
# Check if reports folder exists (device may have no reports)
if not os.path.exists(reports_folder):
return
- LOGGER.info(f'Loading reports from {reports_folder}')
-
for report_folder in os.listdir(reports_folder):
# 1.3 file path
- report_json_file_path = os.path.join(
- reports_folder,
- report_folder,
- 'test',
- device.mac_addr.replace(':',''),
- 'report.json')
-
+ report_json_file_path = os.path.join(reports_folder, report_folder,
+ 'test',
+ device.mac_addr.replace(':', ''),
+ 'report.json')
+
if not os.path.isfile(report_json_file_path):
# Revert to pre 1.3 file path
- report_json_file_path = os.path.join(
- reports_folder,
- report_folder,
- 'report.json')
+ report_json_file_path = os.path.join(reports_folder, report_folder,
+ 'report.json')
+
# Check if the report.json file exists
if not os.path.isfile(report_json_file_path):
@@ -236,18 +289,17 @@ def _load_test_reports(self, device):
test_report.set_mac_addr(device.mac_addr)
device.add_report(test_report)
+ def get_reports_folder(self, device):
+ """Return the reports folder path for the device"""
+ return os.path.join(self._root_dir, LOCAL_DEVICES_DIR, device.device_folder,
+ 'reports')
+
def delete_report(self, device: Device, timestamp):
LOGGER.debug(f'Deleting test report for device {device.model} ' +
f'at {timestamp}')
# Locate reports folder
- reports_folder = os.path.join(root_dir,
- LOCAL_DEVICES_DIR,
- device.device_folder, 'reports')
-
- # Check if reports folder exists (device may have no reports)
- if not os.path.exists(reports_folder):
- return False
+ reports_folder = self.get_reports_folder(device)
for report_folder in os.listdir(reports_folder):
if report_folder == timestamp:
@@ -261,15 +313,13 @@ def delete_report(self, device: Device, timestamp):
def create_device(self, device: Device):
# Define the device folder location
- device_folder_path = os.path.join(root_dir,
- LOCAL_DEVICES_DIR,
+ device_folder_path = os.path.join(self._root_dir, LOCAL_DEVICES_DIR,
device.device_folder)
# Create the directory
os.makedirs(device_folder_path)
- config_file_path = os.path.join(device_folder_path,
- DEVICE_CONFIG)
+ config_file_path = os.path.join(device_folder_path, DEVICE_CONFIG)
with open(config_file_path, 'w', encoding='utf-8') as config_file:
config_file.writelines(json.dumps(device.to_config_json(), indent=4))
@@ -282,23 +332,12 @@ def create_device(self, device: Device):
return device.to_config_json()
- def save_device(self, device: Device, device_json):
+ def save_device(self, device: Device):
"""Edit and save an existing device config."""
- # Update device properties
- device.manufacturer = device_json['manufacturer']
- device.model = device_json['model']
-
- if 'test_modules' in device_json:
- device.test_modules = device_json['test_modules']
- else:
- device.test_modules = {}
-
# Obtain the config file path
- config_file_path = os.path.join(root_dir,
- LOCAL_DEVICES_DIR,
- device.device_folder,
- DEVICE_CONFIG)
+ config_file_path = os.path.join(self._root_dir, LOCAL_DEVICES_DIR,
+ device.device_folder, DEVICE_CONFIG)
with open(config_file_path, 'w+', encoding='utf-8') as config_file:
config_file.writelines(json.dumps(device.to_config_json(), indent=4))
@@ -311,9 +350,8 @@ def save_device(self, device: Device, device_json):
def delete_device(self, device: Device):
# Obtain the config file path
- device_folder = os.path.join(root_dir,
- LOCAL_DEVICES_DIR,
- device.device_folder)
+ device_folder = os.path.join(self._root_dir, LOCAL_DEVICES_DIR,
+ device.device_folder)
# Delete the device directory
shutil.rmtree(device_folder)
@@ -328,17 +366,13 @@ def start(self):
self._start_network()
self.get_net_orc().get_listener().register_callback(
- self._device_discovered,
- [NetworkEvent.DEVICE_DISCOVERED]
- )
+ self._device_discovered, [NetworkEvent.DEVICE_DISCOVERED])
if self._net_only:
LOGGER.info('Network only option configured, no tests will be run')
else:
self.get_net_orc().get_listener().register_callback(
- self._device_stable,
- [NetworkEvent.DEVICE_STABLE]
- )
+ self._device_stable, [NetworkEvent.DEVICE_STABLE])
self.get_net_orc().start_listener()
LOGGER.info('Waiting for devices on the network...')
@@ -349,15 +383,21 @@ def start(self):
def stop(self):
+ # First, change the status to stopping
+ self.get_session().stop()
+
# Prevent discovering new devices whilst stopping
if self.get_net_orc().get_listener() is not None:
self.get_net_orc().get_listener().stop_listener()
- self.get_session().stop()
-
self._stop_tests()
+
+ self.get_session().set_status(TestrunStatus.CANCELLED)
+
+ # Disconnect before WS server stops to prevent error
+ self._mqtt_client.disconnect()
+
self._stop_network(kill=True)
- self.get_session().set_status('Cancelled')
def _register_exits(self):
signal.signal(signal.SIGINT, self._exit_handler)
@@ -369,6 +409,7 @@ def shutdown(self):
LOGGER.info('Shutting down Testrun')
self.stop()
self._stop_ui()
+ self._stop_ws()
def _exit_handler(self, signum, arg): # pylint: disable=unused-argument
LOGGER.debug('Exit signal received: ' + str(signum))
@@ -380,7 +421,7 @@ def _exit_handler(self, signum, arg): # pylint: disable=unused-argument
def _get_config_abs(self, config_file=None):
if config_file is None:
# If not defined, use relative pathing to local file
- config_file = os.path.join(root_dir, self._config_file)
+ config_file = os.path.join(self._root_dir, self._config_file)
# Expand the config file to absolute pathing
return os.path.abspath(config_file)
@@ -406,6 +447,9 @@ def _stop_network(self, kill=True):
def _stop_tests(self):
self._test_orc.stop()
+ def get_mqtt_client(self):
+ return self._mqtt_client
+
def get_device(self, mac_addr):
"""Returns a loaded device object from the device mac address."""
for device in self.get_session().get_device_repository():
@@ -435,16 +479,17 @@ def _device_discovered(self, mac_addr):
def _device_stable(self, mac_addr):
# Do not continue testing if Testrun has cancelled during monitor phase
- if self.get_session().get_status() == 'Cancelled':
+ if self.get_session().get_status() == TestrunStatus.CANCELLED:
self._stop_network()
return
LOGGER.info(f'Device with mac address {mac_addr} is ready for testing.')
- self._set_status('In Progress')
+ self._set_status(TestrunStatus.IN_PROGRESS)
result = self._test_orc.run_test_modules()
if result is not None:
self._set_status(result)
+
self._stop_network()
def get_session(self):
@@ -462,16 +507,12 @@ def start_ui(self):
client = docker.from_env()
try:
- client.containers.run(
- image='test-run/ui',
- auto_remove=True,
- name='tr-ui',
- hostname='testrun.io',
- detach=True,
- ports={
- '80': 8080
- }
- )
+ client.containers.run(image='testrun/ui',
+ auto_remove=True,
+ name='tr-ui',
+ hostname='testrun.io',
+ detach=True,
+ ports={'80': 8080})
except ImageNotFound as ie:
LOGGER.error('An error occurred whilst starting the UI. ' +
'Please investigate and try again.')
@@ -489,4 +530,37 @@ def _stop_ui(self):
if container is not None:
container.kill()
except docker.errors.NotFound:
- return
+ pass
+
+ def start_ws(self):
+
+ self._stop_ws()
+
+ LOGGER.info('Starting WS server')
+
+ client = docker.from_env()
+
+ try:
+ client.containers.run(image='testrun/ws',
+ auto_remove=True,
+ name='tr-ws',
+ detach=True,
+ ports={
+ '9001': 9001,
+ '1883': 1883
+ })
+ except ImageNotFound as ie:
+ LOGGER.error('An error occurred whilst starting the websockets server. ' +
+ 'Please investigate and try again.')
+ LOGGER.error(ie)
+ sys.exit(1)
+
+ def _stop_ws(self):
+ LOGGER.info('Stopping websockets server')
+ client = docker.from_env()
+ try:
+ container = client.containers.get('tr-ws')
+ if container is not None:
+ container.kill()
+ except docker.errors.NotFound:
+ pass
diff --git a/framework/python/src/net_orc/ip_control.py b/framework/python/src/net_orc/ip_control.py
index 506b23a95..aa07283af 100644
--- a/framework/python/src/net_orc/ip_control.py
+++ b/framework/python/src/net_orc/ip_control.py
@@ -12,9 +12,12 @@
# See the License for the specific language governing permissions and
# limitations under the License.
"""IP Control Module"""
+import psutil
+import typing as t
from common import logger
from common import util
import re
+import socket
LOGGER = logger.get_logger('ip_ctrl')
@@ -43,10 +46,7 @@ def add_namespace(self, namespace):
def check_interface_status(self, interface_name):
output = util.run_command(cmd=f'ip link show {interface_name}', output=True)
- if 'state DOWN ' in output[0]:
- return False
- else:
- return True
+ return 'state UP ' in output[0]
def delete_link(self, interface_name):
"""Delete an ip link"""
@@ -89,6 +89,16 @@ def get_iface_connection_stats(self, iface):
else:
return None
+ @staticmethod
+ def get_iface_mac_address(iface):
+ net_if_addrs = psutil.net_if_addrs()
+ if iface in net_if_addrs:
+ for addr_info in net_if_addrs[iface]:
+ # AF_LINK corresponds to the MAC address
+ if addr_info.family == psutil.AF_LINK:
+ return addr_info.address
+ return None
+
def get_iface_port_stats(self, iface):
"""Extract information about packets connection"""
response = util.run_command(f'ethtool -S {iface}')
@@ -97,9 +107,17 @@ def get_iface_port_stats(self, iface):
else:
return None
+ def get_ip_address(self, iface):
+ addrs = psutil.net_if_addrs()
+ if iface in addrs:
+ for addr in addrs[iface]:
+ if addr.family == socket.AF_INET:
+ return addr.address
+ return None
+
def get_namespaces(self):
result = util.run_command('ip netns list')
- #Strip ID's from the namespace results
+ # Strip IDs from the namespace results
namespaces = re.findall(r'(\S+)(?:\s+\(id: \d+\))?', result[0])
return namespaces
@@ -237,3 +255,30 @@ def configure_container_interface(self,
LOGGER.error(f'Failed to set interface up {namespace_intf}')
return False
return True
+
+ def ping_via_gateway(self, host):
+ """Ping the host trough the gateway container"""
+ command = f'timeout 3 docker exec tr-ct-gateway ping -W 1 -c 1 {host}'
+ output = util.run_command(command)
+ if '0% packet loss' in output[0]:
+ return True
+ return False
+
+ @staticmethod
+ def get_sys_interfaces() -> t.Dict[str, str]:
+ """Retrieves all Ethernet network interfaces from the host system
+ Returns:
+ t.Dict[str, str]: interface name mapped to its first address
+ """
+ addrs = psutil.net_if_addrs()
+ ifaces = {}
+
+ for key in addrs:
+ nic = addrs[key]
+ # Ignore any interfaces that are not ethernet
+ if not (key.startswith('en') or key.startswith('eth')):
+ continue
+
+ ifaces[key] = nic[0].address
+
+ return ifaces
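
Aside: the helpers above (get_iface_mac_address, get_ip_address, get_sys_interfaces) all walk the same psutil.net_if_addrs() structure. A standalone sketch of that structure, assuming only that psutil is installed:

    import socket
    import psutil

    # net_if_addrs() maps interface name -> list of address entries
    for name, addrs in psutil.net_if_addrs().items():
        for addr in addrs:
            if addr.family == socket.AF_INET:    # IPv4 address
                print(name, 'ipv4', addr.address)
            elif addr.family == psutil.AF_LINK:  # link-layer (MAC) address
                print(name, 'mac', addr.address)
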
diff --git a/framework/python/src/net_orc/network_orchestrator.py b/framework/python/src/net_orc/network_orchestrator.py
index f20093a28..b8e7befd2 100644
--- a/framework/python/src/net_orc/network_orchestrator.py
+++ b/framework/python/src/net_orc/network_orchestrator.py
@@ -17,18 +17,20 @@
import json
import os
from scapy.all import sniff, wrpcap, BOOTP, AsyncSniffer
+from scapy.error import Scapy_Exception
import shutil
import subprocess
import sys
-import docker
import time
-from docker.types import Mount
-from common import logger, util
+import traceback
+from common import logger, util, mqtt
+from common.statuses import TestrunStatus
from net_orc.listener import Listener
from net_orc.network_event import NetworkEvent
from net_orc.network_validator import NetworkValidator
from net_orc.ovs_control import OVSControl
from net_orc.ip_control import IPControl
+from core.docker.network_docker_module import NetworkModule
LOGGER = logger.get_logger('net_orc')
RUNTIME_DIR = 'runtime'
@@ -66,6 +68,10 @@ def __init__(self, session):
self._ovs = OVSControl(self._session)
self._ip_ctrl = IPControl()
+ # Load subnet information into the session
+ self._session.set_subnets(self.network_config.ipv4_network,
+ self.network_config.ipv6_network)
+
def start(self):
"""Start the network orchestrator."""
@@ -137,6 +143,9 @@ def start_network(self):
# Get network ready (via Network orchestrator)
LOGGER.debug('Network is ready')
+ def get_ip_address(self, iface):
+ return self._ip_ctrl.get_ip_address(iface)
+
def get_listener(self):
return self._listener
@@ -190,7 +199,7 @@ def _device_discovered(self, mac_addr):
test_dir = os.path.join(RUNTIME_DIR, TEST_DIR)
device_tests = os.listdir(test_dir)
for device_test in device_tests:
- device_test_path = os.path.join(RUNTIME_DIR,TEST_DIR,device_test)
+ device_test_path = os.path.join(RUNTIME_DIR, TEST_DIR, device_test)
if os.path.isdir(device_test_path):
shutil.rmtree(device_test_path, ignore_errors=True)
@@ -215,7 +224,7 @@ def _device_discovered(self, mac_addr):
if device.ip_addr is None:
LOGGER.info(
f'Timed out whilst waiting for {mac_addr} to obtain an IP address')
- self._session.set_status('Cancelled')
+ self._session.set_status(TestrunStatus.CANCELLED)
return
LOGGER.info(
f'Device with mac addr {device.mac_addr} has obtained IP address '
@@ -223,7 +232,9 @@ def _device_discovered(self, mac_addr):
#self._ovs.add_arp_inspection_filter(ip_address=device.ip_addr,
# mac_address=device.mac_addr)
- self._start_device_monitor(device)
+ # Don't monitor devices when in network only mode
+ if 'net_only' not in self._session.get_runtime_params():
+ self._start_device_monitor(device)
def _get_conn_stats(self):
""" Extract information about the physical connection
@@ -273,7 +284,7 @@ def _dhcp_lease_ack(self, packet):
def _start_device_monitor(self, device):
"""Start a timer until the steady state has been reached and
callback the steady state method for this device."""
- self.get_session().set_status('Monitoring')
+ self.get_session().set_status(TestrunStatus.MONITORING)
self._monitor_packets = []
LOGGER.info(f'Monitoring device with mac addr {device.mac_addr} '
f'for {str(self._session.get_monitor_period())} seconds')
@@ -290,15 +301,22 @@ def _start_device_monitor(self, device):
time.sleep(1)
# Check Testrun hasn't been cancelled
- if self._session.get_status() == 'Cancelled':
+ if self._session.get_status() in (
+ TestrunStatus.STOPPING,
+ TestrunStatus.CANCELLED
+ ):
sniffer.stop()
return
if not self._ip_ctrl.check_interface_status(
self._session.get_device_interface()):
- sniffer.stop()
- self._session.set_status('Cancelled')
- LOGGER.error('Device interface disconnected, cancelling Testrun')
+ try:
+ sniffer.stop()
+ except Scapy_Exception:
+ LOGGER.error('Device adapter disconnected whilst monitoring.')
+ finally:
+ self._session.set_status(TestrunStatus.CANCELLED)
+ LOGGER.error('Device interface disconnected, cancelling Testrun')
LOGGER.debug('Writing packets to monitor.pcap')
wrpcap(os.path.join(device_runtime_dir, 'monitor.pcap'),
@@ -330,26 +348,6 @@ def _ping(self, net_module):
success = util.run_command(cmd, output=False)
return success
- def _create_private_net(self):
- client = docker.from_env()
- try:
- network = client.networks.get(PRIVATE_DOCKER_NET)
- network.remove()
- except docker.errors.NotFound:
- pass
-
- # TODO: These should be made into variables
- ipam_pool = docker.types.IPAMPool(subnet='100.100.0.0/16',
- iprange='100.100.100.0/24')
-
- ipam_config = docker.types.IPAMConfig(pool_configs=[ipam_pool])
-
- client.networks.create(PRIVATE_DOCKER_NET,
- ipam=ipam_config,
- internal=True,
- check_duplicate=True,
- driver='macvlan')
-
def _ci_pre_network_create(self):
""" Stores network properties to restore network after
network creation and flushes internet interface
@@ -436,79 +434,33 @@ def load_network_modules(self):
for module_dir in os.listdir(net_modules_dir):
- if self._get_network_module(module_dir) is None:
+ if (self._get_network_module(module_dir) is None and
+ module_dir != 'template'):
loaded_module = self._load_network_module(module_dir)
loaded_modules += loaded_module.dir_name + ' '
LOGGER.info(loaded_modules)
def _load_network_module(self, module_dir):
+ """Import module configuration from module_config.json."""
- net_modules_dir = os.path.join(self._path, NETWORK_MODULES_DIR)
+ # Make sure we only load each module once since some modules will
+ # depend on the same module
+ if not any(m.dir_name == module_dir for m in self._net_modules):
- net_module = NetworkModule()
-
- # Load module information
- with open(os.path.join(self._path, net_modules_dir, module_dir,
- NETWORK_MODULE_METADATA),
- 'r',
- encoding='UTF-8') as module_file_open:
- net_module_json = json.load(module_file_open)
-
- net_module.name = net_module_json['config']['meta']['name']
- net_module.display_name = net_module_json['config']['meta']['display_name']
- net_module.description = net_module_json['config']['meta']['description']
- net_module.dir = os.path.join(self._path, net_modules_dir, module_dir)
- net_module.dir_name = module_dir
- net_module.build_file = module_dir + '.Dockerfile'
- net_module.container_name = 'tr-ct-' + net_module.dir_name
- net_module.image_name = 'test-run/' + net_module.dir_name
-
- # Attach folder mounts to network module
- if 'docker' in net_module_json['config']:
-
- if 'mounts' in net_module_json['config']['docker']:
- for mount_point in net_module_json['config']['docker']['mounts']:
- net_module.mounts.append(
- Mount(target=mount_point['target'],
- source=os.path.join(os.getcwd(), mount_point['source']),
- type='bind'))
-
- if 'depends_on' in net_module_json['config']['docker']:
- depends_on_module = net_module_json['config']['docker']['depends_on']
- if self._get_network_module(depends_on_module) is None:
- self._load_network_module(depends_on_module)
-
- # Determine if this is a container or just an image/template
- if 'enable_container' in net_module_json['config']['docker']:
- net_module.enable_container = net_module_json['config']['docker'][
- 'enable_container']
-
- # Determine if this is a template
- if 'template' in net_module_json['config']['docker']:
- net_module.template = net_module_json['config']['docker']['template']
-
- # Load network service networking configuration
- if net_module.enable_container:
-
- net_module.net_config.enable_wan = net_module_json['config']['network'][
- 'enable_wan']
- net_module.net_config.ip_index = net_module_json['config']['network'][
- 'ip_index']
-
- net_module.net_config.host = False if not 'host' in net_module_json[
- 'config']['network'] else net_module_json['config']['network']['host']
-
- net_module.net_config.ipv4_address = self.network_config.ipv4_network[
- net_module.net_config.ip_index]
- net_module.net_config.ipv4_network = self.network_config.ipv4_network
-
- net_module.net_config.ipv6_address = self.network_config.ipv6_network[
- net_module.net_config.ip_index]
- net_module.net_config.ipv6_network = self.network_config.ipv6_network
-
- self._net_modules.append(net_module)
- return net_module
+ LOGGER.debug(f'Loading network module {module_dir}')
+
+ modules_dir = os.path.join(self._path, NETWORK_MODULES_DIR)
+
+ module_conf_file = os.path.join(self._path, modules_dir, module_dir,
+ NETWORK_MODULE_METADATA)
+
+ module = NetworkModule(module_conf_file, self._session)
+ if module.depends_on is not None:
+ self._load_network_module(module.depends_on)
+ self._net_modules.append(module)
+
+ return module
def build_network_modules(self):
LOGGER.info('Building network modules...')
@@ -518,12 +470,7 @@ def build_network_modules(self):
def _build_module(self, net_module):
LOGGER.debug('Building network module ' + net_module.dir_name)
- client = docker.from_env()
- client.images.build(dockerfile=os.path.join(net_module.dir,
- net_module.build_file),
- path=self._path,
- forcerm=True,
- tag='test-run/' + net_module.dir_name)
+ net_module.build()
def _get_network_module(self, name):
for net_module in self._net_modules:
@@ -535,65 +482,17 @@ def _get_network_module(self, name):
def _start_network_service(self, net_module):
LOGGER.debug('Starting network service ' + net_module.display_name)
- network = 'host' if net_module.net_config.host else PRIVATE_DOCKER_NET
+ network = 'host' if net_module.net_config.host else 'bridge'
LOGGER.debug(f"""Network: {network}, image name: {net_module.image_name},
container name: {net_module.container_name}""")
- try:
- client = docker.from_env()
- net_module.container = client.containers.run(
- net_module.image_name,
- auto_remove=True,
- cap_add=['NET_ADMIN'],
- name=net_module.container_name,
- hostname=net_module.container_name,
- # Undetermined version of docker seems to have broken
- # DNS configuration (/etc/resolv.conf) Re-add when/if
- # this network is utilized and DNS issue is resolved
- #network=PRIVATE_DOCKER_NET,
- network_mode='none',
- privileged=True,
- detach=True,
- mounts=net_module.mounts,
- environment={
- 'TZ': self.get_session().get_timezone(),
- 'HOST_USER': util.get_host_user()
- })
- except docker.errors.ContainerError as error:
- LOGGER.error('Container run error')
- LOGGER.error(error)
-
- if network != 'host':
+ net_module.start()
+ if net_module.get_network() != 'host':
self._attach_service_to_network(net_module)
def _stop_service_module(self, net_module, kill=False):
LOGGER.debug('Stopping network container ' + net_module.container_name)
- try:
- container = self._get_service_container(net_module)
- if container is not None:
- if kill:
- LOGGER.debug('Killing container: ' + net_module.container_name)
- container.kill()
- else:
- LOGGER.debug('Stopping container: ' + net_module.container_name)
- container.stop()
- LOGGER.debug('Container stopped: ' + net_module.container_name)
- except Exception as error: # pylint: disable=W0703
- LOGGER.error('Container stop error')
- LOGGER.error(error)
-
- def _get_service_container(self, net_module):
- LOGGER.debug('Resolving service container: ' + net_module.container_name)
- container = None
- try:
- client = docker.from_env()
- container = client.containers.get(net_module.container_name)
- except docker.errors.NotFound:
- LOGGER.debug('Container ' + net_module.container_name + ' not found')
- except Exception as e: # pylint: disable=W0703
- LOGGER.error('Failed to resolve container')
- LOGGER.error(e)
- return container
+ net_module.stop(kill=kill)
def stop_networking_services(self, kill=False):
LOGGER.info('Stopping network services')
@@ -758,13 +657,10 @@ def restore_net(self):
if self.get_listener() is not None and self.get_listener().is_running():
self.get_listener().stop_listener()
- client = docker.from_env()
-
# Stop all network containers if still running
for net_module in self._net_modules:
try:
- container = client.containers.get('tr-ct-' + net_module.dir_name)
- container.kill()
+ net_module.stop(kill=True)
except Exception: # pylint: disable=W0703
continue
@@ -786,53 +682,52 @@ def restore_net(self):
def get_session(self):
return self._session
-
-class NetworkModule:
- """Define all the properties of a Network Module"""
-
- def __init__(self):
- self.name = None
- self.display_name = None
- self.description = None
-
- self.container = None
- self.container_name = None
- self.image_name = None
- self.template = False
-
- # Absolute path
- self.dir = None
- self.dir_name = None
- self.build_file = None
- self.mounts = []
-
- self.enable_container = True
-
- self.net_config = NetworkModuleNetConfig()
-
-
-class NetworkModuleNetConfig:
- """Define all the properties of the network config
- for a network module"""
-
- def __init__(self):
-
- self.enable_wan = False
-
- self.ip_index = 0
- self.ipv4_address = None
- self.ipv4_network = None
- self.ipv6_address = None
- self.ipv6_network = None
-
- self.host = False
-
- def get_ipv4_addr_with_prefix(self):
- return format(self.ipv4_address) + '/' + str(self.ipv4_network.prefixlen)
-
- def get_ipv6_addr_with_prefix(self):
- return format(self.ipv6_address) + '/' + str(self.ipv6_network.prefixlen)
-
+ def network_adapters_checker(self, mqtt_client: mqtt.MQTT, topic: str):
+ """Checks for changes in network adapters
+ and sends a message to the frontend
+ """
+ try:
+ adapters = self._session.detect_network_adapters_change()
+ if adapters:
+ mqtt_client.send_message(topic, adapters)
+ except Exception: # pylint: disable=W0703
+ LOGGER.error(traceback.format_exc())
+
+ def is_device_connected(self):
+ """Check if device connected"""
+ return self._ip_ctrl.check_interface_status(
+ self._session.get_device_interface()
+ )
+
+ def internet_conn_checker(self, mqtt_client: mqtt.MQTT, topic: str):
+ """Checks internet connection and sends a status to frontend"""
+
+ # Default message
+ message = {'connection': False}
+
+ # Only check if Testrun is running
+ if self.get_session().get_status() not in [
+ TestrunStatus.WAITING_FOR_DEVICE,
+ TestrunStatus.MONITORING,
+ TestrunStatus.IN_PROGRESS
+ ]:
+ message['connection'] = None
+
+ # Only run if single intf mode not used
+ elif 'single_intf' not in self._session.get_runtime_params():
+ iface = self._session.get_internet_interface()
+
+ # Check that an internet intf has been selected
+ if iface and iface in self._ip_ctrl.get_sys_interfaces():
+
+ # Ping google.com from gateway container
+ internet_connection = self._ip_ctrl.ping_via_gateway('google.com')
+
+ if internet_connection:
+ message['connection'] = True
+
+ # Broadcast via MQTT client
+ mqtt_client.send_message(topic, message)
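
A hedged sketch of a consumer for the two status topics above, written against plain paho-mqtt rather than the project's common.mqtt wrapper (whose send_message serialisation is not shown in this diff, so JSON payloads are an assumption); the broker port matches the tr-ws container started in testrun.py:

    import json
    import paho.mqtt.client as mqtt

    def on_message(client, userdata, msg):
        # e.g. events/internet {'connection': True}
        print(msg.topic, json.loads(msg.payload))

    client = mqtt.Client()              # paho-mqtt 1.x style constructor
    client.on_message = on_message
    client.connect('localhost', 1883)   # port published by tr-ws
    client.subscribe('events/internet')
    client.subscribe('events/adapter')
    client.loop_forever()
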
class NetworkConfig:
"""Define all the properties of the network configuration"""
diff --git a/framework/python/src/net_orc/network_validator.py b/framework/python/src/net_orc/network_validator.py
index df9b96b1d..d760970a3 100644
--- a/framework/python/src/net_orc/network_validator.py
+++ b/framework/python/src/net_orc/network_validator.py
@@ -106,7 +106,7 @@ def _load_devices(self):
device.dir_name = module_dir
device.build_file = module_dir + '.Dockerfile'
device.container_name = 'tr-ct-' + device.dir_name
- device.image_name = 'test-run/' + device.dir_name
+ device.image_name = 'testrun/' + device.dir_name
runtime_source = os.path.join(os.getcwd(), OUTPUT_DIR, device.name)
conf_source = os.path.join(os.getcwd(), CONF_DIR)
diff --git a/framework/python/src/test_orc/test_case.py b/framework/python/src/test_orc/test_case.py
index cf0d6593a..6f4e3434b 100644
--- a/framework/python/src/test_orc/test_case.py
+++ b/framework/python/src/test_orc/test_case.py
@@ -14,6 +14,7 @@
"""Represents an individual test case."""
from dataclasses import dataclass, field
+from common.statuses import TestResult
@dataclass
@@ -24,25 +25,25 @@ class TestCase: # pylint: disable=too-few-public-methods,too-many-instance-attr
description: str = ""
expected_behavior: str = ""
required_result: str = "Recommended"
- result: str = "Non-Compliant"
+ result: str = TestResult.NON_COMPLIANT
recommendations: list = field(default_factory=lambda: [])
+ optional_recommendations: list = field(default_factory=lambda: [])
def to_dict(self):
+ test_dict = {
+ "name": self.name,
+ "description": self.description,
+ "expected_behavior": self.expected_behavior,
+ "required_result": self.required_result,
+ "result": self.result
+ }
+
if self.recommendations is not None and len(self.recommendations) > 0:
- return {
- "name": self.name,
- "description": self.description,
- "expected_behavior": self.expected_behavior,
- "required_result": self.required_result,
- "result": self.result,
- "recommendations": self.recommendations
- }
-
- return {
- "name": self.name,
- "description": self.description,
- "expected_behavior": self.expected_behavior,
- "required_result": self.required_result,
- "result": self.result
- }
+ test_dict["recommendations"] = self.recommendations
+
+ if (self.optional_recommendations is not None
+ and len(self.optional_recommendations) > 0):
+ test_dict["optional_recommendations"] = self.optional_recommendations
+
+ return test_dict
diff --git a/framework/python/src/test_orc/test_orchestrator.py b/framework/python/src/test_orc/test_orchestrator.py
index d38f888a1..d133fefd9 100644
--- a/framework/python/src/test_orc/test_orchestrator.py
+++ b/framework/python/src/test_orc/test_orchestrator.py
@@ -20,23 +20,32 @@
import shutil
import docker
from datetime import datetime
-from docker.types import Mount
from common import logger, util
from common.testreport import TestReport
-from test_orc.module import TestModule
+from common.statuses import TestrunStatus, TestResult
+from core.docker.test_docker_module import TestModule
from test_orc.test_case import TestCase
+from test_orc.test_pack import TestPack
import threading
+from typing import List
LOG_NAME = "test_orc"
LOGGER = logger.get_logger("test_orc")
+
RUNTIME_DIR = "runtime"
-RUNTIME_TEST_DIR = os.path.join(RUNTIME_DIR,"test")
+RESOURCES_DIR = "resources"
+
+RUNTIME_TEST_DIR = os.path.join(RUNTIME_DIR, "test")
+TEST_PACKS_DIR = os.path.join(RESOURCES_DIR, "test_packs")
+
TEST_MODULES_DIR = "modules/test"
MODULE_CONFIG = "conf/module_config.json"
-LOG_REGEX = r"^[A-Z][a-z]{2} [0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2} test_"
+
SAVED_DEVICE_REPORTS = "report/{device_folder}/"
LOCAL_DEVICE_REPORTS = "local/devices/{device_folder}/reports"
DEVICE_ROOT_CERTS = "local/root_certs"
+
+LOG_REGEX = r"^[A-Z][a-z]{2} [0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2} test_"
API_URL = "http://localhost:8000"
@@ -44,22 +53,25 @@ class TestOrchestrator:
"""Manages and controls the test modules."""
def __init__(self, session, net_orc):
- self._test_modules = []
+
+ self._test_modules: List[TestModule] = []
+ self._test_packs: List[TestPack] = []
+
self._container_logs = []
self._session = session
- self._api_url = (self._session.get_api_url() + ":" +
- str(self._session.get_api_port()))
+
+ self._api_url = (self.get_session().get_api_url() + ":" +
+ str(self.get_session().get_api_port()))
+
self._net_orc = net_orc
self._test_in_progress = False
- self._path = os.path.dirname(
- os.path.dirname(
- os.path.dirname(
- os.path.dirname(os.path.dirname(os.path.realpath(__file__))))))
self._root_path = os.path.dirname(
os.path.dirname(
os.path.dirname(
os.path.dirname(os.path.dirname(os.path.realpath(__file__))))))
+ self._test_modules_running = []
+ self._current_module = 0
def start(self):
LOGGER.debug("Starting test orchestrator")
@@ -73,6 +85,7 @@ def start(self):
os.makedirs(DEVICE_ROOT_CERTS, exist_ok=True)
self._load_test_modules()
+ self._load_test_packs()
def stop(self):
"""Stop any running tests"""
@@ -82,39 +95,87 @@ def run_test_modules(self):
"""Iterates through each test module and starts the container."""
# Do not start test modules if status is not in progress, e.g. Stopping
- if self.get_session().get_status() != "In Progress":
+ if self.get_session().get_status() != TestrunStatus.IN_PROGRESS:
return
- device = self._session.get_target_device()
+ device = self.get_session().get_target_device()
+ test_pack_name = device.test_pack
+ test_pack = self.get_test_pack(test_pack_name)
+ LOGGER.debug("Using test pack " + test_pack.name)
+
self._test_in_progress = True
+
LOGGER.info(
f"Running test modules on device with mac addr {device.mac_addr}")
test_modules = []
+
for module in self._test_modules:
+ # Ignore test modules that are just base images etc
if module is None or not module.enable_container:
continue
+ # Ignore test modules that are disabled for this device
if not self._is_module_enabled(module, device):
continue
+ num_tests = 0
+
+ # Add module to list of modules to run
test_modules.append(module)
- self.get_session().add_total_tests(len(module.tests))
- for module in test_modules:
+ for test in module.tests:
+
+ # Duplicate test obj so we don't alter the source
+ test_copy = copy.deepcopy(test)
+
+ # Do not add test if it is not enabled
+ if not self._is_test_enabled(test_copy.name, device):
+ continue
+
+ # Set result to Not Started
+ test_copy.result = TestResult.NOT_STARTED
+
+ # We don't want steps to resolve for not started tests
+ if hasattr(test_copy, "recommendations"):
+ test_copy.recommendations = None
+
+ # Set the required result from the correct test pack
+ required_result = test_pack.get_required_result(test.name)
+
+ test_copy.required_result = required_result
+
+ # Add test result to the session
+ self.get_session().add_test_result(test_copy)
+
+ # Increment number of tests being run by this module
+ num_tests += 1
+
+ # Increment number of tests that will be run
+ self.get_session().add_total_tests(num_tests)
+
+ # Store enabled test modules in the TestOrchestrator object
+ self._test_modules_running = test_modules
+ self._current_module = 0
+
+ for index, module in enumerate(test_modules):
+
+ self._current_module = index
self._run_test_module(module)
LOGGER.info("All tests complete")
- self._session.finish()
+ self.get_session().finish()
# Do not carry on (generating a report) if Testrun has been stopped
- if self.get_session().get_status() != "In Progress":
- return "Cancelled"
+ if self.get_session().get_status() != TestrunStatus.IN_PROGRESS:
+ return TestrunStatus.CANCELLED
report = TestReport()
- report.from_json(self._generate_report())
+
+ generated_report_json = self._generate_report()
+ report.from_json(generated_report_json)
report.add_module_reports(self.get_session().get_module_reports())
device.add_report(report)
@@ -122,6 +183,19 @@ def run_test_modules(self):
self._test_in_progress = False
self.get_session().set_report_url(report.get_report_url())
+ # Set testing description
+ test_pack: TestPack = self.get_test_pack(device.test_pack)
+
+ # Default to an empty message rather than an error message;
+ # in practice this should never be shown to the user
+ message: str = ""
+ if report.get_status() == TestrunStatus.COMPLIANT:
+ message = test_pack.get_message("compliant_description")
+ elif report.get_status() == TestrunStatus.NON_COMPLIANT:
+ message = test_pack.get_message("non_compliant_description")
+
+ self.get_session().set_description(message)
+
# Move testing output from runtime to local device folder
self._timestamp_results(device)
@@ -136,7 +210,7 @@ def _write_reports(self, test_report):
out_dir = os.path.join(
self._root_path, RUNTIME_TEST_DIR,
- self._session.get_target_device().mac_addr.replace(":", ""))
+ self.get_session().get_target_device().mac_addr.replace(":", ""))
LOGGER.debug(f"Writing reports to {out_dir}")
@@ -157,9 +231,7 @@ def _write_reports(self, test_report):
def _generate_report(self):
report = {}
- report["testrun"] = {
- "version": self.get_session().get_version()
- }
+ report["testrun"] = {"version": self.get_session().get_version()}
report["mac_addr"] = self.get_session().get_target_device().mac_addr
report["device"] = self.get_session().get_target_device().to_dict()
@@ -178,16 +250,22 @@ def _generate_report(self):
return report
def _calculate_result(self):
- result = "Compliant"
- for test_result in self._session.get_test_results():
+ result = TestResult.COMPLIANT
+ for test_result in self.get_session().get_test_results():
+
# Check Required tests
if (test_result.required_result.lower() == "required"
- and test_result.result.lower() != "compliant"):
- result = "Non-Compliant"
+ and test_result.result not in [
+ TestResult.COMPLIANT,
+ TestResult.ERROR
+ ]):
+ result = TestResult.NON_COMPLIANT
+
# Check Required if Applicable tests
elif (test_result.required_result.lower() == "required if applicable"
- and test_result.result.lower() == "non-compliant"):
- result = "Non-Compliant"
+ and test_result.result == TestResult.NON_COMPLIANT):
+ result = TestResult.NON_COMPLIANT
+
return result
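
The roll-up rule in _calculate_result is worth spelling out: a Required test keeps the device compliant only when its result is Compliant or Error, while a Required if Applicable test fails the device only when it is explicitly Non-Compliant. A self-contained sketch of the same logic (enum values assumed; the real TestResult lives in common.statuses):

    from enum import Enum

    class TestResult(str, Enum):
        COMPLIANT = 'Compliant'
        NON_COMPLIANT = 'Non-Compliant'
        ERROR = 'Error'

    def calculate_result(results):
        """results: iterable of (required_result, TestResult) pairs."""
        overall = TestResult.COMPLIANT
        for required, result in results:
            if (required.lower() == 'required'
                    and result not in (TestResult.COMPLIANT, TestResult.ERROR)):
                overall = TestResult.NON_COMPLIANT
            elif (required.lower() == 'required if applicable'
                  and result == TestResult.NON_COMPLIANT):
                overall = TestResult.NON_COMPLIANT
        return overall

    # Under this rule an errored Required test does not fail the device
    assert calculate_result([('Required', TestResult.ERROR)]) == TestResult.COMPLIANT
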
def _cleanup_old_test_results(self, device):
@@ -195,7 +273,7 @@ def _cleanup_old_test_results(self, device):
if device.max_device_reports is not None:
max_device_reports = device.max_device_reports
else:
- max_device_reports = self._session.get_max_device_reports()
+ max_device_reports = self.get_session().get_max_device_reports()
if max_device_reports > 0:
completed_results_dir = os.path.join(
@@ -270,25 +348,20 @@ def _timestamp_results(self, device):
return completed_results_dir
- def zip_results(self,
- device,
- timestamp,
- profile):
+ def zip_results(self, device, timestamp, profile):
try:
LOGGER.debug("Archiving test results")
- src_path = os.path.join(LOCAL_DEVICE_REPORTS.replace(
- "{device_folder}",
- device.device_folder),
- timestamp)
+ src_path = os.path.join(
+ LOCAL_DEVICE_REPORTS.replace("{device_folder}", device.device_folder),
+ timestamp)
# Define temp directory to store files before zipping
results_dir = os.path.join(f"/tmp/testrun/{time.time()}")
# Define where to save the zip file
- zip_location = os.path.join("/tmp/testrun",
- timestamp)
+ zip_location = os.path.join("/tmp/testrun", timestamp)
# Delete zip_temp if it already exists
if os.path.exists(results_dir):
@@ -298,16 +371,13 @@ def zip_results(self,
if os.path.exists(zip_location + ".zip"):
os.remove(zip_location + ".zip")
- shutil.copytree(src_path,results_dir)
+ shutil.copytree(src_path, results_dir)
# Include profile if specified
if profile is not None:
- LOGGER.debug(
- f"Copying profile {profile.name} to results directory")
+ LOGGER.debug(f"Copying profile {profile.name} to results directory")
shutil.copy(profile.get_file_path(),
- os.path.join(
- results_dir,
- "profile.json"))
+ os.path.join(results_dir, "profile.json"))
with open(os.path.join(results_dir, "profile.pdf"), "wb") as f:
f.write(profile.to_pdf(device).getvalue())
@@ -320,14 +390,13 @@ def zip_results(self,
# Check that the ZIP was successfully created
zip_file = zip_location + ".zip"
- LOGGER.info(f'''Archive {'created at ' + zip_file
+ LOGGER.info(f"""Archive {"created at " + zip_file
if os.path.exists(zip_file)
- else'creation failed'}''')
-
+ else "creation failed"}""")
return zip_file
- except Exception as error: # pylint: disable=W0703
+ except Exception as error: # pylint: disable=W0703
LOGGER.error("Failed to create zip file")
LOGGER.debug(error)
return None
@@ -339,6 +408,7 @@ def _is_module_enabled(self, module, device):
# Enable module as fallback
enabled = True
+
if device.test_modules is not None:
test_modules = device.test_modules
if module.name in test_modules:
@@ -350,99 +420,50 @@ def _is_module_enabled(self, module, device):
return enabled
+ def _is_test_enabled(self, test, device):
+
+ test_pack_name = device.test_pack
+ test_pack = self.get_test_pack(test_pack_name)
+
+ return test_pack.get_test(test) is not None
+
def _run_test_module(self, module):
"""Start the test container and extract the results."""
# Check that Testrun is not stopping
- if self.get_session().get_status() != "In Progress":
+ if self.get_session().get_status() != TestrunStatus.IN_PROGRESS:
return
- device = self._session.get_target_device()
+ device = self.get_session().get_target_device()
LOGGER.info(f"Running test module {module.name}")
# Get all tests to be executed and set to in progress
- for test in module.tests:
+ for current_test, test in enumerate(module.tests):
+ # Check that device is connected
+ if not self._net_orc.is_device_connected():
+ LOGGER.error("Device was disconnected")
+ self._set_test_modules_error(current_test)
+ self.get_session().set_status(TestrunStatus.CANCELLED)
+ return
+
+ # Copy the test so we don't alter the source
test_copy = copy.deepcopy(test)
- test_copy.result = "In Progress"
+
+ # Update test status to in progress
+ test_copy.result = TestResult.IN_PROGRESS
# We don't want steps to resolve for in progress tests
if hasattr(test_copy, "recommendations"):
test_copy.recommendations = None
- self.get_session().add_test_result(test_copy)
+ # Only add/update the test if it is enabled
+ if self._is_test_enabled(test_copy.name, device):
+ self.get_session().add_test_result(test_copy)
- try:
-
- device_test_dir = os.path.join(self._root_path, RUNTIME_TEST_DIR,
- device.mac_addr.replace(":", ""))
-
- container_runtime_dir = os.path.join(device_test_dir, module.name)
- os.makedirs(container_runtime_dir, exist_ok=True)
-
- config_file = os.path.join(self._root_path, "local/system.json")
- root_certs_dir = os.path.join(self._root_path, "local/root_certs")
-
- container_log_file = os.path.join(container_runtime_dir, "module.log")
-
- network_runtime_dir = os.path.join(self._root_path, "runtime/network")
-
- device_startup_capture = os.path.join(device_test_dir, "startup.pcap")
- util.run_command(f"chown -R {self._host_user} {device_startup_capture}")
-
- device_monitor_capture = os.path.join(device_test_dir, "monitor.pcap")
- util.run_command(f"chown -R {self._host_user} {device_monitor_capture}")
-
- client = docker.from_env()
-
- module.container = client.containers.run(
- module.image_name,
- auto_remove=True,
- cap_add=["NET_ADMIN"],
- name=module.container_name,
- hostname=module.container_name,
- privileged=True,
- detach=True,
- mounts=[
- Mount(target="/testrun/system.json",
- source=config_file,
- type="bind",
- read_only=True),
- Mount(target="/testrun/root_certs",
- source=root_certs_dir,
- type="bind",
- read_only=True),
- Mount(target="/runtime/output",
- source=container_runtime_dir,
- type="bind"),
- Mount(target="/runtime/network",
- source=network_runtime_dir,
- type="bind",
- read_only=True),
- Mount(target="/runtime/device/startup.pcap",
- source=device_startup_capture,
- type="bind",
- read_only=True),
- Mount(target="/runtime/device/monitor.pcap",
- source=device_monitor_capture,
- type="bind",
- read_only=True)
- ],
- environment={
- "TZ": self.get_session().get_timezone(),
- "HOST_USER": self._host_user,
- "DEVICE_MAC": device.mac_addr,
- "IPV4_ADDR": device.ip_addr,
- "DEVICE_TEST_MODULES": json.dumps(device.test_modules),
- "IPV4_SUBNET": self._net_orc.network_config.ipv4_network,
- "IPV6_SUBNET": self._net_orc.network_config.ipv6_network
- })
- except (docker.errors.APIError,
- docker.errors.ContainerError) as container_error:
- LOGGER.error("Test module " + module.name + " has failed to start")
- LOGGER.debug(container_error)
- return
+ # Start the test module
+ module.start(device)
# Mount the test container to the virtual network if required
if module.network:
@@ -451,7 +472,6 @@ def _run_test_module(self, module):
# Determine the module timeout time
test_module_timeout = time.time() + module.timeout
- status = self._get_module_status(module)
# Resolving container logs is blocking so we need to spawn a new thread
log_stream = module.container.logs(stream=True, stdout=True, stderr=True)
@@ -460,87 +480,86 @@ def _run_test_module(self, module):
log_thread.daemon = True
log_thread.start()
- while (status == "running" and self._session.get_status() == "In Progress"):
+ while (module.get_status() == "running"
+ and self.get_session().get_status() == TestrunStatus.IN_PROGRESS):
+
+ # Check that timeout has not exceeded
if time.time() > test_module_timeout:
LOGGER.error("Module timeout exceeded, killing module: " + module.name)
- self._stop_module(module=module, kill=True)
+ module.stop(kill=True)
break
- status = self._get_module_status(module)
# Save all container logs to file
- with open(container_log_file, "w", encoding="utf-8") as f:
+ with open(module.container_log_file, "w", encoding="utf-8") as f:
for line in self._container_logs:
f.write(line + "\n")
# Check that Testrun has not been stopped whilst this module was running
- if self.get_session().get_status() == "Stopping":
+ if self.get_session().get_status() == TestrunStatus.STOPPING:
# Discard results for this module
LOGGER.info(f"Test module {module.name} has forcefully quit")
return
- # Get test results from module
- container_runtime_dir = os.path.join(
- self._root_path,
- "runtime/test/" + device.mac_addr.replace(":", "") + "/" + module.name)
- results_file = f"{container_runtime_dir}/{module.name}-result.json"
+ results_file = f"{module.container_runtime_dir}/{module.name}-result.json"
try:
with open(results_file, "r", encoding="utf-8-sig") as f:
+
+ # Load results from JSON file
module_results_json = json.load(f)
module_results = module_results_json["results"]
for test_result in module_results:
- # Convert dict into TestCase object
+ # Convert dict from json into TestCase object
test_case = TestCase(
- name=test_result["name"],
- description=test_result["description"],
- expected_behavior=test_result["expected_behavior"],
- required_result=test_result["required_result"],
- result=test_result["result"])
- test_case.result=test_result["result"]
-
- if (test_case.result == "Non-Compliant" and
+ name=test_result["name"],
+ result=test_result["result"],
+ description=test_result["description"])
+
+ # Add steps to resolve if test is non-compliant
+ if (test_case.result == TestResult.NON_COMPLIANT and
"recommendations" in test_result):
test_case.recommendations = test_result["recommendations"]
else:
- test_case.recommendations = None
+ test_case.recommendations = []
- self._session.add_test_result(test_case)
+ self.get_session().add_test_result(test_case)
except (FileNotFoundError, PermissionError,
json.JSONDecodeError) as results_error:
LOGGER.error(
- f"Error occurred whilst obtaining results for module {module.name}")
+ f"Error occurred whilst obtaining results for module {module.name}")
LOGGER.error(results_error)
# Get the markdown report from the module if generated
- markdown_file = f"{container_runtime_dir}/{module.name}_report.md"
+ markdown_file = f"{module.container_runtime_dir}/{module.name}_report.md"
try:
with open(markdown_file, "r", encoding="utf-8") as f:
module_report = f.read()
- self._session.add_module_report(module_report)
+ self.get_session().add_module_report(module_report)
except (FileNotFoundError, PermissionError):
LOGGER.debug("Test module did not produce a markdown module report")
# Get the HTML report from the module if generated
- html_file = f"{container_runtime_dir}/{module.name}_report.html"
+ html_file = f"{module.container_runtime_dir}/{module.name}_report.html"
try:
with open(html_file, "r", encoding="utf-8") as f:
module_report = f.read()
LOGGER.debug(f"Adding module report for module {module.name}")
- self._session.add_module_report(module_report)
+ self.get_session().add_module_report(module_report)
except (FileNotFoundError, PermissionError):
LOGGER.debug("Test module did not produce a html module report")
LOGGER.info(f"Test module {module.name} has finished")
- # Resolve all current log data in the containers log_stream
- # this method is blocking so should be called in
- # a thread or within a proper blocking context
def _get_container_logs(self, log_stream):
+ """Resolve all current log data in the containers log_stream
+ this method is blocking so should be called in
+ a thread or within a proper blocking context"""
self._container_logs = []
for log_chunk in log_stream:
lines = log_chunk.decode("utf-8").splitlines()
+
# Process each line and strip blank space
processed_lines = [line.strip() for line in lines if line.strip()]
self._container_logs.extend(processed_lines)
@@ -574,12 +593,32 @@ def _get_module_container(self, module):
LOGGER.error(error)
return container
+ def _load_test_packs(self):
+
+ for test_pack_file in os.listdir(TEST_PACKS_DIR):
+
+ LOGGER.debug(f"Loading test pack {test_pack_file}")
+
+ with open(os.path.join(
+ self._root_path,
+ TEST_PACKS_DIR,
+ test_pack_file), encoding="utf-8") as f:
+ test_pack_json = json.load(f)
+
+ test_pack: TestPack = TestPack(
+ name = test_pack_json["name"],
+ tests = test_pack_json["tests"],
+ language = test_pack_json["language"]
+ )
+
+ self._test_packs.append(test_pack)
+
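
For context, _load_test_packs reads only three top-level keys from each file in resources/test_packs, so a pack presumably looks something like the dict below (hypothetical; the per-test fields are inferred from get_required_result and get_test and may differ from the real schema):

    # Hypothetical test pack structure, inferred from the keys read above
    example_test_pack = {
        'name': 'Example Pack',
        'language': 'en',
        'tests': [
            # get_required_result(test.name) implies at least these fields
            {'name': 'dns.network.hostname_resolution',
             'required_result': 'Required'},
        ],
    }
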
def _load_test_modules(self):
"""Load network modules from module_config.json."""
LOGGER.debug("Loading test modules from /" + TEST_MODULES_DIR)
loaded_modules = "Loaded the following test modules: "
- test_modules_dir = os.path.join(self._path, TEST_MODULES_DIR)
+ test_modules_dir = os.path.join(self._root_path, TEST_MODULES_DIR)
module_dirs = os.listdir(test_modules_dir)
# Check if the directory protocol exists and move it to the beginning
@@ -599,87 +638,42 @@ def _load_test_modules(self):
def _load_test_module(self, module_dir):
"""Import module configuration from module_config.json."""
- LOGGER.debug(f"Loading test module {module_dir}")
-
- modules_dir = os.path.join(self._path, TEST_MODULES_DIR)
-
- # Load basic module information
- module = TestModule()
- with open(os.path.join(self._path, modules_dir, module_dir, MODULE_CONFIG),
- encoding="UTF-8") as module_config_file:
- module_json = json.load(module_config_file)
-
- module.name = module_json["config"]["meta"]["name"]
- module.display_name = module_json["config"]["meta"]["display_name"]
- module.description = module_json["config"]["meta"]["description"]
-
- if "enabled" in module_json["config"]:
- module.enabled = module_json["config"]["enabled"]
-
- module.dir = os.path.join(self._path, modules_dir, module_dir)
- module.dir_name = module_dir
- module.build_file = module_dir + ".Dockerfile"
- module.container_name = "tr-ct-" + module.dir_name + "-test"
- module.image_name = "test-run/" + module.dir_name + "-test"
-
- # Load test cases
- if "tests" in module_json["config"]:
- module.total_tests = len(module_json["config"]["tests"])
- for test_case_json in module_json["config"]["tests"]:
- try:
- test_case = TestCase(
- name=test_case_json["name"],
- description=test_case_json["test_description"],
- expected_behavior=test_case_json["expected_behavior"],
- required_result=test_case_json["required_result"]
- )
-
- if "recommendations" in test_case_json:
- test_case.recommendations = test_case_json["recommendations"]
- module.tests.append(test_case)
- except Exception as error: # pylint: disable=W0718
- LOGGER.error("Failed to load test case. See error for details")
- LOGGER.error(error)
-
- if "timeout" in module_json["config"]["docker"]:
- module.timeout = module_json["config"]["docker"]["timeout"]
-
- # Determine if this is a container or just an image/template
- if "enable_container" in module_json["config"]["docker"]:
- module.enable_container = module_json["config"]["docker"][
- "enable_container"]
-
- # Determine if this module needs network access
- if "network" in module_json["config"]:
- module.network = module_json["config"]["network"]
-
- # Ensure container is built after any dependencies
- if "depends_on" in module_json["config"]["docker"]:
- depends_on_module = module_json["config"]["docker"]["depends_on"]
- if self._get_test_module(depends_on_module) is None:
- self._load_test_module(depends_on_module)
-
- self._test_modules.append(module)
- return module
-
- def build_test_modules(self):
- """Build all test modules."""
- LOGGER.info("Building test modules...")
- for module in self._test_modules:
- self._build_test_module(module)
-
- def _build_test_module(self, module):
- LOGGER.debug("Building docker image for module " + module.dir_name)
-
- client = docker.from_env()
- try:
- client.images.build(
- dockerfile=os.path.join(module.dir, module.build_file),
- path=self._path,
- forcerm=True, # Cleans up intermediate containers during build
- tag=module.image_name)
- except docker.errors.BuildError as error:
- LOGGER.error(error)
+ # Resolve the main docker interface (docker0) for host interaction.
+ # We can't use the device or internet iface, since these are not in a
+ # stable state for this type of communication during testing, but
+ # docker0 has to exist and should always be available
+ external_ip = self._net_orc.get_ip_address("docker0")
+ extra_hosts = {
+ "external.localhost": external_ip
+ } if external_ip is not None else {}
+
+ # Make sure we only load each module once since some modules will
+ # depend on the same module
+ if not any(m.dir_name == module_dir for m in self._test_modules):
+
+ modules_dir = os.path.join(self._root_path, TEST_MODULES_DIR)
+
+ module_conf_file = os.path.join(self._root_path, modules_dir, module_dir,
+ MODULE_CONFIG)
+
+ module = TestModule(module_conf_file,
+ self,
+ self.get_session(),
+ extra_hosts)
+ if module.depends_on is not None:
+ self._load_test_module(module.depends_on)
+ self._test_modules.append(module)
+ return module
+
+ # Module already loaded (e.g. as a dependency of another module);
+ # return the existing instance instead of an unbound local
+ return next(m for m in self._test_modules if m.dir_name == module_dir)
+
+ def get_test_packs(self) -> List[TestPack]:
+ return self._test_packs
+
+ def get_test_pack(self, name: str) -> TestPack:
+ for test_pack in self._test_packs:
+ if test_pack.name.lower() == name.lower():
+ return test_pack
+ return None
def _stop_modules(self, kill=False):
LOGGER.info("Stopping test modules")
@@ -692,18 +686,7 @@ def _stop_modules(self, kill=False):
def _stop_module(self, module, kill=False):
LOGGER.debug("Stopping test module " + module.container_name)
- try:
- container = module.container
- if container is not None:
- if kill:
- LOGGER.debug("Killing container:" + module.container_name)
- container.kill()
- else:
- LOGGER.debug("Stopping container:" + module.container_name)
- container.stop()
- LOGGER.debug("Container stopped:" + module.container_name)
- except docker.errors.NotFound:
- pass
+ module.stop(kill=kill)
def get_test_modules(self):
return self._test_modules
@@ -729,3 +712,12 @@ def get_test_case(self, name):
def get_session(self):
return self._session
+
+ def _set_test_modules_error(self, current_test):
+ """Set all remaining tests to error"""
+ for i in range(self._current_module, len(self._test_modules_running)):
+ start_idx = current_test if i == self._current_module else 0
+ for j in range(start_idx, len(self._test_modules_running[i].tests)):
+ self.get_session().set_test_result_error(
+ self._test_modules_running[i].tests[j]
+ )
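
For reference, `_load_test_packs` above only requires each file in the test packs directory to carry `name`, `tests` and `language` keys, matching the `TestPack` fields introduced below. A minimal sketch of such a file, with a hypothetical pack name and test entry:

```python
import json

# Hypothetical contents of a test pack file; only the three top-level keys
# read by _load_test_packs are required.
sample = json.loads('''
{
  "name": "Pilot",
  "tests": [
    {"name": "connection.dhcp_address", "required_result": "Required"}
  ],
  "language": {
    "compliant_description": "The device passed all required tests."
  }
}
''')
assert {"name", "tests", "language"} <= sample.keys()
```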
diff --git a/framework/python/src/test_orc/test_pack.py b/framework/python/src/test_orc/test_pack.py
new file mode 100644
index 000000000..a2e7c5f97
--- /dev/null
+++ b/framework/python/src/test_orc/test_pack.py
@@ -0,0 +1,58 @@
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Represents a testing pack."""
+from typing import List, Dict
+from dataclasses import dataclass, field
+from collections import defaultdict
+
+
+@dataclass
+class TestPack: # pylint: disable=too-few-public-methods,too-many-instance-attributes
+ """Represents a test pack."""
+
+ name: str = "undefined"
+ description: str = ""
+ tests: List[dict] = field(default_factory=lambda: [])
+ language: Dict = field(default_factory=lambda: defaultdict(dict))
+
+ def get_test(self, test_name: str) -> dict:
+ """Get details of a test from the test pack"""
+
+ for test in self.tests:
+ if "name" in test and test["name"].lower() == test_name.lower():
+ return test
+
+ def get_required_result(self, test_name: str) -> str:
+ """Fetch the required result of the test"""
+
+ test = self.get_test(test_name)
+
+ if test is not None and "required_result" in test:
+ return test["required_result"]
+
+ return "Informational"
+
+ def get_message(self, name: str) -> str:
+ if name in self.language:
+ return self.language[name]
+ return "Message not found"
+
+ def to_dict(self):
+ return {
+ "name": self.name,
+ "description": self.description,
+ "tests": self.tests,
+ "language": self.language
+ }
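
A short usage sketch of the dataclass above (the test name and message key are illustrative): tests missing from the pack default to `Informational`, and unknown message keys fall back to `Message not found`.

```python
pack = TestPack(
    name="Pilot",
    tests=[{"name": "ntp.network.ntp_support", "required_result": "Required"}],
    language={"high_risk": "The device has been assessed to be high risk."},
)

print(pack.get_required_result("ntp.network.ntp_support"))  # Required
print(pack.get_required_result("unknown.test"))             # Informational
print(pack.get_message("limited_risk"))                     # Message not found
```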
diff --git a/framework/requirements.txt b/framework/requirements.txt
index c31978d99..6f54d3a99 100644
--- a/framework/requirements.txt
+++ b/framework/requirements.txt
@@ -1,11 +1,11 @@
# Requirements for the core module
-requests<2.32.0
+requests==2.32.3
# Requirements for the net_orc module
-docker==7.0.0
+docker==7.1.0
ipaddress==1.0.23
netifaces==0.11.0
-scapy==2.5.0
+scapy==2.6.0
# Requirements for the test_orc module
weasyprint==61.2
@@ -21,13 +21,24 @@ pydantic==2.7.1
# Requirements for testing
pytest==7.4.4
pytest-timeout==2.2.0
+responses==0.25.3
+
# Requirements for the report
markdown==3.5.2
# Requirements for the session
-cryptography==42.0.7
+cryptography==43.0.1
pytz==2024.1
# Requirements for the risk profile
python-dateutil==2.9.0
+
+# Requirements for MQTT client
+paho-mqtt==2.1.0
+
+# Requirements for background tasks
+APScheduler==3.10.4
+
+# Requirements for reports generation
+Jinja2==3.1.4
diff --git a/local/system.json.example b/local/system.json.example
index 23023bead..df89b502f 100644
--- a/local/system.json.example
+++ b/local/system.json.example
@@ -1,10 +1,11 @@
{
"network": {
- "device_intf": "enx123456789123",
- "internet_intf": "enx123456789124"
+ "device_intf": "",
+ "internet_intf": ""
},
"log_level": "INFO",
"startup_timeout": 60,
"monitor_period": 300,
- "max_device_reports": 0
+ "max_device_reports": 0,
+ "org_name": ""
}
diff --git a/make/DEBIAN/control b/make/DEBIAN/control
index 488f69458..c822e65fd 100644
--- a/make/DEBIAN/control
+++ b/make/DEBIAN/control
@@ -1,5 +1,5 @@
Package: Testrun
-Version: 1.3.1
+Version: 2.0.1
Architecture: amd64
Maintainer: Google
Homepage: https://github.com/google/testrun
diff --git a/modules/devices/faux-dev/bin/start_network_service b/modules/devices/faux-dev/bin/start_network_service
index d4bb8a92d..7d689f9dd 100644
--- a/modules/devices/faux-dev/bin/start_network_service
+++ b/modules/devices/faux-dev/bin/start_network_service
@@ -35,7 +35,7 @@ else
INTF=$DEFINED_IFACE
fi
-#Create and set permissions on the output files
+# Create and set permissions on the output files
OUTPUT_DIR=/runtime/validation/
LOG_FILE=$OUTPUT_DIR/$MODULE_NAME.log
RESULT_FILE=$OUTPUT_DIR/result.json
diff --git a/modules/devices/faux-dev/faux-dev.Dockerfile b/modules/devices/faux-dev/faux-dev.Dockerfile
index ecfdfc5c2..18901a2a1 100644
--- a/modules/devices/faux-dev/faux-dev.Dockerfile
+++ b/modules/devices/faux-dev/faux-dev.Dockerfile
@@ -12,13 +12,14 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-# Image name: test-run/faux-dev
-FROM test-run/base:latest
+# Image name: testrun/faux-dev
+FROM testrun/base:latest
ARG MODULE_NAME=faux-dev
ARG MODULE_DIR=modules/devices/$MODULE_NAME
+ARG COMMON_DIR=framework/python/src/common
-#Update and get all additional requirements not contained in the base image
+# Update and get all additional requirements not contained in the base image
RUN apt-get update --fix-missing
# NTP requires interactive installation, so we're going to turn that off
@@ -34,4 +35,4 @@ COPY $MODULE_DIR/conf /testrun/conf
COPY $MODULE_DIR/bin /testrun/bin
# Copy over all python files
-COPY $MODULE_DIR/python /testrun/python
\ No newline at end of file
+COPY $MODULE_DIR/python /testrun/python
diff --git a/modules/devices/faux-dev/python/src/logger.py b/modules/devices/faux-dev/python/src/logger.py
deleted file mode 100644
index a727ad7bb..000000000
--- a/modules/devices/faux-dev/python/src/logger.py
+++ /dev/null
@@ -1,61 +0,0 @@
-# Copyright 2023 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# https://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Sets up the logger to be used for the faux-device."""
-
-import json
-import logging
-import os
-
-LOGGERS = {}
-_LOG_FORMAT = '%(asctime)s %(name)-8s %(levelname)-7s %(message)s'
-_DATE_FORMAT = '%b %02d %H:%M:%S'
-_CONF_DIR = 'conf'
-_CONF_FILE_NAME = 'system.json'
-_LOG_DIR = '/runtime/validation'
-
-# Set log level
-with open(os.path.join(_CONF_DIR, _CONF_FILE_NAME),
- encoding='utf-8') as conf_file:
- system_conf_json = json.load(conf_file)
-
-log_level_str = system_conf_json['log_level']
-log_level = logging.getLevelName(log_level_str)
-
-log_format = logging.Formatter(fmt=_LOG_FORMAT, datefmt=_DATE_FORMAT)
-
-
-def add_file_handler(log, log_file):
- """Add file handler to existing log."""
- handler = logging.FileHandler(os.path.join(_LOG_DIR, log_file + '.log'))
- handler.setFormatter(log_format)
- log.addHandler(handler)
-
-
-def add_stream_handler(log):
- """Add stream handler to existing log."""
- handler = logging.StreamHandler()
- handler.setFormatter(log_format)
- log.addHandler(handler)
-
-
-def get_logger(name, log_file=None):
- """Return logger for requesting class."""
- if name not in LOGGERS:
- LOGGERS[name] = logging.getLogger(name)
- LOGGERS[name].setLevel(log_level)
- add_stream_handler(LOGGERS[name])
- if log_file is not None:
- add_file_handler(LOGGERS[name], log_file)
- return LOGGERS[name]
diff --git a/modules/devices/faux-dev/python/src/util.py b/modules/devices/faux-dev/python/src/util.py
deleted file mode 100644
index 81f9d2ced..000000000
--- a/modules/devices/faux-dev/python/src/util.py
+++ /dev/null
@@ -1,48 +0,0 @@
-# Copyright 2023 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# https://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Provides basic utilities for the faux-device."""
-import subprocess
-import shlex
-
-
-def run_command(cmd, logger, output=True):
- """Runs a process at the os level
- By default, returns the standard output and error output
- If the caller sets optional output parameter to False,
- will only return a boolean result indicating if it was
- successful in running the command. Failure is indicated
- by any return code from the process other than zero."""
-
- success = False
- with subprocess.Popen(
- shlex.split(cmd),
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE) as process:
-
- stdout, stderr = process.communicate()
-
- if process.returncode != 0:
- err_msg = f'{stderr.strip()}. Code: {process.returncode}'
- logger.error('Command Failed: ' + cmd)
- logger.error('Error: ' + err_msg)
- else:
- success = True
- logger.debug('Command succeeded: ' + cmd)
- if output:
- out = stdout.strip().decode('utf-8')
- logger.debug('Command output: ' + out)
- return success, out
- else:
- return success, None
diff --git a/modules/network/base/base.Dockerfile b/modules/network/base/base.Dockerfile
index b30f6a7d9..7f6edb409 100644
--- a/modules/network/base/base.Dockerfile
+++ b/modules/network/base/base.Dockerfile
@@ -12,8 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-# Image name: test-run/base
-FROM ubuntu@sha256:e6173d4dc55e76b87c4af8db8821b1feae4146dd47341e4d431118c7dd060a74
+# Image name: testrun/base
+FROM ubuntu@sha256:77d57fd89366f7d16615794a5b53e124d742404e20f035c22032233f1826bd6a
RUN apt-get update
@@ -30,8 +30,9 @@ COPY $COMMON_DIR/ /testrun/python/src/common
# Setup the base python requirements
COPY $MODULE_DIR/python /testrun/python
-# Install all python requirements for the module
-RUN pip3 install -r /testrun/python/requirements.txt
+# Install all python requirements for the module
+# --break-system-packages flag used to bypass PEP668
+RUN pip3 install --break-system-packages -r /testrun/python/requirements.txt
# Add the bin files
COPY $MODULE_DIR/bin /testrun/bin
@@ -42,5 +43,5 @@ RUN dos2unix /testrun/bin/*
# Make sure all the bin files are executable
RUN chmod u+x /testrun/bin/*
-#Start the network module
+# Start the network module
ENTRYPOINT [ "/testrun/bin/start_module" ]
\ No newline at end of file
diff --git a/modules/network/base/bin/start_module b/modules/network/base/bin/start_module
index 8e8cb5e4b..fb2823afd 100644
--- a/modules/network/base/bin/start_module
+++ b/modules/network/base/bin/start_module
@@ -22,7 +22,7 @@ DEFAULT_IFACE=veth0
# Create a local user that matches the same as the host
# to be used for correct file ownership for various logs
-# HOST_USER mapped in via docker container environemnt variables
+# HOST_USER mapped in via docker container environment variables
useradd $HOST_USER
# Enable IPv6 for all containers
@@ -42,6 +42,7 @@ fi
# Extract the necessary config parameters
MODULE_NAME=$(echo "$CONF" | jq -r '.config.meta.name')
DEFINED_IFACE=$(echo "$CONF" | jq -r '.config.network.interface')
+HOST=$(echo "$CONF" | jq -r '.config.network.host')
GRPC=$(echo "$CONF" | jq -r '.config.grpc')
# Validate the module name is present
@@ -70,14 +71,19 @@ $BIN_DIR/setup_binaries $BIN_DIR
echo "Starting module $MODULE_NAME on local interface $INTF..."
-# Wait for interface to become ready
-$BIN_DIR/wait_for_interface $INTF
+# Only non-host containers will have a specific
+# interface for capturing
+if [[ "$HOST" != "true" ]]; then
-# Small pause to let the interface stabalize before starting the capture
-#sleep 1
+ # Wait for interface to become ready
+ $BIN_DIR/wait_for_interface $INTF
-# Start network capture
-$BIN_DIR/capture $MODULE_NAME $INTF
+ # Small pause to let the interface stabilize before starting the capture
+ #sleep 1
+
+ # Start network capture
+ $BIN_DIR/capture $MODULE_NAME $INTF
+fi
# Start the grpc server
if [[ ! -z $GRPC && ! $GRPC == "null" ]]
@@ -96,4 +102,4 @@ fi
sleep 3
# Start the networking service
-$BIN_DIR/start_network_service $MODULE_NAME $INTF
\ No newline at end of file
+$BIN_DIR/start_network_service $MODULE_NAME $INTF
diff --git a/modules/network/base/python/requirements.txt b/modules/network/base/python/requirements.txt
index 9d9473d74..01abf05ce 100644
--- a/modules/network/base/python/requirements.txt
+++ b/modules/network/base/python/requirements.txt
@@ -1,3 +1,10 @@
-grpcio
-grpcio-tools
-netifaces
\ No newline at end of file
+# Dependencies of user-defined packages
+# Package dependencies should always be defined before the user-defined
+# packages to prevent auto-upgrades of stable dependencies
+protobuf==5.28.3
+
+# User-defined packages
+grpcio==1.67.1
+grpcio-tools==1.67.1
+netifaces==0.11.0
+
diff --git a/modules/network/base/python/src/grpc_server/start_server.py b/modules/network/base/python/src/grpc_server/start_server.py
index d372949e5..9c34ec736 100644
--- a/modules/network/base/python/src/grpc_server/start_server.py
+++ b/modules/network/base/python/src/grpc_server/start_server.py
@@ -46,6 +46,5 @@ def run():
print('gRPC server starting on port ' + port)
serve(port)
-
if __name__ == '__main__':
run()
diff --git a/modules/network/dhcp-1/dhcp-1.Dockerfile b/modules/network/dhcp-1/dhcp-1.Dockerfile
index e50ed9a95..7df94b4fd 100644
--- a/modules/network/dhcp-1/dhcp-1.Dockerfile
+++ b/modules/network/dhcp-1/dhcp-1.Dockerfile
@@ -12,8 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-# Image name: test-run/dhcp-primary
-FROM test-run/base:latest
+# Image name: testrun/dhcp-primary
+FROM testrun/base:latest
ARG MODULE_NAME=dhcp-1
ARG MODULE_DIR=modules/network/$MODULE_NAME
diff --git a/modules/network/dhcp-1/python/src/grpc_server/dhcp_leases.py b/modules/network/dhcp-1/python/src/grpc_server/dhcp_leases.py
index aa2945759..e2318ac02 100644
--- a/modules/network/dhcp-1/python/src/grpc_server/dhcp_leases.py
+++ b/modules/network/dhcp-1/python/src/grpc_server/dhcp_leases.py
@@ -68,8 +68,8 @@ def get_leases(self):
leases.append(lease)
except Exception as e: # pylint: disable=W0718
# Let non-lease lines fail without extra checks
- LOGGER.error('Making Lease Error: ' + str(e))
- LOGGER.error('Not a valid lease line: ' + line)
+ LOGGER.info('Not a valid lease line: ' + line)
+ LOGGER.error('Get lease error: ' + str(e))
return leases
def delete_lease(self, ip_addr):
diff --git a/modules/network/dhcp-2/dhcp-2.Dockerfile b/modules/network/dhcp-2/dhcp-2.Dockerfile
index 66ea857c3..4dcd7a819 100644
--- a/modules/network/dhcp-2/dhcp-2.Dockerfile
+++ b/modules/network/dhcp-2/dhcp-2.Dockerfile
@@ -12,8 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-# Image name: test-run/dhcp-primary
-FROM test-run/base:latest
+# Image name: testrun/dhcp-primary
+FROM testrun/base:latest
ARG MODULE_NAME=dhcp-2
ARG MODULE_DIR=modules/network/$MODULE_NAME
diff --git a/modules/network/dhcp-2/python/src/grpc_server/dhcp_leases.py b/modules/network/dhcp-2/python/src/grpc_server/dhcp_leases.py
index 08e6feabe..f6db83094 100644
--- a/modules/network/dhcp-2/python/src/grpc_server/dhcp_leases.py
+++ b/modules/network/dhcp-2/python/src/grpc_server/dhcp_leases.py
@@ -58,9 +58,9 @@ def get_leases(self):
leases = []
lease_list_raw = self._get_lease_list()
LOGGER.info('Raw Leases:\n' + str(lease_list_raw) + '\n')
- lease_list_start = lease_list_raw.find('=========',0)
- lease_list_start = lease_list_raw.find('\n',lease_list_start)
- lease_list = lease_list_raw[lease_list_start+1:]
+ lease_list_start = lease_list_raw.find('=========', 0)
+ lease_list_start = lease_list_raw.find('\n', lease_list_start)
+ lease_list = lease_list_raw[lease_list_start + 1:]
lines = lease_list.split('\n')
for line in lines:
try:
@@ -68,8 +68,8 @@ def get_leases(self):
leases.append(lease)
except Exception as e: # pylint: disable=W0718
# Let non-lease lines fail without extra checks
- LOGGER.error('Making Lease Error: ' + str(e))
- LOGGER.error('Not a valid lease line: ' + line)
+ LOGGER.info('Not a valid lease line: ' + line)
+ LOGGER.error('Get lease error: ' + str(e))
return leases
def delete_lease(self, ip_addr):
diff --git a/modules/network/dns/dns.Dockerfile b/modules/network/dns/dns.Dockerfile
index d59b8a391..2b46dfb4a 100644
--- a/modules/network/dns/dns.Dockerfile
+++ b/modules/network/dns/dns.Dockerfile
@@ -12,8 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-# Image name: test-run/dns
-FROM test-run/base:latest
+# Image name: testrun/dns
+FROM testrun/base:latest
ARG MODULE_NAME=dns
ARG MODULE_DIR=modules/network/$MODULE_NAME
diff --git a/modules/network/gateway/gateway.Dockerfile b/modules/network/gateway/gateway.Dockerfile
index 885e4a9f0..2b72174ab 100644
--- a/modules/network/gateway/gateway.Dockerfile
+++ b/modules/network/gateway/gateway.Dockerfile
@@ -12,8 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-# Image name: test-run/gateway
-FROM test-run/base:latest
+# Image name: testrun/gateway
+FROM testrun/base:latest
ARG MODULE_NAME=gateway
ARG MODULE_DIR=modules/network/$MODULE_NAME
diff --git a/testing/unit/build.sh b/modules/network/host/bin/start_network_service
similarity index 82%
rename from testing/unit/build.sh
rename to modules/network/host/bin/start_network_service
index db84e0299..b94b6ff7c 100644
--- a/testing/unit/build.sh
+++ b/modules/network/host/bin/start_network_service
@@ -1,4 +1,4 @@
-#!/bin/bash -e
+#!/bin/bash
# Copyright 2023 Google LLC
#
@@ -14,4 +14,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-sudo docker build -f testing/unit/unit_test.Dockerfile -t testrun/unit-test .
\ No newline at end of file
+echo "Starting host service..."
+
+# Keep host container running until stopped
+while true; do
+ sleep 3
+done
diff --git a/modules/network/host/conf/module_config.json b/modules/network/host/conf/module_config.json
new file mode 100644
index 000000000..87ec39a35
--- /dev/null
+++ b/modules/network/host/conf/module_config.json
@@ -0,0 +1,24 @@
+{
+ "config": {
+ "meta": {
+ "name": "host",
+ "display_name": "Host",
+ "description": "Used to access host level networking operations"
+ },
+ "network": {
+ "host": true
+ },
+ "grpc":{
+ "port": 5001
+ },
+ "docker": {
+ "depends_on": "base",
+ "mounts": [
+ {
+ "source": "runtime/network",
+ "target": "/runtime/network"
+ }
+ ]
+ }
+ }
+}
\ No newline at end of file
diff --git a/modules/network/host/host.Dockerfile b/modules/network/host/host.Dockerfile
new file mode 100644
index 000000000..60c8bf59a
--- /dev/null
+++ b/modules/network/host/host.Dockerfile
@@ -0,0 +1,34 @@
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Image name: testrun/host
+FROM testrun/base:latest
+
+ARG MODULE_NAME=host
+ARG MODULE_DIR=modules/network/$MODULE_NAME
+
+# Update and get all additional requirements not contained in the base image
+RUN apt-get update --fix-missing
+
+# Install all necessary packages
+RUN apt-get install -y net-tools ethtool
+
+# Copy over all configuration files
+COPY $MODULE_DIR/conf /testrun/conf
+
+# Copy over all binary files
+COPY $MODULE_DIR/bin /testrun/bin
+
+# Copy over all python files
+COPY $MODULE_DIR/python /testrun/python
diff --git a/modules/network/host/python/src/grpc_server/network_service.py b/modules/network/host/python/src/grpc_server/network_service.py
new file mode 100644
index 000000000..cbb3a1b7a
--- /dev/null
+++ b/modules/network/host/python/src/grpc_server/network_service.py
@@ -0,0 +1,120 @@
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""gRPC Network Service for the Host network module"""
+import proto.grpc_pb2_grpc as pb2_grpc
+import proto.grpc_pb2 as pb2
+
+import traceback
+from common import logger
+from common import util
+
+LOG_NAME = 'network_service'
+LOGGER = None
+
+
+class NetworkService(pb2_grpc.HostNetworkModule):
+ """gRPC endpoints for the Host container"""
+
+ def __init__(self):
+ global LOGGER
+ LOGGER = logger.get_logger(LOG_NAME, 'host')
+
+ def CheckInterfaceStatus(self, request, context): # pylint: disable=W0613
+ try:
+ status = self.check_interface_status(request.iface_name)
+ return pb2.CheckInterfaceStatusResponse(code=200, status=status)
+ except Exception as e: # pylint: disable=W0718
+ fail_message = 'Failed to read iface status: ' + str(e)
+ LOGGER.error(fail_message)
+ LOGGER.error(traceback.format_exc())
+ return pb2.CheckInterfaceStatusResponse(code=500, status=False)
+
+ def GetIfaceConnectionStats(self, request, context): # pylint: disable=W0613
+ try:
+ stats = self.get_iface_connection_stats(request.iface_name)
+ return pb2.GetIfaceStatsResponse(code=200, stats=stats)
+ except Exception as e: # pylint: disable=W0718
+ fail_message = 'Failed to read connection stats: ' + str(e)
+ LOGGER.error(fail_message)
+ LOGGER.error(traceback.format_exc())
+ return pb2.GetIfaceStatsResponse(code=500, stats=False)
+
+ def GetIfacePortStats(self, request, context): # pylint: disable=W0613
+ try:
+ stats = self.get_iface_port_stats(request.iface_name)
+ return pb2.GetIfaceStatsResponse(code=200, stats=stats)
+ except Exception as e: # pylint: disable=W0718
+ fail_message = 'Failed to read port stats: ' + str(e)
+ LOGGER.error(fail_message)
+ LOGGER.error(traceback.format_exc())
+ return pb2.GetIfaceStatsResponse(code=500, stats=False)
+
+ def SetIfaceDown(self, request, context): # pylint: disable=W0613
+ try:
+ success = self.set_interface_down(request.iface_name)
+ return pb2.SetIfaceResponse(code=200, success=success)
+ except Exception as e: # pylint: disable=W0718
+ fail_message = 'Failed to set interface down: ' + str(e)
+ LOGGER.error(fail_message)
+ LOGGER.error(traceback.format_exc())
+ return pb2.SetIfaceResponse(code=500, success=False)
+
+ def SetIfaceUp(self, request, context): # pylint: disable=W0613
+ try:
+ success = self.set_interface_up(request.iface_name)
+ return pb2.SetIfaceResponse(code=200, success=success)
+ except Exception as e: # pylint: disable=W0718
+ fail_message = 'Failed to set interface up: ' + str(e)
+ LOGGER.error(fail_message)
+ LOGGER.error(traceback.format_exc())
+ return pb2.SetIfaceResponse(code=500, success=False)
+
+ def check_interface_status(self, interface_name):
+ output = util.run_command(cmd=f'ip link show {interface_name}', output=True)
+ if 'state DOWN ' in output[0]:
+ return False
+ else:
+ return True
+
+ def get_iface_connection_stats(self, iface):
+ """Extract information about the physical connection"""
+ response = util.run_command(f'ethtool {iface}')
+ if len(response[1]) == 0:
+ return response[0]
+ else:
+ return None
+
+ def get_iface_port_stats(self, iface):
+ """Extract information about packets connection"""
+ response = util.run_command(f'ethtool -S {iface}')
+ if len(response[1]) == 0:
+ return response[0]
+ else:
+ return None
+
+ def set_interface_up(self, interface_name):
+ """Set the interface to the up state"""
+ response = util.run_command('ip link set dev ' + interface_name + ' up')
+ if len(response[1]) == 0:
+ return response[0]
+ else:
+ return None
+
+ def set_interface_down(self, interface_name):
+ """Set the interface to the up state"""
+ response = util.run_command('ip link set dev ' + interface_name + ' down')
+ if len(response[1]) == 0:
+ return response[0]
+ else:
+ return None
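
Since the RPC wrappers above simply delegate to these helpers, the service can also be exercised directly when debugging inside the host container; a hedged sketch (the interface name is a placeholder):

```python
service = NetworkService()

iface = 'eth0'  # placeholder; use a real host interface
if not service.check_interface_status(iface):
    service.set_interface_up(iface)
print(service.get_iface_connection_stats(iface))  # ethtool output, or None
```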
diff --git a/modules/network/host/python/src/grpc_server/proto/grpc.proto b/modules/network/host/python/src/grpc_server/proto/grpc.proto
new file mode 100644
index 000000000..c881b13f7
--- /dev/null
+++ b/modules/network/host/python/src/grpc_server/proto/grpc.proto
@@ -0,0 +1,37 @@
+syntax = "proto3";
+
+service HostNetworkModule {
+
+ rpc CheckInterfaceStatus(CheckInterfaceStatusRequest) returns (CheckInterfaceStatusResponse) {};
+ rpc GetIfaceConnectionStats(GetIfaceStatsRequest) returns (GetIfaceStatsResponse) {};
+ rpc SetIfaceDown(SetIfaceRequest) returns (SetIfaceResponse) {};
+ rpc SetIfaceUp(SetIfaceRequest) returns (SetIfaceResponse) {};
+}
+
+message CheckInterfaceStatusRequest {
+ string iface_name = 1;
+}
+
+message CheckInterfaceStatusResponse {
+ int32 code = 1;
+ bool status = 2;
+}
+
+message GetIfaceStatsRequest {
+ string iface_name = 1;
+}
+
+message GetIfaceStatsResponse {
+ int32 code = 1;
+ string stats = 2;
+}
+
+message SetIfaceRequest {
+ string iface_name = 1;
+}
+
+message SetIfaceResponse {
+ int32 code = 1;
+ bool success = 2;
+}
+
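
The `proto.grpc_pb2*` modules imported by the server are generated from this file. A sketch of that step using the `grpcio-tools` package pinned in the module requirements (paths are illustrative; in the repo, the `setup_grpc_clients` script drives this at container start):

```python
from grpc_tools import protoc

# Generate grpc_pb2.py and grpc_pb2_grpc.py alongside the proto file.
protoc.main([
    'grpc_tools.protoc',
    '-I.',                 # proto search path (illustrative)
    '--python_out=.',
    '--grpc_python_out=.',
    'grpc.proto',
])
```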
diff --git a/modules/network/host/python/src/grpc_server/start_server.py b/modules/network/host/python/src/grpc_server/start_server.py
new file mode 100644
index 000000000..962277188
--- /dev/null
+++ b/modules/network/host/python/src/grpc_server/start_server.py
@@ -0,0 +1,50 @@
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Base class for starting the gRPC server for a network module."""
+from concurrent import futures
+import grpc
+import proto.grpc_pb2_grpc as pb2_grpc
+from network_service import NetworkService
+import argparse
+
+DEFAULT_PORT = '5001'
+
+
+def serve(port):
+ server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
+ pb2_grpc.add_HostNetworkModuleServicer_to_server(NetworkService(), server)
+ server.add_insecure_port('[::]:' + port)
+ server.start()
+ server.wait_for_termination()
+
+
+def run():
+ parser = argparse.ArgumentParser(
+ description='GRPC Server for Network Module',
+ formatter_class=argparse.ArgumentDefaultsHelpFormatter)
+ parser.add_argument('-p',
+ '--port',
+ default=DEFAULT_PORT,
+ help='Define the default port to run the server on.')
+
+ args = parser.parse_args()
+
+ port = args.port
+
+ print('gRPC server starting on port ' + port)
+ serve(port)
+
+if __name__ == '__main__':
+ run()
diff --git a/modules/network/ntp/ntp.Dockerfile b/modules/network/ntp/ntp.Dockerfile
index aa6f63e3f..d047770ef 100644
--- a/modules/network/ntp/ntp.Dockerfile
+++ b/modules/network/ntp/ntp.Dockerfile
@@ -12,8 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-# Image name: test-run/ntp
-FROM test-run/base:latest
+# Image name: testrun/ntp
+FROM testrun/base:latest
ARG MODULE_NAME=ntp
ARG MODULE_DIR=modules/network/$MODULE_NAME
@@ -21,6 +21,8 @@ ARG MODULE_DIR=modules/network/$MODULE_NAME
# Set DEBIAN_FRONTEND to noninteractive mode
ENV DEBIAN_FRONTEND=noninteractive
+RUN apt-get update
+
# Install all necessary packages
RUN apt-get install -y chrony
diff --git a/modules/network/ntp/python/src/ntp_server.py b/modules/network/ntp/python/src/ntp_server.py
index 42fe21e77..fbe3ac17e 100644
--- a/modules/network/ntp/python/src/ntp_server.py
+++ b/modules/network/ntp/python/src/ntp_server.py
@@ -38,7 +38,7 @@ def is_running(self):
if __name__ == '__main__':
ntp = NTPServer()
ntp.start()
- # give some time for the server to start
+ # Give some time for the server to start
running = False
for _ in range(10):
running = ntp.is_running()
diff --git a/modules/network/radius/bin/start_network_service b/modules/network/radius/bin/start_network_service
index d285c20d9..aad840c3a 100644
--- a/modules/network/radius/bin/start_network_service
+++ b/modules/network/radius/bin/start_network_service
@@ -27,7 +27,7 @@ cp $CONF_DIR/ca.crt /etc/ssl/certs/ca-certificates.crt
python3 -u $PYTHON_SRC_DIR/authenticator.py &
-#Create and set permissions on the log file
+# Create and set permissions on the log file
touch $LOG_FILE
chown $HOST_USER $LOG_FILE
diff --git a/modules/network/radius/python/requirements.txt b/modules/network/radius/python/requirements.txt
index 37d126cb1..c814da515 100644
--- a/modules/network/radius/python/requirements.txt
+++ b/modules/network/radius/python/requirements.txt
@@ -1,3 +1,11 @@
-eventlet
-pbr
-transitions
\ No newline at end of file
+# Dependencies of user-defined packages
+# Package dependencies should always be defined before the user-defined
+# packages to prevent auto-upgrades of stable dependencies
+dnspython==2.6.1
+greenlet==3.0.3
+six==1.16.0
+
+# User-defined packages
+eventlet==0.36.1
+pbr==6.1.0
+transitions==0.9.2
diff --git a/modules/network/radius/radius.Dockerfile b/modules/network/radius/radius.Dockerfile
index 4c8f8fac5..802480ce9 100644
--- a/modules/network/radius/radius.Dockerfile
+++ b/modules/network/radius/radius.Dockerfile
@@ -12,8 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-# Image name: test-run/radius
-FROM test-run/base:latest
+# Image name: testrun/radius
+FROM testrun/base:latest
ARG MODULE_NAME=radius
ARG MODULE_DIR=modules/network/$MODULE_NAME
@@ -25,7 +25,8 @@ RUN apt-get update && apt-get install -y openssl freeradius git
RUN git clone --branch 0.0.25 https://github.com/faucetsdn/chewie
# Install chewie as Python module
-RUN pip3 install chewie/
+# --break-system-packages flag used to bypass PEP668
+RUN pip3 install --break-system-packages chewie/
EXPOSE 1812/udp
EXPOSE 1813/udp
@@ -40,4 +41,5 @@ COPY $MODULE_DIR/bin /testrun/bin
COPY $MODULE_DIR/python /testrun/python
# Install all python requirements for the module
-RUN pip3 install -r /testrun/python/requirements.txt
\ No newline at end of file
+# --break-system-packages flag used to bypass PEP668
+RUN pip3 install --break-system-packages -r /testrun/python/requirements.txt
\ No newline at end of file
diff --git a/modules/network/template/template.Dockerfile b/modules/network/template/template.Dockerfile
index 1c3060496..265725258 100644
--- a/modules/network/template/template.Dockerfile
+++ b/modules/network/template/template.Dockerfile
@@ -12,8 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-# Image name: test-run/template
-FROM test-run/base:latest
+# Image name: testrun/template
+FROM testrun/base:latest
ARG MODULE_NAME=template
ARG MODULE_DIR=modules/network/$MODULE_NAME
diff --git a/modules/test/base/README.md b/modules/test/base/README.md
index e7f05d80e..24a725607 100644
--- a/modules/test/base/README.md
+++ b/modules/test/base/README.md
@@ -14,6 +14,13 @@ The ```config/module_config.json``` provides the name and description of the mod
Within the ```python/src``` directory, basic logging and environment variables are provided to the test module.
+Within the ```usr/local/etc``` directory there is a local copy of the MAC OUI database, in case a new copy cannot be downloaded during the install or update process.
+
+## gRPC clients
+Within the python directory, gRPC client code is provided to allow test modules to programmatically modify the various network services provided by Testrun.
+
+These currently include obtaining information about, and controlling, the DHCP servers in a failover configuration.
+
## Tests covered
No tests are run by this module
\ No newline at end of file
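
As an illustration of the client code the README section above refers to, a hedged sketch using the host module client added in this change (the import path and interface name are assumptions):

```python
from host.client import Client  # generated host/ package; path is an assumption

client = Client()  # defaults to external.localhost:5001
response = client.check_interface_status('eth0')  # placeholder interface
if response.code == 200 and not response.status:
    client.set_iface_up('eth0')
```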
diff --git a/modules/test/base/base.Dockerfile b/modules/test/base/base.Dockerfile
index 4d8c0399a..253270ea9 100644
--- a/modules/test/base/base.Dockerfile
+++ b/modules/test/base/base.Dockerfile
@@ -12,17 +12,23 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-# Image name: test-run/base-test
-FROM ubuntu@sha256:e6173d4dc55e76b87c4af8db8821b1feae4146dd47341e4d431118c7dd060a74
+# Builder stage
+# Image name: testrun/base-test
+FROM python:3.10-slim AS builder
ARG MODULE_NAME=base
ARG MODULE_DIR=modules/test/$MODULE_NAME
ARG COMMON_DIR=framework/python/src/common
-RUN apt-get update
+# Install additional requirements needed to build python packages
+RUN apt-get update && \
+ apt-get install -y gcc dos2unix
-# Install common software
-RUN DEBIAN_FRONTEND=noninteractive apt-get install -yq net-tools iputils-ping tzdata tcpdump iproute2 jq python3 python3-pip dos2unix nmap wget --fix-missing
+# Create the virtual environment
+RUN python -m venv /opt/venv
+
+# Activate the virtual environment
+ENV PATH="/opt/venv/bin:$PATH"
# Install common python modules
COPY $COMMON_DIR/ /testrun/python/src/common
@@ -31,7 +37,7 @@ COPY $COMMON_DIR/ /testrun/python/src/common
COPY $MODULE_DIR/python /testrun/python
# Install all python requirements for the module
-RUN pip3 install -r /testrun/python/requirements.txt
+RUN pip install -r /testrun/python/requirements.txt
# Copy over all binary files
COPY $MODULE_DIR/bin /testrun/bin
@@ -49,6 +55,7 @@ ARG CONTAINER_PROTO_DIR=testrun/python/src/grpc_server/proto
COPY $NET_MODULE_DIR/dhcp-1/$NET_MODULE_PROTO_DIR $CONTAINER_PROTO_DIR/dhcp1/
COPY $NET_MODULE_DIR/dhcp-2/$NET_MODULE_PROTO_DIR $CONTAINER_PROTO_DIR/dhcp2/
+COPY $NET_MODULE_DIR/host/$NET_MODULE_PROTO_DIR $CONTAINER_PROTO_DIR/host/
# Copy the cached version of oui.txt in case the download fails
RUN mkdir -p /usr/local/etc
@@ -57,5 +64,21 @@ COPY $MODULE_DIR/usr/local/etc/oui.txt /usr/local/etc/oui.txt
# Update the oui.txt file from ieee
RUN wget https://standards-oui.ieee.org/oui.txt -O /usr/local/etc/oui.txt || echo "Unable to update the MAC OUI database"
+# Operational stage
+FROM python:3.10-slim
+
+# Install common software
+RUN DEBIAN_FRONTEND=noninteractive apt-get update && apt-get install -yq net-tools iputils-ping tzdata tcpdump iproute2 jq dos2unix nmap wget procps --fix-missing
+
+# Get the virtual environment from builder stage
+COPY --from=builder /opt/venv /opt/venv
+
+# Copy over all testrun files from the builder stage
+COPY --from=builder /testrun /testrun
+COPY --from=builder /usr/local/etc/oui.txt /usr/local/etc/oui.txt
+
+# Activate the virtual environment by setting the PATH
+ENV PATH="/opt/venv/bin:$PATH"
+
# Start the test module
ENTRYPOINT [ "/testrun/bin/start" ]
\ No newline at end of file
diff --git a/modules/test/base/bin/setup b/modules/test/base/bin/setup
new file mode 100644
index 000000000..514466548
--- /dev/null
+++ b/modules/test/base/bin/setup
@@ -0,0 +1,81 @@
+#!/bin/bash
+
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Define the local mount point to store local files to
+export OUTPUT_DIR="/runtime/output"
+
+# Directory where all binaries will be loaded
+export BIN_DIR="/testrun/bin"
+
+# Default interface should be veth0 for all containers
+export IFACE=veth0
+
+# Assign the current host user
+export HOST_USER=$(whoami)
+
+# Create a local user that matches the same as the host
+# to be used for correct file ownership for various logs
+# HOST_USER mapped in via docker container environment variables
+if ! id "$HOST_USER" &>/dev/null; then
+ useradd "$HOST_USER"
+else
+ echo User $HOST_USER already exists
+fi
+
+# Create the output directory
+mkdir -p "$OUTPUT_DIR"
+
+# Set permissions on the output files
+chown -R $HOST_USER $OUTPUT_DIR
+
+# Enable IPv6 for all containers
+sysctl net.ipv6.conf.all.disable_ipv6=0
+sysctl -p
+
+# Read in the config file
+CONF_FILE="/testrun/conf/module_config.json"
+CONF=`cat $CONF_FILE`
+
+if [[ -z $CONF ]]
+then
+ echo "No config file present at $CONF_FILE. Exiting startup."
+ exit 1
+fi
+
+# Extract the necessary config parameters
+export MODULE_NAME=$(echo "$CONF" | jq -r '.config.meta.name')
+export NETWORK_REQUIRED=$(echo "$CONF" | jq -r '.config.network')
+export GRPC=$(echo "$CONF" | jq -r '.config.grpc')
+
+# Validate the module name is present
+if [[ -z "$MODULE_NAME" || "$MODULE_NAME" == "null" ]]
+then
+ echo "No module name present in $CONF_FILE. Exiting startup."
+ exit 1
+fi
+
+# Setup the PYTHONPATH so all imports work as expected
+echo "Setting up PYTHONPATH..."
+export PYTHONPATH=$($BIN_DIR/setup_python_path)
+echo "PYTHONPATH: $PYTHONPATH"
+
+echo "Configuring binary files..."
+$BIN_DIR/setup_binaries $BIN_DIR
+
+# Build all gRPC files from the proto for use in
+# gRPC clients for communications to network modules
+echo "Building gRPC files from available proto files..."
+$BIN_DIR/setup_grpc_clients
\ No newline at end of file
diff --git a/modules/test/base/bin/start b/modules/test/base/bin/start
index 37902b868..d1f29989f 100755
--- a/modules/test/base/bin/start
+++ b/modules/test/base/bin/start
@@ -14,4 +14,15 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-/testrun/bin/start_module
\ No newline at end of file
+# Allow one argument which is the unit test file to run
+# instead of running the test module
+UNIT_TEST_FILE=$1
+
+source /testrun/bin/setup
+
+# Run the unit test file if one was provided, otherwise start the test module
+if [[ -z "$UNIT_TEST_FILE" ]];then
+ /testrun/bin/start_module
+else
+ python3 $UNIT_TEST_FILE
+fi
diff --git a/modules/test/base/bin/start_module b/modules/test/base/bin/start_module
index 0ee68fa6a..fb79cc018 100644
--- a/modules/test/base/bin/start_module
+++ b/modules/test/base/bin/start_module
@@ -1,102 +1,46 @@
-#!/bin/bash
-
-# Copyright 2023 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# https://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Define the local mount point to store local files to
-OUTPUT_DIR="/runtime/output"
-
-# Directory where all binaries will be loaded
-BIN_DIR="/testrun/bin"
-
-# Default interface should be veth0 for all containers
-IFACE=veth0
-
-# Create a local user that matches the same as the host
-# to be used for correct file ownership for various logs
-# HOST_USER mapped in via docker container environemnt variables
-useradd $HOST_USER
-
-# Set permissions on the output files
-chown -R $HOST_USER $OUTPUT_DIR
-
-# Enable IPv6 for all containers
-sysctl net.ipv6.conf.all.disable_ipv6=0
-sysctl -p
-
-# Read in the config file
-CONF_FILE="/testrun/conf/module_config.json"
-CONF=`cat $CONF_FILE`
-
-if [[ -z $CONF ]]
-then
- echo "No config file present at $CONF_FILE. Exiting startup."
- exit 1
-fi
-
-# Extract the necessary config parameters
-MODULE_NAME=$(echo "$CONF" | jq -r '.config.meta.name')
-NETWORK_REQUIRED=$(echo "$CONF" | jq -r '.config.network')
-GRPC=$(echo "$CONF" | jq -r '.config.grpc')
-
-# Validate the module name is present
-if [[ -z "$MODULE_NAME" || "$MODULE_NAME" == "null" ]]
-then
- echo "No module name present in $CONF_FILE. Exiting startup."
- exit 1
-fi
-
-# Setup the PYTHONPATH so all imports work as expected
-echo "Setting up PYTHONPATH..."
-export PYTHONPATH=$($BIN_DIR/setup_python_path)
-echo "PYTHONPATH: $PYTHONPATH"
-
-# Build all gRPC files from the proto for use in
-# gRPC clients for communications to network modules
-echo "Building gRPC files from available proto files..."
-$BIN_DIR/setup_grpc_clients
-
-echo "Configuring binary files..."
-$BIN_DIR/setup_binaries $BIN_DIR
-
-echo "Starting module $MODULE_NAME..."
-
-# Only start network services if the test container needs
-# a network connection to run its tests
-if [ $NETWORK_REQUIRED == "true" ];then
- # Wait for interface to become ready
- $BIN_DIR/wait_for_interface $IFACE
-
- # Start network capture
- $BIN_DIR/capture $MODULE_NAME $IFACE
-fi
-
-# Start the grpc server
-if [[ ! -z $GRPC && ! $GRPC == "null" ]]
-then
- GRPC_PORT=$(echo "$GRPC" | jq -r '.port')
- if [[ ! -z $GRPC_PORT && ! $GRPC_PORT == "null" ]]
- then
- echo "gRPC port resolved from config: $GRPC_PORT"
- $BIN_DIR/start_grpc "-p $GRPC_PORT"
- else
- $BIN_DIR/start_grpc
- fi
-fi
-
-# Small pause to let all core services stabalize
-sleep 3
-
-# Start the test module
+#!/bin/bash
+
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+echo "Starting module $MODULE_NAME..."
+
+# Only start network services if the test container needs
+# a network connection to run its tests
+if [ $NETWORK_REQUIRED == "true" ];then
+ # Wait for interface to become ready
+ $BIN_DIR/wait_for_interface $IFACE
+
+ # Start network capture
+ $BIN_DIR/capture $MODULE_NAME $IFACE
+fi
+
+# Start the grpc server
+if [[ ! -z $GRPC && ! $GRPC == "null" ]]
+then
+ GRPC_PORT=$(echo "$GRPC" | jq -r '.port')
+ if [[ ! -z $GRPC_PORT && ! $GRPC_PORT == "null" ]]
+ then
+ echo "gRPC port resolved from config: $GRPC_PORT"
+ $BIN_DIR/start_grpc "-p $GRPC_PORT"
+ else
+ $BIN_DIR/start_grpc
+ fi
+fi
+
+# Small pause to let all core services stabilize
+sleep 3
+
+# Start the test module
$BIN_DIR/start_test_module $MODULE_NAME $IFACE
\ No newline at end of file
diff --git a/modules/test/base/python/requirements.txt b/modules/test/base/python/requirements.txt
index 9d9473d74..0ed8a792d 100644
--- a/modules/test/base/python/requirements.txt
+++ b/modules/test/base/python/requirements.txt
@@ -1,3 +1,9 @@
-grpcio
-grpcio-tools
-netifaces
\ No newline at end of file
+# Dependencies of user-defined packages
+# Package dependencies should always be defined before the user-defined
+# packages to prevent auto-upgrades of stable dependencies
+protobuf==5.28.0
+
+# User-defined packages
+grpcio==1.67.1
+grpcio-tools==1.67.1
+netifaces==0.11.0
\ No newline at end of file
diff --git a/modules/test/base/python/src/grpc/proto/host/client.py b/modules/test/base/python/src/grpc/proto/host/client.py
new file mode 100644
index 000000000..e08d3376a
--- /dev/null
+++ b/modules/test/base/python/src/grpc/proto/host/client.py
@@ -0,0 +1,63 @@
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License
+"""gRPC client module for the secondary DHCP Server"""
+import grpc
+import host.grpc_pb2_grpc as pb2_grpc
+import host.grpc_pb2 as pb2
+
+DEFAULT_PORT = '5001'
+DEFAULT_HOST = 'external.localhost'  # Default host module address
+
+
+class Client():
+ """gRPC Client for the secondary DHCP server"""
+ def __init__(self, port=DEFAULT_PORT, host=DEFAULT_HOST):
+ self._port = port
+ self._host = host
+
+ # Create a gRPC channel to connect to the server
+ self._channel = grpc.insecure_channel(self._host + ':' + self._port)
+
+ # Create a gRPC stub
+ self._stub = pb2_grpc.HostNetworkModuleStub(self._channel)
+
+ def check_interface_status(self, iface_name):
+ # Create a request message
+ request = pb2.CheckInterfaceStatusRequest()
+ request.iface_name = iface_name
+
+ # Make the RPC call
+ response = self._stub.CheckInterfaceStatus(request)
+
+ return response
+
+ def set_iface_down(self, iface_name):
+ # Create a request message
+ request = pb2.SetIfaceRequest()
+ request.iface_name = iface_name
+
+ # Make the RPC call
+ response = self._stub.SetIfaceDown(request)
+
+ return response
+
+ def set_iface_up(self, iface_name):
+ # Create a request message
+ request = pb2.SetIfaceRequest()
+ request.iface_name = iface_name
+
+ # Make the RPC call
+ response = self._stub.SetIfaceUp(request)
+
+ return response
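
A brief round-trip sketch of the client above, as a test module might toggle a link (the interface name is a placeholder); a `code` of 500 mirrors the server-side exception handling shown earlier:

```python
client = Client()

down = client.set_iface_down('endev0a')  # placeholder interface name
if down.code == 200 and down.success:
    up = client.set_iface_up('endev0a')
    print('restored:', up.code == 200 and up.success)
else:
    print('server reported a failure, see host module logs')
```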
diff --git a/modules/test/base/python/src/logger.py b/modules/test/base/python/src/logger.py
deleted file mode 100644
index e6a2b004c..000000000
--- a/modules/test/base/python/src/logger.py
+++ /dev/null
@@ -1,61 +0,0 @@
-# Copyright 2023 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# https://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Sets up the logger to be used for the test modules."""
-import json
-import logging
-
-LOGGERS = {}
-_LOG_FORMAT = '%(asctime)s %(name)-8s %(levelname)-7s %(message)s'
-_DATE_FORMAT = '%b %02d %H:%M:%S'
-_DEFAULT_LEVEL = logging.INFO
-_CONF_FILE_NAME = 'testrun/system.json'
-_LOG_DIR = '/runtime/output/'
-
-# Set log level
-try:
- with open(_CONF_FILE_NAME,
- encoding='UTF-8') as config_json_file:
- system_conf_json = json.load(config_json_file)
-
- log_level_str = system_conf_json['log_level']
- log_level = logging.getLevelName(log_level_str)
-except OSError:
- # TODO: Print out warning that log level is incorrect or missing
- log_level = _DEFAULT_LEVEL
-
-log_format = logging.Formatter(fmt=_LOG_FORMAT, datefmt=_DATE_FORMAT)
-
-
-def add_file_handler(log, log_file, log_dir=_LOG_DIR):
- handler = logging.FileHandler(log_dir + log_file + '.log')
- handler.setFormatter(log_format)
- log.addHandler(handler)
-
-
-def add_stream_handler(log):
- handler = logging.StreamHandler()
- handler.setFormatter(log_format)
- log.addHandler(handler)
-
-
-def get_logger(name, log_file=None, log_dir=_LOG_DIR):
- if name not in LOGGERS:
- LOGGERS[name] = logging.getLogger(name)
- LOGGERS[name].setLevel(log_level)
- add_stream_handler(LOGGERS[name])
- if log_file is not None:
- log_dir = log_dir if log_dir is not None else _LOG_DIR
- add_file_handler(LOGGERS[name], log_file, log_dir=log_dir)
- return LOGGERS[name]
diff --git a/modules/test/base/python/src/test_module.py b/modules/test/base/python/src/test_module.py
index 00f74df82..21de78143 100644
--- a/modules/test/base/python/src/test_module.py
+++ b/modules/test/base/python/src/test_module.py
@@ -17,6 +17,9 @@
import os
import util
from datetime import datetime
+import traceback
+
+from common.statuses import TestResult
LOGGER = None
RESULTS_DIR = '/runtime/output/'
@@ -29,7 +32,6 @@ class TestModule:
def __init__(self,
module_name,
log_name,
- log_dir=None,
conf_file=CONF_FILE,
results_dir=RESULTS_DIR):
self._module_name = module_name
@@ -38,19 +40,17 @@ def __init__(self,
self._ipv4_addr = os.environ.get('IPV4_ADDR', '')
self._ipv4_subnet = os.environ.get('IPV4_SUBNET', '')
self._ipv6_subnet = os.environ.get('IPV6_SUBNET', '')
- self._add_logger(log_name=log_name,
- module_name=module_name,
- log_dir=log_dir)
+ self._dev_iface_mac = os.environ.get('DEV_IFACE_MAC', '')
+ self._device_test_pack = json.loads(os.environ.get('DEVICE_TEST_PACK', '{}'))
+ self._add_logger(log_name=log_name)
self._config = self._read_config(
conf_file=conf_file if conf_file is not None else CONF_FILE)
self._device_ipv4_addr = None
self._device_ipv6_addr = None
- def _add_logger(self, log_name, module_name, log_dir=None):
+ def _add_logger(self, log_name):
global LOGGER
- LOGGER = logger.get_logger(name=log_name,
- log_file=module_name,
- log_dir=log_dir) # pylint: disable=E1123
+ LOGGER = logger.get_logger(name=log_name)
def generate_module_report(self):
pass
@@ -64,22 +64,48 @@ def _get_tests(self):
def _get_device_tests(self, device_test_module):
module_tests = self._config['config']['tests']
- if device_test_module is None:
- return module_tests
- elif not device_test_module['enabled']:
- return []
- else:
- for test in module_tests:
- # Resolve device specific configurations for the test if it exists
- # and update module test config with device config options
- if 'tests' in device_test_module:
- if test['name'] in device_test_module['tests']:
- dev_test_config = device_test_module['tests'][test['name']]
- if 'enabled' in dev_test_config:
- test['enabled'] = dev_test_config['enabled']
- if 'config' in test and 'config' in dev_test_config:
- test['config'].update(dev_test_config['config'])
- return module_tests
+ tests_to_run = module_tests
+
+ # If no device-specific test config is provided, all module tests run
+ if device_test_module is not None:
+ # Do not run any tests if module is disabled for this device
+ if not device_test_module['enabled']:
+ return []
+
+ # Tests that will be removed because they are not in the test pack
+ remove_tests = []
+
+ # Check if all tests are in the test pack and enabled for the device
+ for test in tests_to_run:
+
+ # Resolve device specific configurations for the test if it exists
+ # and update module test config with device config options
+ if 'tests' in device_test_module:
+
+ if test['name'] in device_test_module['tests']:
+ dev_test_config = device_test_module['tests'][test['name']]
+
+ # Check if the test is enabled in the device config
+ if 'enabled' in dev_test_config:
+ test['enabled'] = dev_test_config['enabled']
+
+ # Copy over any device specific test configuration
+ if 'config' in test and 'config' in dev_test_config:
+ test['config'].update(dev_test_config['config'])
+
+ # Search for the module test in the test pack
+ found = False
+ for test_pack_test in self._device_test_pack['tests']:
+ if test_pack_test['name'] == test['name']:
+ # Test is in the test pack
+ found = True
+
+ if not found:
+ remove_tests.append(test)
+ for test in remove_tests:
+ tests_to_run.remove(test)
+
+ return tests_to_run
def _get_device_test_module(self):
if 'DEVICE_TEST_MODULES' in os.environ:
@@ -113,21 +139,27 @@ def run_tests(self):
except Exception as e: # pylint: disable=W0718
LOGGER.error(f'An error occurred whilst running {test["name"]}')
LOGGER.error(e)
+ traceback.print_exc()
else:
- LOGGER.info(f'Test {test["name"]} not implemented. Skipping')
+ LOGGER.error(f'Test {test["name"]} has not been implemented')
+ result = TestResult.ERROR, 'This test could not be found'
else:
LOGGER.debug(f'Test {test["name"]} is disabled')
+ result = (TestResult.DISABLED,
+ 'This test did not run because it is disabled')
+ # Check if the test module has returned a result
if result is not None:
+
# Compliant or non-compliant as a boolean only
if isinstance(result, bool):
- test['result'] = 'Compliant' if result else 'Non-Compliant'
+ test['result'] = (TestResult.COMPLIANT
+ if result else TestResult.NON_COMPLIANT)
test['description'] = 'No description was provided for this test'
else:
- # TODO: This is assuming that result is an array but haven't checked
# Error result
if result[0] is None:
- test['result'] = 'Error'
+ test['result'] = TestResult.ERROR
if len(result) > 1:
test['description'] = result[1]
else:
@@ -135,13 +167,14 @@ def run_tests(self):
# Compliant / Non-Compliant result
elif isinstance(result[0], bool):
- test['result'] = 'Compliant' if result[0] else 'Non-Compliant'
+ test['result'] = (TestResult.COMPLIANT
+ if result[0] else TestResult.NON_COMPLIANT)
# Result may be a string, e.g. Error, Feature Not Detected
elif isinstance(result[0], str):
test['result'] = result[0]
else:
LOGGER.error(f'Unknown result detected: {result[0]}')
- test['result'] = 'Error'
+ test['result'] = TestResult.ERROR
# Check that description is a string
if isinstance(result[1], str):
@@ -152,12 +185,17 @@ def run_tests(self):
# Check if details were provided
if len(result)>2:
test['details'] = result[2]
+
+ # Check if tags were provided
+ if len(result)>3:
+ test['tags'] = result[3]
else:
- test['result'] = 'Error'
+ LOGGER.debug('No result was returned from the test module')
+ test['result'] = TestResult.ERROR
test['description'] = 'An error occurred whilst running this test'
# Remove the steps to resolve if compliant already
- if (test['result'] == 'Compliant' and 'recommendations' in test):
+ if (test['result'] == TestResult.COMPLIANT and 'recommendations' in test):
test.pop('recommendations')
test['end'] = datetime.now().isoformat()
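
Taken together, the branches above mean a test implementation can hand back any of several result shapes. This sketch enumerates them (illustrative only, assuming the TestResult handling introduced by this patch):

```python
# Result shapes accepted by run_tests above (illustrative sketch).
def example_results():
  yield True                                    # bool: compliant
  yield False, 'Device did not respond'         # (bool, description)
  yield 'Error', 'Port stats not available'     # (str, description)
  yield True, 'Types detected', {'unicast': 3}  # (..., details)
  yield True, 'Types detected', {}, ['tag']     # (..., details, tags)

for result in example_results():
  print(result)
```
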
@@ -182,7 +220,7 @@ def _write_results(self, results):
def _get_device_ipv4(self):
command = f"""/testrun/bin/get_ipv4_addr {self._ipv4_subnet}
{self._device_mac.upper()}"""
- text = util.run_command(command)[0]
+ text = util.run_command(command)[0] # pylint: disable=E1120
if text:
return text.split('\n')[0]
return None
diff --git a/modules/test/base/python/src/util.py b/modules/test/base/python/src/util.py
deleted file mode 100644
index 006648037..000000000
--- a/modules/test/base/python/src/util.py
+++ /dev/null
@@ -1,47 +0,0 @@
-# Copyright 2023 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# https://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Provides basic utilities for a test module."""
-import subprocess
-import shlex
-import logger
-
-LOGGER = logger.get_logger('util')
-
-def run_command(cmd, output=True):
- """Runs a process at the os level
- By default, returns the standard output and error output
- If the caller sets optional output parameter to False,
- will only return a boolean result indicating if it was
- successful in running the command. Failure is indicated
- by any return code from the process other than zero."""
-
- success = False
- with subprocess.Popen(shlex.split(cmd),
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE) as process:
- stdout, stderr = process.communicate()
- if process.returncode != 0 and output:
- err_msg = f'{stderr.strip()}. Code: {process.returncode}'
- LOGGER.error('Command Failed: ' + cmd)
- LOGGER.error('Error: ' + err_msg)
- else:
- success = True
- LOGGER.debug('Command succeeded: ' + cmd)
- if output:
- out = stdout.strip().decode('utf-8')
- LOGGER.debug('Command output: ' + out)
- return out, stderr
- else:
- return success
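
Call sites in the test modules keep using the two return conventions of this removed helper (a tuple of output, or a bare success flag), hence the new `# pylint: disable=E1120` markers elsewhere in this patch. A simplified, self-contained mirror for reference, not the replacement implementation:

```python
import shlex
import subprocess

def run_command(cmd, output=True):
  # Simplified mirror of the removed helper: return (stdout, stderr)
  # when output is requested, otherwise a bare success boolean.
  with subprocess.Popen(shlex.split(cmd), stdout=subprocess.PIPE,
                        stderr=subprocess.PIPE) as process:
    stdout, stderr = process.communicate()
  if output:
    return stdout.strip().decode('utf-8'), stderr
  return process.returncode == 0

print(run_command('echo hello'))          # ('hello', b'')
print(run_command('true', output=False))  # True
```
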
diff --git a/modules/test/baseline/baseline.Dockerfile b/modules/test/baseline/baseline.Dockerfile
index f7d21f8c8..7a83c8de2 100644
--- a/modules/test/baseline/baseline.Dockerfile
+++ b/modules/test/baseline/baseline.Dockerfile
@@ -12,8 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-# Image name: test-run/baseline-test
-FROM test-run/base-test:latest
+# Image name: testrun/baseline-test
+FROM testrun/base-test:latest
ARG MODULE_NAME=baseline
ARG MODULE_DIR=modules/test/$MODULE_NAME
diff --git a/modules/test/baseline/bin/start_test_module b/modules/test/baseline/bin/start_test_module
index a529c2fcf..c3209261a 100644
--- a/modules/test/baseline/bin/start_test_module
+++ b/modules/test/baseline/bin/start_test_module
@@ -41,11 +41,8 @@ else
fi
# Create and set permissions on the log files
-LOG_FILE=/runtime/output/$MODULE_NAME.log
RESULT_FILE=/runtime/output/$MODULE_NAME-result.json
-touch $LOG_FILE
touch $RESULT_FILE
-chown $HOST_USER $LOG_FILE
chown $HOST_USER $RESULT_FILE
# Run the python script that will execute the tests for this module
diff --git a/modules/test/baseline/conf/module_config.json b/modules/test/baseline/conf/module_config.json
index f28f74a06..d120f1c71 100644
--- a/modules/test/baseline/conf/module_config.json
+++ b/modules/test/baseline/conf/module_config.json
@@ -16,20 +16,17 @@
{
"name": "baseline.compliant",
"test_description": "Simulate a compliant test",
- "expected_behavior": "A compliant test result is generated",
- "required_result": "Required"
+ "expected_behavior": "A compliant test result is generated"
},
{
"name": "baseline.non_compliant",
"test_description": "Simulate a non-compliant test",
- "expected_behavior": "A non-compliant test result is generated",
- "required_result": "Recommended"
+ "expected_behavior": "A non-compliant test result is generated"
},
{
"name": "baseline.skipped",
"test_description": "Simulate a skipped test",
- "expected_behavior": "A skipped test result is generated",
- "required_result": "Skipped"
+ "expected_behavior": "A skipped test result is generated"
}
]
}
diff --git a/modules/test/conn/README.md b/modules/test/conn/README.md
index c2f6377c6..10c7e6917 100644
--- a/modules/test/conn/README.md
+++ b/modules/test/conn/README.md
@@ -17,11 +17,15 @@ Within the ```python/src``` directory, the below tests are executed. A few dhcp
| connection.port_link | The network switch port connected to the device has an active link without errors | When the ethernet cable is connected to the port, the device triggers the port to its enabled \"Link UP\" (LEDs illuminate on device and switch ports if present) state, and the switch shows no errors with the LEDs and when interrogated with a \"show interface\" command on most network switches. | Required |
| connection.port_speed | The network switch port connected to the device has auto-negotiated a speed that is 10 Mbps or higher | When the ethernet cable is connected to the port, the device autonegotiates a speed that can be checked with the \"show interface\" command on most network switches. The output of this command must also show that the \"configured speed\" is set to \"auto\". | Required |
| connection.port_duplex | The network switch port connected to the device has auto-negotiated full-duplex. | When the ethernet cable is connected to the port, the device autonegotiates a full-duplex connection. | Required |
+| connection.switch.arp_inspection | The device implements ARP correctly as per RFC826 | Device continues to operate correctly when ARP inspection is enabled on the switch. No functionality is lost with ARP inspection enabled. | Required |
+| connection.switch.dhcp_snooping | The device operates as a DHCP client and operates correctly when DHCP snooping is enabled on a switch. | Device continues to operate correctly when DHCP snooping is enabled on the switch. | Required |
| connection.dhcp_address | The device under test has received an IP address from the DHCP server and responds to an ICMP echo (ping) request | The device is not set up with a static IP address. The device accepts an IP address from a DHCP server (RFC 2131) and responds successfully to an ICMP echo (ping) request. | Required |
| connection.mac_address | Check and note device physical address. | N/A | Required |
| connection.mac_oui | The device under test has a MAC address prefix that is registered against a known manufacturer. | The MAC address prefix is registered in the IEEE Organizationally Unique Identifier database. | Required |
| connection.private_address | The device under test accepts an IP address that is compliant with RFC 1918 Address Allocation for Private Internets. | The device under test accepts IP addresses within all ranges specified in RFC 1918 and communicates using these addresses. The Internet Assigned Numbers Authority (IANA) has reserved the following three blocks of the IP address space for private internets: 10.0.0.0 - 10.255.255.255 (10/8 prefix), 172.16.0.0 - 172.31.255.255 (172.16/12 prefix), 192.168.0.0 - 192.168.255.255 (192.168/16 prefix). | Required |
| connection.shared_address | Ensure the device supports RFC 6598 IANA-Reserved IPv4 Prefix for Shared Address Space | The device under test accepts IP addresses within the range specified in RFC 6598 and communicates using these addresses. | Required |
+| connection.dhcp_disconnect | The device under test issues a new DHCPREQUEST packet after a physical port disconnection and reconnection. | A client SHOULD use DHCP to reacquire or verify its IP address and network parameters whenever the local network parameters may have changed; e.g., at system boot time or after a disconnection from the local network, as the local network configuration may change without the client's or user's knowledge. If a client has knowledge of a previous network address and is unable to contact a local DHCP server, the client may continue to use the previous network address until the lease for that address expires. If the lease expires before the client can contact a DHCP server, the client must immediately discontinue use of the previous network address and may inform local users of the problem. | Required |
+| connection.dhcp_disconnect_ip_change | When the device is disconnected, update the device IP on the DHCP server and reconnect the device. Ensure the device receives a new IP address. | If the IP address for a device was changed on the DHCP server while the device was disconnected, the device should request and use the new IP address upon reconnecting to the network | Required |
| connection.single_ip | The network switch port connected to the device reports only one IP address for the device under test. | The device under test does not behave as a network switch and only requests one IP address. This test is to avoid that devices implement network switches that allow connecting strings of daisy-chained devices to one single network port, as this would not make 802.1x port-based authentication possible. | Required |
| connection.target_ping | The device under test responds to an ICMP echo (ping) request. | The device under test responds to an ICMP echo (ping) request. | Required |
| connection.ipaddr.ip_change | The device responds to a ping (ICMP echo request) to the new IP address it has received after the initial DHCP lease has expired. | If the lease expires before the client receives a DHCPACK, the client moves to the INIT state, MUST immediately stop any other network processing, and requires network initialization parameters as if the client were uninitialized. If the client then receives a DHCPACK allocating the client its previous network address, the client SHOULD continue network processing. If the client is given a new network address, it MUST NOT continue using the previous network address and SHOULD notify the local users of the problem. | Required |
diff --git a/modules/test/conn/bin/get_packet_counts.sh b/modules/test/conn/bin/get_packet_counts.sh
new file mode 100644
index 000000000..fab933a08
--- /dev/null
+++ b/modules/test/conn/bin/get_packet_counts.sh
@@ -0,0 +1,52 @@
+#!/bin/bash
+
+# Check if the pcap file and MAC address arguments are provided
+if [ -z "$1" ] || [ -z "$2" ]; then
+ echo "Usage: $0 <pcap_file> <mac_address>"
+ exit 1
+fi
+
+# Assign the pcap file and MAC address from the arguments
+PCAP_FILE="$1"
+MAC_ADDRESS="$2"
+
+# Check if the pcap file exists
+if [ ! -f "$PCAP_FILE" ]; then
+ echo "Error: File $PCAP_FILE does not exist."
+ exit 1
+fi
+
+# Count multicast packets from the MAC address
+multicast_from_count=$(tshark -r "$PCAP_FILE" -Y "(eth.dst[0] & 1) == 1 && eth.dst != ff:ff:ff:ff:ff:ff && eth.src == $MAC_ADDRESS" -T fields -e frame.number | wc -l)
+
+# Count multicast packets to the MAC address
+multicast_to_count=$(tshark -r "$PCAP_FILE" -Y "(eth.dst[0] & 1) == 1 && eth.dst != ff:ff:ff:ff:ff:ff && eth.dst == $MAC_ADDRESS" -T fields -e frame.number | wc -l)
+
+# Count broadcast packets from the MAC address (broadcast MAC address is FF:FF:FF:FF:FF:FF)
+broadcast_from_count=$(tshark -r "$PCAP_FILE" -Y "eth.dst == ff:ff:ff:ff:ff:ff && eth.src == $MAC_ADDRESS" -T fields -e frame.number | wc -l)
+
+# Count broadcast packets to the MAC address
+broadcast_to_count=$(tshark -r "$PCAP_FILE" -Y "eth.dst == ff:ff:ff:ff:ff:ff && eth.dst == $MAC_ADDRESS" -T fields -e frame.number | wc -l)
+
+# Count unicast packets from the MAC address
+unicast_from_count=$(tshark -r "$PCAP_FILE" -Y "eth.dst != ff:ff:ff:ff:ff:ff && (eth.dst[0] & 1) == 0 && eth.src == $MAC_ADDRESS" -T fields -e frame.number | wc -l)
+
+# Count unicast packets to the MAC address
+unicast_to_count=$(tshark -r "$PCAP_FILE" -Y "eth.dst != ff:ff:ff:ff:ff:ff && (eth.dst[0] & 1) == 0 && eth.dst == $MAC_ADDRESS" -T fields -e frame.number | wc -l)
+
+# Output the results as a JSON object
+echo "{
+ \"mac_address\": \"$MAC_ADDRESS\",
+ \"multicast\": {
+ \"from\": $multicast_from_count,
+ \"to\": $multicast_to_count
+ },
+ \"broadcast\": {
+ \"from\": $broadcast_from_count,
+ \"to\": $broadcast_to_count
+ },
+ \"unicast\": {
+ \"from\": $unicast_from_count,
+ \"to\": $unicast_to_count
+ }
+}"
diff --git a/modules/test/conn/bin/start_test_module b/modules/test/conn/bin/start_test_module
index d85ae7d6b..4cdba0ae9 100644
--- a/modules/test/conn/bin/start_test_module
+++ b/modules/test/conn/bin/start_test_module
@@ -40,9 +40,7 @@ fi
# Create and set permissions on the log files
LOG_FILE=/runtime/output/$MODULE_NAME.log
RESULT_FILE=/runtime/output/$MODULE_NAME-result.json
-touch $LOG_FILE
touch $RESULT_FILE
-chown $HOST_USER $LOG_FILE
chown $HOST_USER $RESULT_FILE
# Run the python script that will execute the tests for this module
diff --git a/modules/test/conn/conf/module_config.json b/modules/test/conn/conf/module_config.json
index 5289e7eb0..03ae51d89 100644
--- a/modules/test/conn/conf/module_config.json
+++ b/modules/test/conn/conf/module_config.json
@@ -16,48 +16,42 @@
{
"name": "connection.port_link",
"test_description": "The network switch port connected to the device has an active link without errors",
- "expected_behavior": "When the etherent cable is connected to the port, the device triggers the port to its enabled \"Link UP\" (LEDs illuminate on device and switch ports if present) state, and the switch shows no errors with the LEDs and when interrogated with a \"show interface\" command on most network switches.",
- "required_result": "Required"
+ "expected_behavior": "When the ethernet cable is connected to the port, the device triggers the port to its enabled \"Link UP\" (LEDs illuminate on device and switch ports if present) state, and the switch shows no errors with the LEDs and when interrogated with a \"show interface\" command on most network switches."
},
{
"name": "connection.port_speed",
"test_description": "The network switch port connected to the device has auto-negotiated a speed that is 10 Mbps or higher",
- "expected_behavior": "When the ethernet cable is connected to the port, the device autonegotiates a speed that can be checked with the \"show interface\" command on most network switches. The output of this command must also show that the \"configured speed\" is set to \"auto\".",
- "required_result": "Required"
+ "expected_behavior": "When the ethernet cable is connected to the port, the device autonegotiates a speed that can be checked with the \"show interface\" command on most network switches. The output of this command must also show that the \"configured speed\" is set to \"auto\"."
},
{
"name": "connection.port_duplex",
"test_description": "The network switch port connected to the device has auto-negotiated full-duplex",
- "expected_behavior": "When the ethernet cable is connected to the port, the device autonegotiates a full-duplex connection.",
- "required_result": "Required"
+ "expected_behavior": "When the ethernet cable is connected to the port, the device autonegotiates a full-duplex connection."
},
{
"name": "connection.switch.arp_inspection",
"test_description": "The device implements ARP correctly as per RFC826",
- "expected_behavior": "Device continues to operate correctly when ARP inspection is enabled on the switch. No functionality is lost with ARP inspection enabled.",
- "required_result": "Required"
+ "expected_behavior": "Device continues to operate correctly when ARP inspection is enabled on the switch. No functionality is lost with ARP inspection enabled."
},
{
"name": "connection.switch.dhcp_snooping",
"test_description": "The device operates as a DHCP client and operates correctly when DHCP snooping is enabled on a switch.",
- "expected_behavior": "Device continues to operate correctly when DHCP snooping is enabled on the switch.",
- "required_result": "Required"
+ "expected_behavior": "Device continues to operate correctly when DHCP snooping is enabled on the switch."
},
{
"name": "connection.dhcp_address",
"test_description": "The device under test has received an IP address from the DHCP server and responds to an ICMP echo (ping) request",
- "expected_behavior": "The device is not setup with a static IP address. The device accepts an IP address from a DHCP server (RFC 2131) and responds succesfully to an ICMP echo (ping) request.",
- "required_result": "Required",
+ "expected_behavior": "The device is not setup with a static IP address. The device accepts an IP address from a DHCP server (RFC 2131) and responds successfully to an ICMP echo (ping) request.",
"recommendations": [
"Enable DHCP",
- "Install a DHCP client"
+ "Install a DHCP client",
+ "Ensure that your DHCP client renews its lease at the correct time"
]
},
{
"name": "connection.mac_address",
"test_description": "Check and note device physical address.",
"expected_behavior": "N/A",
- "required_result": "Required",
"recommendations": [
"Ensure that the MAC address is set by hardware only"
]
@@ -66,7 +60,6 @@
"name": "connection.mac_oui",
"test_description": "The device under test has a MAC address prefix that is registered against a known manufacturer.",
"expected_behavior": "The MAC address prefix is registered in the IEEE Organizationally Unique Identifier database.",
- "required_result": "Required",
"recommendations": [
"Register the device MAC address with IEEE"
]
@@ -75,7 +68,6 @@
"name": "connection.private_address",
"test_description": "The device under test accepts an IP address that is compliant with RFC 1918 Address Allocation for Private Internets.",
"expected_behavior": "The device under test accepts IP addresses within all ranges specified in RFC 1918 and communicates using these addresses. The Internet Assigned Numbers Authority (IANA) has reserved the following three blocks of the IP address space for private internets. 10.0.0.0 - 10.255.255.255.255 (10/8 prefix). 172.16.0.0 - 172.31.255.255 (172.16/12 prefix). 192.168.0.0 - 192.168.255.255 (192.168/16 prefix)",
- "required_result": "Required",
"config": {
"lease_wait_time_sec": 60,
"ranges": [
@@ -101,7 +93,6 @@
"name": "connection.shared_address",
"test_description": "Ensure the device supports RFC 6598 IANA-Reserved IPv4 Prefix for Shared Address Space",
"expected_behavior": "The device under test accepts IP addresses within the ranges specified in RFC 6598 and communicates using these addresses",
- "required_result": "Required",
"config": {
"lease_wait_time_sec": 60,
"ranges": [
@@ -116,11 +107,20 @@
"Enable shared address space support in the DHCP client"
]
},
+ {
+ "name": "connection.dhcp_disconnect",
+ "test_description": "The device under test issues a new DHCPREQUEST packet after a port physical disconnection and reconnection",
+ "expected_behavior": "A client SHOULD use DHCP to reacquire or verify its IP address and network parameters whenever the local network parameters may have changed; e.g., at system boot time or after a disconnection from the local network, as the local network configuration may change without the client's or user's knowledge. If a client has knowledge ofa previous network address and is unable to contact a local DHCP server, the client may continue to use the previous network address until the lease for that address expires. If the lease expires before the client can contact a DHCP server, the client must immediately discontinue use of the previous network address and may inform local users of the problem."
+ },
+ {
+ "name": "connection.dhcp_disconnect_ip_change",
+ "test_description": "When device is disconnected, update device IP on the DHCP server and reconnect the device. Ensure device received new IP address",
+ "expected_behavior": "If IP address for a device was changed on the DHCP server while the device was disconnected then the device should request and update the new IP upon reconnecting to the network"
+ },
{
"name": "connection.single_ip",
"test_description": "The network switch port connected to the device reports only one IP address for the device under test.",
- "expected_behavior": "The device under test does not behave as a network switch and only requets one IP address. This test is to avoid that devices implement network switches that allow connecting strings of daisy chained devices to one single network port, as this would not make 802.1x port based authentication possible.",
- "required_result": "Required",
+ "expected_behavior": "The device under test does not behave as a network switch and only requests one IP address. This test is to avoid that devices implement network switches that allow connecting strings of daisy chained devices to one single network port, as this would not make 802.1x port based authentication possible.",
"recommendations": [
"Ensure that all ports on the device are isolated",
"Ensure only one DHCP client is running"
@@ -130,7 +130,6 @@
"name": "connection.target_ping",
"test_description": "The device under test responds to an ICMP echo (ping) request.",
"expected_behavior": "The device under test responds to an ICMP echo (ping) request.",
- "required_result": "Required",
"recommendations": [
"Configure device to allow ICMP requests (ping)",
"Create a firewall exception to allow ICMP requests from LAN"
@@ -139,8 +138,7 @@
{
"name": "connection.ipaddr.ip_change",
"test_description": "The device responds to a ping (ICMP echo request) to the new IP address it has received after the initial DHCP lease has expired.",
- "expected_behavior": "If the lease expires before the client receiveds a DHCPACK, the client moves to INIT state, MUST immediately stop any other network processing and requires network initialization parameters as if the client were uninitialized. If the client then receives a DHCPACK allocating the client its previous network addres, the client SHOULD continue network processing. If the client is given a new network address, it MUST NOT continue using the previous network address and SHOULD notify the local users of the problem.",
- "required_result": "Required",
+ "expected_behavior": "If the lease expires before the client receives a DHCPACK, the client moves to INIT state, MUST immediately stop any other network processing and requires network initialization parameters as if the client were uninitialized. If the client then receives a DHCPACK allocating the client its previous network address, the client SHOULD continue network processing. If the client is given a new network address, it MUST NOT continue using the previous network address and SHOULD notify the local users of the problem.",
"config":{
"lease_wait_time_sec": 60
},
@@ -152,7 +150,6 @@
"name": "connection.ipaddr.dhcp_failover",
"test_description": "The device has requested a DHCPREQUEST/REBIND to the DHCP failover server after the primary DHCP server has been brought down.",
"expected_behavior": "",
- "required_result": "Required",
"config":{
"lease_wait_time_sec": 60
},
@@ -164,7 +161,6 @@
"name": "connection.ipv6_slaac",
"test_description": "The device forms a valid IPv6 address as a combination of the IPv6 router prefix and the device interface identifier",
"expected_behavior": "The device under test complies with RFC4862 and forms a valid IPv6 SLAAC address",
- "required_result": "Required",
"recommendations": [
"Install a network manager that supports IPv6",
"Disable DHCPv6"
@@ -174,11 +170,15 @@
"name": "connection.ipv6_ping",
"test_description": "The device responds to an IPv6 ping (ICMPv6 Echo) request to the SLAAC address",
"expected_behavior": "The device responds to the ping as per RFC4443",
- "required_result": "Required",
"recommendations": [
"Enable ping response to IPv6 ICMP requests in network manager settings",
"Create a firewall exception to allow ICMPv6 via LAN"
]
+ },
+ {
+ "name": "communication.network.type",
+ "test_description": "How does the device communicate (flow type) - Unicast, multicast broadcast?",
+ "expected_behavior": "Informational - One or more of these flow types are used"
}
]
}
diff --git a/modules/test/conn/conn.Dockerfile b/modules/test/conn/conn.Dockerfile
index a9f523e44..cda0858c9 100644
--- a/modules/test/conn/conn.Dockerfile
+++ b/modules/test/conn/conn.Dockerfile
@@ -12,8 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-# Image name: test-run/conn-test
-FROM test-run/base-test:latest
+# Image name: testrun/conn-test
+FROM testrun/base-test:latest
ARG MODULE_NAME=conn
ARG MODULE_DIR=modules/test/$MODULE_NAME
@@ -21,13 +21,13 @@ ARG GRPC_PROTO_DIR=/testrun/python/src/grpc/proto/dhcp
ARG GRPC_PROTO_FILE="grpc.proto"
# Install all necessary packages
-RUN apt-get install -y wget
+RUN apt-get install -y wget tshark
# Load the requirements file
COPY $MODULE_DIR/python/requirements.txt /testrun/python
# Install all python requirements for the module
-RUN pip3 install -r /testrun/python/requirements.txt
+RUN pip install -r /testrun/python/requirements.txt
# Copy over all configuration files
COPY $MODULE_DIR/conf /testrun/conf
@@ -35,5 +35,11 @@ COPY $MODULE_DIR/conf /testrun/conf
# Copy over all binary files
COPY $MODULE_DIR/bin /testrun/bin
+# Remove incorrect line endings
+RUN dos2unix /testrun/bin/*
+
+# Make sure all the bin files are executable
+RUN chmod u+x /testrun/bin/*
+
# Copy over all python files
COPY $MODULE_DIR/python /testrun/python
\ No newline at end of file
diff --git a/modules/test/conn/python/requirements.txt b/modules/test/conn/python/requirements.txt
index 7244e9e75..50d56eb37 100644
--- a/modules/test/conn/python/requirements.txt
+++ b/modules/test/conn/python/requirements.txt
@@ -1,3 +1,12 @@
-pyOpenSSL
-scapy
-python-dateutil
\ No newline at end of file
+# Dependencies of user defined packages
+# Package dependencies should always be defined before the user defined
+# packages to prevent auto-upgrades of stable dependencies
+cffi==1.17.1
+cryptography==43.0.1
+pycparser==2.22
+six==1.16.0
+
+# User defined packages
+pyOpenSSL==24.2.1
+scapy==2.6.0
+python-dateutil==2.9.0.post0
diff --git a/modules/test/conn/python/src/connection_module.py b/modules/test/conn/python/src/connection_module.py
index 5e8b78ec3..cfdbad89b 100644
--- a/modules/test/conn/python/src/connection_module.py
+++ b/modules/test/conn/python/src/connection_module.py
@@ -15,15 +15,20 @@
import util
import time
import traceback
-from scapy.all import rdpcap, DHCP, ARP, Ether, IPv6, ICMPv6ND_NS
+import os
+from scapy.error import Scapy_Exception
+from scapy.all import rdpcap, DHCP, ARP, Ether, ICMP, IPv6, ICMPv6ND_NS
from test_module import TestModule
from dhcp1.client import Client as DHCPClient1
from dhcp2.client import Client as DHCPClient2
+from host.client import Client as HostClient
from dhcp_util import DHCPUtil
from port_stats_util import PortStatsUtil
+import json
LOG_NAME = 'test_connection'
OUI_FILE = '/usr/local/etc/oui.txt'
+DEFAULT_BIN_DIR = '/testrun/bin'
STARTUP_CAPTURE_FILE = '/runtime/device/startup.pcap'
MONITOR_CAPTURE_FILE = '/runtime/device/monitor.pcap'
DHCP_CAPTURE_FILE = '/runtime/network/dhcp-1.pcap'
@@ -39,19 +44,29 @@
class ConnectionModule(TestModule):
"""Connection Test module"""
- def __init__(self, module, log_dir=None, conf_file=None, results_dir=None):
+ def __init__(self,
+ module,
+ conf_file=None,
+ results_dir=None,
+ startup_capture_file=STARTUP_CAPTURE_FILE,
+ monitor_capture_file=MONITOR_CAPTURE_FILE,
+ bin_dir=DEFAULT_BIN_DIR):
+
super().__init__(module_name=module,
log_name=LOG_NAME,
- log_dir=log_dir,
conf_file=conf_file,
results_dir=results_dir)
global LOGGER
LOGGER = self._get_logger()
+ self.startup_capture_file = startup_capture_file
+ self.monitor_capture_file = monitor_capture_file
self._port_stats = PortStatsUtil(logger=LOGGER)
self.dhcp1_client = DHCPClient1()
self.dhcp2_client = DHCPClient2()
+ self.host_client = HostClient()
self._dhcp_util = DHCPUtil(self.dhcp1_client, self.dhcp2_client, LOGGER)
self._lease_wait_time_sec = LEASE_WAIT_TIME_DEFAULT
+ self._bin_dir = bin_dir
# ToDo: Move this into some level of testing, leave for
# reference until tests are implemented with these calls
@@ -106,7 +121,8 @@ def _connection_switch_arp_inspection(self):
no_arp = True
# Read all the pcap files
- packets = rdpcap(STARTUP_CAPTURE_FILE) + rdpcap(MONITOR_CAPTURE_FILE)
+ packets = rdpcap(self.startup_capture_file) + rdpcap(
+ self.monitor_capture_file)
for packet in packets:
# We are not interested in packets unless they are ARP packets
@@ -123,12 +139,8 @@ def _connection_switch_arp_inspection(self):
# Check MAC address matches IP address
if (arp_packet.hwsrc == self._device_mac
- and (arp_packet.psrc not in (
- self._device_ipv4_addr,
- '0.0.0.0'
- )) and not arp_packet.psrc.startswith(
- '169.254'
- )):
+ and (arp_packet.psrc not in (self._device_ipv4_addr, '0.0.0.0'))
+ and not arp_packet.psrc.startswith('169.254')):
LOGGER.info(f'Bad ARP packet detected for MAC: {self._device_mac}')
LOGGER.info(f'''ARP packet from IP {arp_packet.psrc}
does not match {self._device_ipv4_addr}''')
@@ -137,7 +149,7 @@ def _connection_switch_arp_inspection(self):
if no_arp:
return None, 'No ARP packets from the device found'
- return True, 'Device uses ARP'
+ return True, 'Device uses ARP correctly'
def _connection_switch_dhcp_snooping(self):
LOGGER.info('Running connection.switch.dhcp_snooping')
@@ -145,7 +157,8 @@ def _connection_switch_dhcp_snooping(self):
disallowed_dhcp_types = [2, 4, 5, 6, 9, 10, 12, 13, 15, 17]
# Read all the pcap files
- packets = rdpcap(STARTUP_CAPTURE_FILE) + rdpcap(MONITOR_CAPTURE_FILE)
+ packets = rdpcap(self.startup_capture_file) + rdpcap(
+ self.monitor_capture_file)
for packet in packets:
# We are not interested in packets unless they are DHCP packets
@@ -158,6 +171,11 @@ def _connection_switch_dhcp_snooping(self):
dhcp_type = self._get_dhcp_type(packet)
if dhcp_type in disallowed_dhcp_types:
+
+ # Check if packet is responding with port unreachable
+ if ICMP in packet and packet[ICMP].type == 3:
+ continue
+
return False, 'Device has sent disallowed DHCP message'
return True, 'Device does not act as a DHCP server'
@@ -189,8 +207,10 @@ def _connection_dhcp_address(self):
LOGGER.info('No IP information found in lease: ' + self._device_mac)
return False, 'No IP information found in lease: ' + self._device_mac
else:
- LOGGER.info('No DHCP lease could be found: ' + self._device_mac)
- return False, 'No DHCP lease could be found: ' + self._device_mac
+ LOGGER.info('No DHCP lease could be found for MAC ' + self._device_mac +
+ ' at the time of this test')
+ return (False, 'No DHCP lease could be found for MAC ' +
+ self._device_mac + ' at the time of this test')
def _connection_mac_address(self):
LOGGER.info('Running connection.mac_address')
@@ -220,7 +240,8 @@ def _connection_single_ip(self):
return result, 'No MAC address found.'
# Read all the pcap files containing DHCP packet information
- packets = rdpcap(STARTUP_CAPTURE_FILE) + rdpcap(MONITOR_CAPTURE_FILE)
+ packets = rdpcap(self.startup_capture_file) + rdpcap(
+ self.monitor_capture_file)
# Extract MAC addresses from DHCP packets
mac_addresses = set()
@@ -230,7 +251,8 @@ def _connection_single_ip(self):
if self._get_dhcp_type(packet) == 3:
mac_address = packet[Ether].src
LOGGER.info('DHCPREQUEST detected MAC address: ' + mac_address)
- if not mac_address.startswith(TR_CONTAINER_MAC_PREFIX):
+ if (not mac_address.startswith(TR_CONTAINER_MAC_PREFIX)
+ and mac_address != self._dev_iface_mac):
mac_addresses.add(mac_address.upper())
# Check if the device mac address is in the list of DHCPREQUESTs
@@ -306,8 +328,10 @@ def _connection_ipaddr_ip_change(self, config):
else:
result = None, 'Failed to create reserved lease for device'
else:
- LOGGER.info('Device has no current DHCP lease')
- result = None, 'Device has no current DHCP lease'
+ LOGGER.info('Device has no current DHCP lease so ' +
+ 'this test could not be run')
+ result = None, ('Device has no current DHCP lease so ' +
+ 'this test could not be run')
# Restore the network
self._dhcp_util.restore_failover_dhcp_server()
LOGGER.info('Waiting 30 seconds for reserved lease to expire')
@@ -360,12 +384,198 @@ def _connection_ipaddr_dhcp_failover(self, config):
else:
result = False, 'Device did not respond to ping'
else:
- result = None, 'Device has no current DHCP lease'
+ result = (
+ None,
+ 'Device has no current DHCP lease so this test could not be run')
else:
LOGGER.error('Network is not ready for this test. Skipping')
result = None, 'Network is not ready for this test'
return result
+ def _connection_dhcp_disconnect(self):
+ LOGGER.info('Running connection.dhcp.disconnect')
+ result = None
+ description = ''
+ dev_iface = os.getenv('DEV_IFACE')
+
+ try:
+ iface_status = self.host_client.check_interface_status(dev_iface)
+ if iface_status.code == 200:
+ LOGGER.info('Successfully resolved iface status')
+ if iface_status.status:
+ lease = self._dhcp_util.get_cur_lease(mac_address=self._device_mac,
+ timeout=self._lease_wait_time_sec)
+ if lease is not None:
+ LOGGER.info('Current device lease resolved')
+ if self._dhcp_util.is_lease_active(lease):
+
+ # Disable the device interface
+ iface_down = self.host_client.set_iface_down(dev_iface)
+ if iface_down:
+ LOGGER.info('Device interface set to down state')
+
+ # Wait for the lease to expire
+ self._dhcp_util.wait_for_lease_expire(lease,
+ self._lease_wait_time_sec)
+
+ # Wait an additional 10 seconds to better test a true disconnect
+ # state
+ LOGGER.info('Waiting 10 seconds before bringing iface back up')
+ time.sleep(10)
+
+ # Enable the device interface
+ iface_up = self.host_client.set_iface_up(dev_iface)
+ if iface_up:
+ LOGGER.info('Device interface set to up state')
+
+ # Confirm device receives a new lease
+ new_lease = self._dhcp_util.get_cur_lease(
+ mac_address=self._device_mac,
+ timeout=self._lease_wait_time_sec)
+ if new_lease is not None:
+ if self._dhcp_util.is_lease_active(new_lease):
+ result = True
+ description = (
+ 'Device received a DHCP lease after disconnect')
+ else:
+ result = False
+ description = (
+ 'Could not confirm DHCP lease active after disconnect')
+ else:
+ result = False
+ description = (
+ 'Device did not receive a DHCP lease after disconnect')
+ else:
+ result = 'Error'
+ description = 'Failed to set device interface to up state'
+ else:
+ result = 'Error'
+ description = 'Failed to set device interface to down state'
+ else:
+ result = 'Error'
+ description = 'No active lease available for device'
+ else:
+ result = 'Error'
+ description = 'Device interface is down'
+ else:
+ result = 'Error'
+ description = 'Device interface could not be resolved'
+
+ except Exception:
+ LOGGER.error('Unable to connect to gRPC server')
+ result = 'Error'
+ description = (
+ 'Unable to connect to gRPC server'
+ )
+ return result, description
+
+ def _connection_dhcp_disconnect_ip_change(self):
+ LOGGER.info('Running connection.dhcp.disconnect_ip_change')
+ result = None
+ description = ''
+ reserved_lease = None
+ dev_iface = os.getenv('DEV_IFACE')
+ if self._dhcp_util.setup_single_dhcp_server():
+ try:
+ iface_status = self.host_client.check_interface_status(dev_iface)
+ if iface_status.code == 200:
+ LOGGER.info('Successfully resolved iface status')
+ if iface_status.status:
+ lease = self._dhcp_util.get_cur_lease(
+ mac_address=self._device_mac, timeout=self._lease_wait_time_sec)
+ if lease is not None:
+ LOGGER.info('Current device lease resolved')
+ if self._dhcp_util.is_lease_active(lease):
+
+ # Add a reserved lease with a different IP
+ ip_address = '10.10.10.30'
+ reserved_lease = self._dhcp_util.add_reserved_lease(
+ lease['hostname'], self._device_mac, ip_address)
+
+ # Disable the device interface
+ iface_down = self.host_client.set_iface_down(dev_iface)
+ if iface_down:
+ LOGGER.info('Device interface set to down state')
+
+ # Wait for the lease to expire
+ self._dhcp_util.wait_for_lease_expire(lease,
+ self._lease_wait_time_sec)
+
+ if reserved_lease:
+ # Wait an additional 10 seconds to better test a true
+ # disconnect state
+ LOGGER.info(
+ 'Waiting 10 seconds before bringing iface back up')
+ time.sleep(10)
+
+ # Enable the device interface
+ iface_up = self.host_client.set_iface_up(dev_iface)
+ if iface_up:
+ LOGGER.info('Device interface set to up state')
+ # Confirm device receives a new lease
+ reserved_lease_accepted = False
+ LOGGER.info('Checking device accepted new IP')
+ for _ in range(5):
+ LOGGER.info('Pinging device at IP: ' + ip_address)
+ if self._ping(ip_address):
+ LOGGER.debug('Ping success')
+ LOGGER.debug(
+ 'Reserved lease confirmed active in device')
+ reserved_lease_accepted = True
+ break
+ else:
+ LOGGER.info('Device did not respond to ping')
+ time.sleep(5) # Wait 5 seconds before trying again
+
+ if reserved_lease_accepted:
+ result = True
+ description = ('Device received expected IP address '
+ 'after disconnect')
+ else:
+ result = False
+ description = (
+ 'Could not confirm DHCP lease active after disconnect'
+ )
+ else:
+ result = 'Error'
+ description = 'Failed to set device interface to up state'
+ else:
+ result = 'Error'
+ description = (
+ 'Failed to set reserved address in DHCP server'
+ )
+ else:
+ result = 'Error'
+ description = 'Failed to set device interface to down state'
+ else:
+ result = 'Error'
+ description = 'No active lease available for device'
+ else:
+ result = 'Error'
+ description = 'Device interface is down'
+ else:
+ result = 'Error'
+ description = 'Device interface could not be resolved'
+ except Exception:
+ LOGGER.error('Unable to connect to gRPC server')
+ result = 'Error'
+ description = (
+ 'Unable to connect to gRPC server'
+ )
+ else:
+ result = 'Error'
+ description = 'Failed to configure network for test'
+
+ if reserved_lease:
+ self._dhcp_util.delete_reserved_lease(self._device_mac)
+
+ # Restore the network
+ self._dhcp_util.restore_failover_dhcp_server()
+ LOGGER.info('Waiting 30 seconds for reserved lease to expire')
+ time.sleep(30)
+ self._dhcp_util.get_cur_lease(mac_address=self._device_mac,
+ timeout=self._lease_wait_time_sec)
+ return result, description
+
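
Both disconnect tests above follow the same skeleton: take the interface down, let the lease expire, bring it back up, then poll for recovery. A distilled sketch of that polling step (the real code drives HostClient and DHCPUtil over gRPC and carries the full error handling shown above):

```python
import time

def poll(check, attempts=5, delay_sec=5):
  # Retry a boolean check a few times before giving up,
  # mirroring the ping loop in the IP-change test above.
  for _ in range(attempts):
    if check():
      return True
    time.sleep(delay_sec)
  return False

# Illustrative: poll(lambda: ping('10.10.10.30'))
```
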
def _get_oui_manufacturer(self, mac_address):
# Do some quick fixes on the format of the mac_address
# to match the oui file pattern
@@ -381,10 +591,12 @@ def _connection_ipv6_slaac(self):
LOGGER.info('Running connection.ipv6_slaac')
result = None
- slac_test, sends_ipv6 = self._has_slaac_addres()
+ slac_test, sends_ipv6 = self._has_slaac_address()
if slac_test:
result = True, f'Device has formed SLAAC address {self._device_ipv6_addr}'
- if result is None:
+ elif slac_test is None:
+ result = 'Error', 'An error occurred whilst running this test'
+ else:
if sends_ipv6:
LOGGER.info('Device does not support IPv6 SLAAC')
result = False, 'Device does not support IPv6 SLAAC'
@@ -393,9 +605,15 @@ def _connection_ipv6_slaac(self):
result = False, 'Device does not support IPv6'
return result
- def _has_slaac_addres(self):
- packet_capture = (rdpcap(STARTUP_CAPTURE_FILE) +
- rdpcap(MONITOR_CAPTURE_FILE) + rdpcap(DHCP_CAPTURE_FILE))
+ def _has_slaac_address(self):
+ packet_capture = (rdpcap(self.startup_capture_file) +
+ rdpcap(self.monitor_capture_file))
+
+ try:
+ packet_capture += rdpcap(DHCP_CAPTURE_FILE)
+ except (FileNotFoundError, Scapy_Exception):
+ LOGGER.error('dhcp-1.pcap not found or empty, ignoring')
+
sends_ipv6 = False
for packet_number, packet in enumerate(packet_capture, start=1):
if IPv6 in packet and packet.src == self._device_mac:
@@ -432,7 +650,7 @@ def _ping(self, host, ipv6=False):
cmd += ' -6 ' if ipv6 else ''
cmd += str(host)
#cmd = 'ping -c 1 ' + str(host)
- success = util.run_command(cmd, output=False)
+ success = util.run_command(cmd, output=False) # pylint: disable=E1120
return success
def restore_failover_dhcp_server(self, subnet):
@@ -478,6 +696,67 @@ def setup_single_dhcp_server(self):
else:
return False, 'Secondary DHCP server stop command failed'
+ def _communication_network_type(self):
+ try:
+ result = 'Informational'
+ description = ''
+ details = ''
+ packets = self.get_network_packet_types()
+ details = packets
+ # Initialize a list for detected packet types
+ packet_types = []
+
+ # Check for the presence of each packet type and append to the list
+ if (packets['multicast']['from'] > 0) or (packets['multicast']['to'] > 0):
+ packet_types.append('Multicast')
+ if (packets['broadcast']['from'] > 0) or (packets['broadcast']['to'] > 0):
+ packet_types.append('Broadcast')
+ if (packets['unicast']['from'] > 0) or (packets['unicast']['to'] > 0):
+ packet_types.append('Unicast')
+
+ # Construct the description if any packet types were detected
+ if packet_types:
+ description = 'Packet types detected: ' + ', '.join(packet_types)
+ else:
+ description = 'No multicast, broadcast or unicast detected'
+
+ except Exception as e: # pylint: disable=W0718
+ LOGGER.error(e)
+ result = 'Error'
+ return result, description, details
+
+ def get_network_packet_types(self):
+ combined_results = {
+ 'mac_address': self._device_mac,
+ 'multicast': {
+ 'from': 0,
+ 'to': 0
+ },
+ 'broadcast': {
+ 'from': 0,
+ 'to': 0
+ },
+ 'unicast': {
+ 'from': 0,
+ 'to': 0
+ },
+ }
+ capture_files = [self.startup_capture_file, self.monitor_capture_file]
+ for capture_file in capture_files:
+ bin_file = self._bin_dir + '/get_packet_counts.sh'
+ args = f'"{capture_file}" "{self._device_mac}"'
+ command = f'{bin_file} {args}'
+ response = util.run_command(command)
+ packets = json.loads(response[0].strip())
+ # Combine results
+ combined_results['multicast']['from'] += packets['multicast']['from']
+ combined_results['multicast']['to'] += packets['multicast']['to']
+ combined_results['broadcast']['from'] += packets['broadcast']['from']
+ combined_results['broadcast']['to'] += packets['broadcast']['to']
+ combined_results['unicast']['from'] += packets['unicast']['from']
+ combined_results['unicast']['to'] += packets['unicast']['to']
+ return combined_results
+
def enable_failover(self):
# Move primary DHCP server to primary failover
LOGGER.info('Configuring primary failover DHCP server')
@@ -500,6 +779,7 @@ def is_ip_in_range(self, ip, start_ip, end_ip):
return start_int <= ip_int <= end_int
def _run_subnet_test(self, config):
+
# Resolve the configured dhcp subnet ranges
ranges = None
if 'ranges' in config:
@@ -514,6 +794,7 @@ def _run_subnet_test(self, config):
response = self.dhcp1_client.get_dhcp_range()
cur_range = {}
+
if response.code == 200:
cur_range['start'] = response.start
cur_range['end'] = response.end
@@ -526,16 +807,21 @@ def _run_subnet_test(self, config):
results = []
dhcp_setup = self.setup_single_dhcp_server()
+
if dhcp_setup[0]:
LOGGER.info(dhcp_setup[1])
lease = self._dhcp_util.get_cur_lease(mac_address=self._device_mac,
timeout=self._lease_wait_time_sec)
+
if lease is not None:
if self._dhcp_util.is_lease_active(lease):
results = self.test_subnets(ranges)
else:
- LOGGER.info('Failed to confirm a valid active lease for the device')
- return None, 'Failed to confirm a valid active lease for the device'
+ LOGGER.info('Device has no current DHCP lease ' +
+ 'so this test could not be run')
+ return (
+ None,
+ 'Device has no current DHCP lease so this test could not be run')
else:
LOGGER.error(dhcp_setup[1])
return None, 'Failed to setup DHCP server for test'
@@ -549,7 +835,7 @@ def _run_subnet_test(self, config):
else:
if result['result'] is not None:
final_result &= result['result']
- if result['result']:
+ if not result['result']:
final_result_details += result['details'] + '\n'
if final_result:
@@ -562,10 +848,17 @@ def _run_subnet_test(self, config):
# Wait for the current lease to expire
lease = self._dhcp_util.get_cur_lease(mac_address=self._device_mac,
timeout=self._lease_wait_time_sec)
- self._dhcp_util.wait_for_lease_expire(lease, self._lease_wait_time_sec)
+
+ # Check if lease is active
+ if lease is not None:
+ self._dhcp_util.wait_for_lease_expire(lease, self._lease_wait_time_sec)
+ else:
+ # If not, wait for 30 seconds as a fallback
+ time.sleep(30)
# Wait for a new lease to be provided before exiting test
# to prevent other test modules from failing
+
LOGGER.info('Checking for new lease')
# Subnet changes tend to take longer to pick up so we'll allow
# for twice the lease wait time
@@ -580,9 +873,8 @@ def _run_subnet_test(self, config):
else:
LOGGER.info('New lease not found. Waiting to check again')
- except Exception as e: # pylint: disable=W0718
- LOGGER.error('Failed to restore DHCP server configuration: ' + str(e))
- LOGGER.error(traceback.format_exc())
+ except Exception: # pylint: disable=W0718
+ LOGGER.error('Failed to restore DHCP server configuration')
return final_result, final_result_details
diff --git a/modules/test/conn/python/src/dhcp_util.py b/modules/test/conn/python/src/dhcp_util.py
index be5f0cac2..22880dab0 100644
--- a/modules/test/conn/python/src/dhcp_util.py
+++ b/modules/test/conn/python/src/dhcp_util.py
@@ -207,7 +207,7 @@ def is_lease_active(self, lease):
def ping(self, host):
cmd = 'ping -c 1 ' + str(host)
- success = util.run_command(cmd, output=False)
+ success = util.run_command(cmd, output=False) # pylint: disable=E1120
return success
def add_reserved_lease(self,
@@ -256,21 +256,30 @@ def setup_single_dhcp_server(self):
return False
def wait_for_lease_expire(self, lease, max_wait_time=30):
- expiration_utc = datetime.strptime(lease['expires'], '%Y-%m-%d %H:%M:%S')
- # lease information stored in UTC so we need to convert to local time
- expiration = self.utc_to_local(expiration_utc)
- time_to_expire = expiration - datetime.now(tz=tz.tzlocal())
- # Wait until the expiration time and padd 5 seconds
- # If wait time is longer than max_wait_time, only wait
- # for the max wait time
- wait_time = min(max_wait_time,
- time_to_expire.total_seconds() +
- 5) if time_to_expire.total_seconds() > 0 else 0
- LOGGER.info('Time until lease expiration: ' + str(wait_time))
- LOGGER.info('Waiting for current lease to expire: ' + str(expiration))
- if wait_time > 0:
- time.sleep(wait_time)
- LOGGER.info('Current lease expired.')
+
+ try:
+ expiration_utc = datetime.strptime(lease['expires'], '%Y-%m-%d %H:%M:%S')
+
+ # Lease information stored in UTC so we need to convert to local time
+ expiration = self.utc_to_local(expiration_utc)
+ time_to_expire = expiration - datetime.now(tz=tz.tzlocal())
+
+ # Wait until the expiration time and pad 5 seconds
+ # If wait time is longer than max_wait_time, only wait
+ # for the max wait time
+ wait_time = min(max_wait_time,
+ time_to_expire.total_seconds() +
+ 5) if time_to_expire.total_seconds() > 0 else 0
+
+ LOGGER.info('Time until lease expiration: ' + str(wait_time))
+ LOGGER.info('Waiting for current lease to expire: ' + str(expiration))
+
+ if wait_time > 0:
+ time.sleep(wait_time)
+ LOGGER.info('Current lease expired')
+
+ except TypeError:
+ LOGGER.error('Device does not have an active lease')
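
A worked example of the wait-time computation above (stdlib timezones are used here for self-containment; the patch itself converts via dateutil's tz.tzlocal):

```python
from datetime import datetime, timedelta, timezone

max_wait_time = 30
# Pretend the lease expires 12 seconds from now
expiration = datetime.now(timezone.utc) + timedelta(seconds=12)
time_to_expire = expiration - datetime.now(timezone.utc)
# Pad the remaining time by 5 seconds, but never wait past max_wait_time
wait_time = (min(max_wait_time, time_to_expire.total_seconds() + 5)
             if time_to_expire.total_seconds() > 0 else 0)
print(round(wait_time))  # ~17 (12 s remaining + 5 s padding)
```
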
# Convert from a UTC datetime to the local time zone
def utc_to_local(self, utc_datetime):
diff --git a/modules/test/conn/python/src/port_stats_util.py b/modules/test/conn/python/src/port_stats_util.py
index d923501eb..a1f68cb03 100644
--- a/modules/test/conn/python/src/port_stats_util.py
+++ b/modules/test/conn/python/src/port_stats_util.py
@@ -41,7 +41,7 @@ def __init__(self,
self.conn_stats = self._read_stats_file(self.ethtool_conn_stats_file)
def is_autonegotiate(self):
- auto_negotiation = False
+ auto_negotiation = None
auto_negotiation_status = self._get_stat_option(stats=self.conn_stats,
option='Auto-negotiation:')
if auto_negotiation_status is not None:
@@ -66,15 +66,22 @@ def connection_port_link_test(self):
option='rx_errors:')
rx_errors_post = self._get_stat_option(stats=stats_post,
option='rx_errors:')
- tx_errors = int(tx_errors_post) - int(tx_errors_pre)
- rx_errors = int(rx_errors_post) - int(rx_errors_pre)
- if tx_errors > 0 or rx_errors > 0:
- result = False
- description = 'Port errors detected'
- details = f'TX errors: {tx_errors}, RX errors: {rx_errors}'
+
+ # Check that the above have been resolved correctly
+ if (tx_errors_pre is None or tx_errors_post is None or
+ rx_errors_pre is None or rx_errors_post is None):
+ result = 'Error'
+ description = 'Port stats not available'
else:
- result = True
- description = 'No port errors detected'
+ tx_errors = int(tx_errors_post) - int(tx_errors_pre)
+ rx_errors = int(rx_errors_post) - int(rx_errors_pre)
+ if tx_errors > 0 or rx_errors > 0:
+ result = False
+ description = 'Port errors detected'
+ details = f'TX errors: {tx_errors}, RX errors: {rx_errors}'
+ else:
+ result = True
+ description = 'No port errors detected'
return result, description, details
def connection_port_duplex_test(self):
@@ -83,7 +90,10 @@ def connection_port_duplex_test(self):
result = None
description = ''
details = ''
- if not auto_negotiation:
+ if auto_negotiation is None:
+ result = 'Error'
+ description = 'Port stats not available'
+ elif not auto_negotiation:
result = False
description = 'Interface not configured for auto-negotiation'
else:
@@ -104,7 +114,10 @@ def connection_port_speed_test(self):
result = None
description = ''
details = ''
- if not auto_negotiation:
+ if auto_negotiation is None:
+ result = 'Error'
+ description = 'Port stats not available'
+ elif not auto_negotiation:
result = False
description = 'Interface not configured for auto-negotiation'
else:
diff --git a/modules/test/dns/README.md b/modules/test/dns/README.md
index 13f0df5fd..c2a917b13 100644
--- a/modules/test/dns/README.md
+++ b/modules/test/dns/README.md
@@ -15,4 +15,5 @@ Within the ```python/src``` directory, the below tests are executed.
| ID | Description | Expected behavior | Required result
|---|---|---|---|
| dns.network.hostname_resolution | Verifies that the device resolves hostnames | The device sends DNS requests | Required |
-| dns.network.from_dhcp | Verifies that the device allows for a DNS server to be provided by the DHCP server | The device sends DNS requests to the DNS server provided by the DHCP server | Roadmap |
\ No newline at end of file
+| dns.network.from_dhcp | Verifies that the device allows for a DNS server to be provided by the DHCP server | The device sends DNS requests to the DNS server provided by the DHCP server | Informational |
+| dns.mdns | Does the device have MDNS (or any kind of IP multicast) | Device may send MDNS requests | Informational |
\ No newline at end of file
diff --git a/modules/test/dns/bin/start_test_module b/modules/test/dns/bin/start_test_module
index a529c2fcf..c3209261a 100644
--- a/modules/test/dns/bin/start_test_module
+++ b/modules/test/dns/bin/start_test_module
@@ -41,11 +41,8 @@ else
fi
# Create and set permissions on the log files
-LOG_FILE=/runtime/output/$MODULE_NAME.log
RESULT_FILE=/runtime/output/$MODULE_NAME-result.json
-touch $LOG_FILE
touch $RESULT_FILE
-chown $HOST_USER $LOG_FILE
chown $HOST_USER $RESULT_FILE
# Run the python script that will execute the tests for this module
diff --git a/modules/test/dns/conf/module_config.json b/modules/test/dns/conf/module_config.json
index 13c9b3236..662273cd7 100644
--- a/modules/test/dns/conf/module_config.json
+++ b/modules/test/dns/conf/module_config.json
@@ -16,7 +16,6 @@
"name": "dns.network.hostname_resolution",
"test_description": "Verify the device sends DNS requests",
"expected_behavior": "The device sends DNS requests.",
- "required_result": "Required",
"recommendations": [
"Install a supported DNS client",
"Ensure DNS servers have been set correctly",
@@ -27,10 +26,14 @@
"name": "dns.network.from_dhcp",
"test_description": "Verify the device allows for a DNS server to be entered automatically",
"expected_behavior": "The device sends DNS requests to the DNS server provided by the DHCP server",
- "required_result": "Informational",
"recommendations": [
"Install a DNS client that supports fetching DNS servers from DHCP options"
]
+ },
+ {
+ "name": "dns.mdns",
+ "test_description": "Does the device have MDNS (or any kind of IP multicast)",
+ "expected_behavior": "Device may send MDNS requests"
}
]
}
diff --git a/modules/test/dns/dns.Dockerfile b/modules/test/dns/dns.Dockerfile
index 0197fd72e..461e87899 100644
--- a/modules/test/dns/dns.Dockerfile
+++ b/modules/test/dns/dns.Dockerfile
@@ -12,8 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-# Image name: test-run/conn-test
-FROM test-run/base-test:latest
+# Image name: testrun/dns-test
+FROM testrun/base-test:latest
ARG MODULE_NAME=dns
ARG MODULE_DIR=modules/test/$MODULE_NAME
@@ -22,7 +22,7 @@ ARG MODULE_DIR=modules/test/$MODULE_NAME
COPY $MODULE_DIR/python/requirements.txt /testrun/python
# Install all python requirements for the module
-RUN pip3 install -r /testrun/python/requirements.txt
+RUN pip install -r /testrun/python/requirements.txt
# Copy over all configuration files
COPY $MODULE_DIR/conf /testrun/conf
diff --git a/modules/test/dns/python/requirements.txt b/modules/test/dns/python/requirements.txt
index 93b351f44..f61132516 100644
--- a/modules/test/dns/python/requirements.txt
+++ b/modules/test/dns/python/requirements.txt
@@ -1 +1,6 @@
-scapy
\ No newline at end of file
+# Dependencies of user defined packages
+# Package dependencies should always be defined before the user defined
+# packages to prevent auto-upgrades of stable dependencies
+
+# User defined packages
+scapy==2.6.0
diff --git a/modules/test/dns/python/src/dns_module.py b/modules/test/dns/python/src/dns_module.py
index 607a026b5..fe244f0a7 100644
--- a/modules/test/dns/python/src/dns_module.py
+++ b/modules/test/dns/python/src/dns_module.py
@@ -13,12 +13,13 @@
# limitations under the License.
"""DNS test module"""
import subprocess
-from scapy.all import rdpcap, DNS, IP
+from scapy.all import rdpcap, DNS, IP, Ether
from test_module import TestModule
import os
+from collections import Counter
LOG_NAME = 'test_dns'
-MODULE_REPORT_FILE_NAME='dns_report.html'
+MODULE_REPORT_FILE_NAME = 'dns_report.html'
DNS_SERVER_CAPTURE_FILE = '/runtime/network/dns.pcap'
STARTUP_CAPTURE_FILE = '/runtime/device/startup.pcap'
MONITOR_CAPTURE_FILE = '/runtime/device/monitor.pcap'
@@ -30,7 +31,6 @@ class DNSModule(TestModule):
def __init__(self,
module,
- log_dir=None,
conf_file=None,
results_dir=None,
dns_server_capture_file=DNS_SERVER_CAPTURE_FILE,
@@ -38,12 +38,11 @@ def __init__(self,
monitor_capture_file=MONITOR_CAPTURE_FILE):
super().__init__(module_name=module,
log_name=LOG_NAME,
- log_dir=log_dir,
conf_file=conf_file,
results_dir=results_dir)
- self.dns_server_capture_file=dns_server_capture_file
- self.startup_capture_file=startup_capture_file
- self.monitor_capture_file=monitor_capture_file
+ self.dns_server_capture_file = dns_server_capture_file
+ self.startup_capture_file = startup_capture_file
+ self.monitor_capture_file = monitor_capture_file
self._dns_server = '10.10.10.4'
global LOGGER
LOGGER = self._get_logger()
@@ -52,21 +51,20 @@ def generate_module_report(self):
# Extract DNS data from the pcap file
dns_table_data = self.extract_dns_data()
- html_content = '
DNS Module
'
+ html_content = '
DNS Module
'
# Set the summary variables
- local_requests = sum(1 for row in dns_table_data
- if row['Destination'] ==
- self._dns_server and row['Type'] == 'Query')
- external_requests = sum(1 for row in dns_table_data
- if row['Destination'] !=
- self._dns_server and row['Type'] == 'Query')
+ local_requests = sum(
+ 1 for row in dns_table_data
+ if row['Destination'] == self._dns_server and row['Type'] == 'Query')
+ external_requests = sum(
+ 1 for row in dns_table_data
+ if row['Destination'] != self._dns_server and row['Type'] == 'Query')
- total_requests = sum(1 for row in dns_table_data
- if row['Type'] == 'Query')
+ total_requests = sum(1 for row in dns_table_data if row['Type'] == 'Query')
total_responses = sum(1 for row in dns_table_data
- if row['Type'] == 'Response')
+ if row['Type'] == 'Response')
# Add summary table
html_content += (f'''
@@ -97,25 +95,33 @@ def generate_module_report(self):
          <th>Source</th>
          <th>Destination</th>
+         <th>Resolved IP</th>
          <th>Type</th>
          <th>URL</th>
+         <th>Count</th>
        '''
-    for row in dns_table_data:
-      table_content += (f'''
-        <tr>
-          <td>{row['Source']}</td>
-          <td>{row['Destination']}</td>
-          <td>{row['Type']}</td>
-          <td>{row['Data']}</td>
-        </tr>
-      ''')
+    # Count unique combinations
+    counter = Counter((row['Source'], row['Destination'], row['ResolvedIP'],
+                       row['Type'], row['Data']) for row in dns_table_data)
+
+    # Generate the HTML table with the count column
+    for (src, dst, res_ip, typ, dat), count in counter.items():
+      table_content += f'''
+        <tr>
+          <td>{src}</td>
+          <td>{dst}</td>
+          <td>{res_ip}</td>
+          <td>{typ}</td>
+          <td>{dat}</td>
+          <td>{count}</td>
+        </tr>
+      '''
    table_content += '''
-        </tbody>
-      </table>'''
+      </table>'''
html_content += table_content
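
For reference, the Counter above keys each row on its full (source, destination, resolved IP, type, data) tuple, so repeated identical DNS exchanges collapse into one table row with a count. A minimal standalone sketch with made-up rows:

    from collections import Counter

    rows = [
        {'Source': '10.10.10.10', 'Destination': '10.10.10.4',
         'ResolvedIP': 'N/A', 'Type': 'Query', 'Data': 'example.com'},
        {'Source': '10.10.10.10', 'Destination': '10.10.10.4',
         'ResolvedIP': 'N/A', 'Type': 'Query', 'Data': 'example.com'},
    ]
    counter = Counter((r['Source'], r['Destination'], r['ResolvedIP'],
                       r['Type'], r['Data']) for r in rows)
    for (src, dst, res_ip, typ, dat), count in counter.items():
      print(count)  # 2: both rows collapse into a single keyed entry
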
@@ -149,30 +155,47 @@ def extract_dns_data(self):
# Iterate through DNS packets
for packet in packets:
if DNS in packet and packet.haslayer(IP):
- source_ip = packet[IP].src
- destination_ip = packet[IP].dst
- dns_layer = packet[DNS]
-
- # 'qr' field indicates query (0) or response (1)
- dns_type = 'Query' if dns_layer.qr == 0 else 'Response'
-
- # Check for the presence of DNS query name
- if hasattr(dns_layer, 'qd') and dns_layer.qd is not None:
+
+ # Check if either source or destination MAC matches the device
+ if self._device_mac in (packet[Ether].src, packet[Ether].dst):
+ source_ip = packet[IP].src
+ destination_ip = packet[IP].dst
+ dns_layer = packet[DNS]
+ # 'qr' field indicates query (0) or response (1)
+ dns_type = 'Query' if dns_layer.qr == 0 else 'Response'
+
+ # Check if 'qd' (query data) exists and has at least one entry
+ if hasattr(dns_layer, 'qd') and dns_layer.qdcount > 0:
qname = dns_layer.qd.qname.decode() if dns_layer.qd.qname else 'N/A'
- else:
+ else:
qname = 'N/A'
- dns_data.append({
- 'Timestamp': float(packet.time), # Timestamp of the DNS packet
- 'Source': source_ip,
- 'Destination': destination_ip,
- 'Type': dns_type,
- 'Data': qname[:-1]
- })
+ resolved_ip = 'N/A'
+ # If it's a response packet, extract the resolved IP address
+ # from the answer section
+ if dns_layer.qr == 1 and hasattr(dns_layer,
+ 'an') and dns_layer.ancount > 0:
+ # Loop through all answers in the DNS response
+ for i in range(dns_layer.ancount):
+ answer = dns_layer.an[i]
+ # Check for IPv4 (A record) or IPv6 (AAAA record)
+ if answer.type == 1: # Indicates an A record (IPv4 address)
+ resolved_ip = answer.rdata # Extract IPv4 address
+ break # Stop after finding the first valid resolved IP
+ elif answer.type == 28: # Indicates an AAAA record (IPv6 address)
+ resolved_ip = answer.rdata # Extract IPv6 address
+ break # Stop after finding the first valid resolved IP
+
+ dns_data.append({
+ 'Timestamp': float(packet.time), # Timestamp of the DNS packet
+ 'Source': source_ip,
+ 'Destination': destination_ip,
+ 'ResolvedIP': resolved_ip, # Adding the resolved IP address
+ 'Type': dns_type,
+ 'Data': qname[:-1]
+ })
# Filter unique entries based on 'Timestamp'
- # DNS Server will duplicate messages caught by
- # startup and monitor
filtered_unique_dns_data = []
seen_timestamps = set()
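
The answer-section walk above takes the first A (type 1) or AAAA (type 28) record as the resolved address, and the timestamp filter that follows drops the duplicates that the DNS server capture repeats from the startup and monitor captures. A self-contained sketch of the record walk on a synthetic response; round-tripping through bytes() makes scapy compute ancount:

    from scapy.all import DNS, DNSQR, DNSRR

    dns = DNS(bytes(DNS(qr=1, qd=DNSQR(qname='example.com'),
                        an=DNSRR(rrname='example.com', type='A',
                                 rdata='93.184.216.34'))))
    resolved_ip = 'N/A'
    for i in range(dns.ancount):
      answer = dns.an[i]
      if answer.type == 1:  # A record (IPv4)
        resolved_ip = answer.rdata
        break
      elif answer.type == 28:  # AAAA record (IPv6)
        resolved_ip = answer.rdata
        break
    print(resolved_ip)  # 93.184.216.34
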
@@ -273,10 +296,10 @@ def _exec_tcpdump(self, tcpdump_filter, capture_file):
LOGGER.debug('tcpdump command: ' + command)
with subprocess.Popen(command,
- universal_newlines=True,
- shell=True,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE) as process:
+ universal_newlines=True,
+ shell=True,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE) as process:
text = str(process.stdout.read()).rstrip()
LOGGER.debug('tcpdump response: ' + text)
diff --git a/modules/test/ntp/bin/start_test_module b/modules/test/ntp/bin/start_test_module
index a09349cf9..33b2881f4 100644
--- a/modules/test/ntp/bin/start_test_module
+++ b/modules/test/ntp/bin/start_test_module
@@ -27,11 +27,8 @@ else
fi
# Create and set permissions on the log files
-LOG_FILE=/runtime/output/$MODULE_NAME.log
RESULT_FILE=/runtime/output/$MODULE_NAME-result.json
-touch $LOG_FILE
touch $RESULT_FILE
-chown $HOST_USER $LOG_FILE
chown $HOST_USER $RESULT_FILE
# Run the python script that will execute the tests for this module
diff --git a/modules/test/ntp/conf/module_config.json b/modules/test/ntp/conf/module_config.json
index 55eb3df76..6634b127d 100644
--- a/modules/test/ntp/conf/module_config.json
+++ b/modules/test/ntp/conf/module_config.json
@@ -9,14 +9,13 @@
"docker": {
"depends_on": "base",
"enable_container": true,
- "timeout": 30
+ "timeout": 60
},
"tests":[
{
"name": "ntp.network.ntp_support",
"test_description": "Does the device request network time sync as client as per RFC 5905 - Network Time Protocol Version 4: Protocol and Algorithms Specification",
"expected_behavior": "The device sends an NTPv4 request to the configured NTP server.",
- "required_result": "Required",
"recommendations": [
"Set the NTP version to v4 in the NTP client",
"Install an NTP client that supports NTPv4"
@@ -26,7 +25,6 @@
"name": "ntp.network.ntp_dhcp",
"test_description": "Accept NTP address over DHCP",
"expected_behavior": "Device can accept NTP server address, provided by the DHCP server (DHCP OFFER PACKET)",
- "required_result": "Roadmap",
"recommendations": [
"Install an NTP client that supports fetching the NTP servers from DHCP options"
]
diff --git a/modules/test/ntp/ntp.Dockerfile b/modules/test/ntp/ntp.Dockerfile
index 33b06287e..4d9701464 100644
--- a/modules/test/ntp/ntp.Dockerfile
+++ b/modules/test/ntp/ntp.Dockerfile
@@ -1,5 +1,19 @@
-# Image name: test-run/ntp-test
-FROM test-run/base-test:latest
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Image name: testrun/ntp-test
+FROM testrun/base-test:latest
ARG MODULE_NAME=ntp
ARG MODULE_DIR=modules/test/$MODULE_NAME
@@ -8,7 +22,7 @@ ARG MODULE_DIR=modules/test/$MODULE_NAME
COPY $MODULE_DIR/python/requirements.txt /testrun/python
# Install all python requirements for the module
-RUN pip3 install -r /testrun/python/requirements.txt
+RUN pip install -r /testrun/python/requirements.txt
# Copy over all configuration files
COPY $MODULE_DIR/conf /testrun/conf
diff --git a/modules/test/ntp/python/requirements.txt b/modules/test/ntp/python/requirements.txt
index 93b351f44..f61132516 100644
--- a/modules/test/ntp/python/requirements.txt
+++ b/modules/test/ntp/python/requirements.txt
@@ -1 +1,6 @@
-scapy
\ No newline at end of file
+# Dependencies of user defined packages
+# Package dependencies should always be defined before the user defined
+# packages to prevent auto-upgrades of stable dependencies
+
+# User defined packages
+scapy==2.6.0
diff --git a/modules/test/ntp/python/src/ntp_module.py b/modules/test/ntp/python/src/ntp_module.py
index 453c992e6..67e2a3c92 100644
--- a/modules/test/ntp/python/src/ntp_module.py
+++ b/modules/test/ntp/python/src/ntp_module.py
@@ -14,8 +14,8 @@
"""NTP test module"""
from test_module import TestModule
from scapy.all import rdpcap, IP, IPv6, NTP, UDP, Ether
-from datetime import datetime
import os
+from collections import defaultdict
LOG_NAME = 'test_ntp'
MODULE_REPORT_FILE_NAME = 'ntp_report.html'
@@ -30,7 +30,6 @@ class NTPModule(TestModule):
def __init__(self,
module,
- log_dir=None,
conf_file=None,
results_dir=None,
ntp_server_capture_file=NTP_SERVER_CAPTURE_FILE,
@@ -38,7 +37,6 @@ def __init__(self,
monitor_capture_file=MONITOR_CAPTURE_FILE):
super().__init__(module_name=module,
log_name=LOG_NAME,
- log_dir=log_dir,
conf_file=conf_file,
results_dir=results_dir)
self.ntp_server_capture_file = ntp_server_capture_file
@@ -54,7 +52,7 @@ def generate_module_report(self):
# Extract NTP data from the pcap file
ntp_table_data = self.extract_ntp_data()
-    html_content = '<h1>NTP Module</h1>'
+    html_content = '<h1>NTP Module</h1>'
# Set the summary variables
local_requests = sum(
@@ -69,6 +67,33 @@ def generate_module_report(self):
total_responses = sum(1 for row in ntp_table_data
if row['Type'] == 'Server')
+ # Initialize a dictionary to store timestamps for each unique combination
+ timestamps = defaultdict(list)
+
+ # Collect timestamps for each unique combination
+ for row in ntp_table_data:
+ # Add the timestamp to the corresponding combination
+ key = (row['Source'], row['Destination'], row['Type'], row['Version'])
+ timestamps[key].append(row['Timestamp'])
+
+ # Calculate the average time between requests for each unique combination
+ average_time_between_requests = {}
+
+ for key, times in timestamps.items():
+ # Sort the timestamps
+ times.sort()
+
+ # Calculate the time differences between consecutive timestamps
+ time_diffs = [t2 - t1 for t1, t2 in zip(times[:-1], times[1:])]
+
+ # Calculate the average of the time differences
+ if time_diffs:
+ avg_diff = sum(time_diffs) / len(time_diffs)
+ else:
+        avg_diff = 0  # only one timestamp, so the average difference is 0
+
+ average_time_between_requests[key] = avg_diff
+
# Add summary table
html_content += (f'''
'''
- for row in ntp_table_data:
-
- # Timestamp of the NTP packet
- dt_object = datetime.utcfromtimestamp(row['Timestamp'])
-
- # Extract milliseconds from the fractional part of the timestamp
- milliseconds = int((row['Timestamp'] % 1) * 1000)
+ # Generate the HTML table with the count column
+ for (src, dst, typ,
+ version), avg_diff in average_time_between_requests.items():
+ cnt = len(timestamps[(src, dst, typ, version)])
- # Format the datetime object with milliseconds
- formatted_time = dt_object.strftime(
- '%b %d, %Y %H:%M:%S.') + f'{milliseconds:03d}'
+ # Sync Average only applies to client requests
+ if 'Client' in typ:
+ # Convert avg_diff to seconds and format it
+ avg_diff_seconds = avg_diff
+ avg_formatted_time = f'{avg_diff_seconds:.3f} seconds'
+ else:
+ avg_formatted_time = 'N/A'
-      table_content += (f'''
-        <tr>
-          <td>{row['Source']}</td>
-          <td>{row['Destination']}</td>
-          <td>{row['Type']}</td>
-          <td>{row['Version']}</td>
-          <td>{formatted_time}</td>
-        </tr>
-      ''')
+      table_content += f'''
+        <tr>
+          <td>{src}</td>
+          <td>{dst}</td>
+          <td>{typ}</td>
+          <td>{version}</td>
+          <td>{cnt}</td>
+          <td>{avg_formatted_time}</td>
+        </tr>
+      '''
table_content += '''
'''
-
html_content += table_content
else:
@@ -159,8 +185,8 @@ def extract_ntp_data(self):
# Read the pcap files
packets = (rdpcap(self.startup_capture_file) +
- rdpcap(self.monitor_capture_file) +
- rdpcap(self.ntp_server_capture_file))
+ rdpcap(self.monitor_capture_file) +
+ rdpcap(self.ntp_server_capture_file))
# Iterate through NTP packets
for packet in packets:
@@ -171,6 +197,10 @@ def extract_ntp_data(self):
# Local NTP server syncs to external servers so we need to filter only
# for traffic to/from the device
if self._device_mac in (source_mac, destination_mac):
+
+ source_ip = None
+ dest_ip = None
+
if IP in packet:
source_ip = packet[IP].src
dest_ip = packet[IP].dst
@@ -218,6 +248,9 @@ def _ntp_network_ntp_support(self):
for packet in packet_capture:
if NTP in packet and packet.src == self._device_mac:
+
+ dest_ip = None
+
if IP in packet:
dest_ip = packet[IP].dst
elif IPv6 in packet:
@@ -229,16 +262,15 @@ def _ntp_network_ntp_support(self):
device_sends_ntp3 = True
LOGGER.info(f'Device sent NTPv3 request to {dest_ip}')
- if not (device_sends_ntp3 or device_sends_ntp4):
- result = False, 'Device has not sent any NTP requests'
- elif device_sends_ntp3 and device_sends_ntp4:
- result = False, ('Device sent NTPv3 and NTPv4 packets. ' +
- 'NTPv3 is not allowed.')
+ result = False, 'Device has not sent any NTP requests'
+
+ if device_sends_ntp3 and device_sends_ntp4:
+ result = False, ('Device sent NTPv3 and NTPv4 packets')
elif device_sends_ntp3:
- result = False, ('Device sent NTPv3 packets. '
- 'NTPv3 is not allowed.')
+ result = False, ('Device sent NTPv3 packets')
elif device_sends_ntp4:
- result = True, 'Device sent NTPv4 packets.'
+ result = True, 'Device sent NTPv4 packets'
+
LOGGER.info(result[1])
return result
@@ -255,6 +287,7 @@ def _ntp_network_ntp_dhcp(self):
for packet in packet_capture:
if NTP in packet and packet.src == self._device_mac:
device_sends_ntp = True
+ dest_ip = None
if IP in packet:
dest_ip = packet[IP].dst
elif IPv6 in packet:
@@ -266,17 +299,17 @@ def _ntp_network_ntp_dhcp(self):
LOGGER.info('Device sent NTP request to non-DHCP provided NTP server')
ntp_to_remote = True
+ result = 'Feature Not Detected', 'Device has not sent any NTP requests'
+
if device_sends_ntp:
if ntp_to_local and ntp_to_remote:
result = False, ('Device sent NTP request to DHCP provided ' +
'server and non-DHCP provided server')
elif ntp_to_remote:
result = ('Feature Not Detected',
- 'Device sent NTP request to non-DHCP provided server')
+ 'Device sent NTP request to non-DHCP provided server')
elif ntp_to_local:
result = True, 'Device sent NTP request to DHCP provided server'
- else:
- result = 'Feature Not Detected', 'Device has not sent any NTP requests'
LOGGER.info(result[1])
return result
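
Net effect of the reporting change above: instead of one table row per packet with an absolute timestamp, the report shows one row per (source, destination, type, version) tuple with a packet count and the mean gap between consecutive packets. The arithmetic in isolation, with made-up timestamps:

    from collections import defaultdict

    rows = [
        {'Source': '10.10.10.10', 'Destination': '10.10.10.5',
         'Type': 'Client', 'Version': 4, 'Timestamp': 100.0},
        {'Source': '10.10.10.10', 'Destination': '10.10.10.5',
         'Type': 'Client', 'Version': 4, 'Timestamp': 164.0},
        {'Source': '10.10.10.10', 'Destination': '10.10.10.5',
         'Type': 'Client', 'Version': 4, 'Timestamp': 228.0},
    ]
    timestamps = defaultdict(list)
    for row in rows:
      key = (row['Source'], row['Destination'], row['Type'], row['Version'])
      timestamps[key].append(row['Timestamp'])
    for key, times in timestamps.items():
      times.sort()
      diffs = [t2 - t1 for t1, t2 in zip(times[:-1], times[1:])]
      avg = sum(diffs) / len(diffs) if diffs else 0
      print(len(times), f'{avg:.3f} seconds')  # 3 packets, 64.000 seconds
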
diff --git a/modules/test/protocol/README.md b/modules/test/protocol/README.md
index 765fbf758..08c8cd345 100644
--- a/modules/test/protocol/README.md
+++ b/modules/test/protocol/README.md
@@ -14,6 +14,6 @@ Within the ```python/src``` directory, the below tests are executed.
| ID | Description | Expected behavior | Required result
|---|---|---|---|
-| protocol.valid_bacnet | Can valid BACnet traffic be seen | BACnet traffic can be seen on the network and packets are valid | Required if Applicable |
+| protocol.valid_bacnet | Can valid BACnet traffic be seen | BACnet traffic can be seen on the network and packets are valid | Recommended |
| protocol.bacnet.version | Obtain the version of BACnet client used | The BACnet client implements an up to date version of BACnet | Recommended |
| protocol.valid_modbus | Can valid Modbus traffic be seen | Any Modbus functionality works as expected and valid Modbus traffic can be observed | Recommended |
\ No newline at end of file
diff --git a/modules/test/protocol/bin/start_test_module b/modules/test/protocol/bin/start_test_module
index a0754836c..e51fdb7ed 100644
--- a/modules/test/protocol/bin/start_test_module
+++ b/modules/test/protocol/bin/start_test_module
@@ -1,53 +1,50 @@
-#!/bin/bash
-
-# Copyright 2023 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# https://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Setup and start the connection test module
-
-# Define where the python source files are located
-PYTHON_SRC_DIR=/testrun/python/src
-
-# Fetch module name
-MODULE_NAME=$1
-
-# Default interface should be veth0 for all containers
-DEFAULT_IFACE=veth0
-
-# Allow a user to define an interface by passing it into this script
-DEFINED_IFACE=$2
-
-# Select which interface to use
-if [[ -z $DEFINED_IFACE || "$DEFINED_IFACE" == "null" ]]
-then
- echo "No interface defined, defaulting to veth0"
- INTF=$DEFAULT_IFACE
-else
- INTF=$DEFINED_IFACE
-fi
-
-# Create and set permissions on the log files
-LOG_FILE=/runtime/output/$MODULE_NAME.log
-RESULT_FILE=/runtime/output/$MODULE_NAME-result.json
-touch $LOG_FILE
-touch $RESULT_FILE
-chown $HOST_USER $LOG_FILE
-chown $HOST_USER $RESULT_FILE
-
-# Run the python script that will execute the tests for this module
-# -u flag allows python print statements
-# to be logged by docker by running unbuffered
-python3 -u $PYTHON_SRC_DIR/run.py "-m $MODULE_NAME"
-
+#!/bin/bash
+
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Setup and start the connection test module
+
+# Define where the python source files are located
+PYTHON_SRC_DIR=/testrun/python/src
+
+# Fetch module name
+MODULE_NAME=$1
+
+# Default interface should be veth0 for all containers
+DEFAULT_IFACE=veth0
+
+# Allow a user to define an interface by passing it into this script
+DEFINED_IFACE=$2
+
+# Select which interface to use
+if [[ -z $DEFINED_IFACE || "$DEFINED_IFACE" == "null" ]]
+then
+ echo "No interface defined, defaulting to veth0"
+ INTF=$DEFAULT_IFACE
+else
+ INTF=$DEFINED_IFACE
+fi
+
+# Create and set permissions on the log files
+RESULT_FILE=/runtime/output/$MODULE_NAME-result.json
+touch $RESULT_FILE
+chown $HOST_USER $RESULT_FILE
+
+# Run the python script that will execute the tests for this module
+# -u flag allows python print statements
+# to be logged by docker by running unbuffered
+python3 -u $PYTHON_SRC_DIR/run.py "-m $MODULE_NAME"
+
echo Module has finished
\ No newline at end of file
diff --git a/modules/test/protocol/conf/module_config.json b/modules/test/protocol/conf/module_config.json
index 365bd346b..554f43cc7 100644
--- a/modules/test/protocol/conf/module_config.json
+++ b/modules/test/protocol/conf/module_config.json
@@ -16,20 +16,17 @@
{
"name": "protocol.valid_bacnet",
"test_description": "Can valid BACnet traffic be seen",
- "expected_behavior": "BACnet traffic can be seen on the network and packets are valid and not malformed",
- "required_result": "Recommended"
+ "expected_behavior": "BACnet traffic can be seen on the network and packets are valid and not malformed"
},
{
"name": "protocol.bacnet.version",
"test_description": "Obtain the version of BACnet client used",
- "expected_behavior": "The BACnet client implements an up to date version of BACnet",
- "required_result": "Recommended"
+ "expected_behavior": "The BACnet client implements an up to date version of BACnet"
},
{
"name": "protocol.valid_modbus",
"test_description": "Can valid Modbus traffic be seen",
"expected_behavior": "Any Modbus functionality works as expected and valid Modbus traffic can be observed",
- "required_result": "Recommended",
"config":{
"port": 502,
"device_id": 1,
diff --git a/modules/test/protocol/protocol.Dockerfile b/modules/test/protocol/protocol.Dockerfile
index 6f55520e1..4494ae94e 100644
--- a/modules/test/protocol/protocol.Dockerfile
+++ b/modules/test/protocol/protocol.Dockerfile
@@ -12,8 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-# Image name: test-run/protocol-test
-FROM test-run/base-test:latest
+# Image name: testrun/protocol-test
+FROM testrun/base-test:latest
# Set DEBIAN_FRONTEND to noninteractive mode
ENV DEBIAN_FRONTEND=noninteractive
@@ -28,7 +28,7 @@ ARG MODULE_DIR=modules/test/$MODULE_NAME
COPY $MODULE_DIR/python/requirements.txt /testrun/python
# Install all python requirements for the module
-RUN pip3 install -r /testrun/python/requirements.txt
+RUN pip install -r /testrun/python/requirements.txt
# Copy over all configuration files
COPY $MODULE_DIR/conf /testrun/conf
diff --git a/modules/test/protocol/python/requirements.txt b/modules/test/protocol/python/requirements.txt
index 57917735d..1fe889fe9 100644
--- a/modules/test/protocol/python/requirements.txt
+++ b/modules/test/protocol/python/requirements.txt
@@ -1,7 +1,14 @@
-# Required for BACnet protocol tests
-netifaces
-BAC0
-pytz
-
-# Required for Modbus protocol tests
-pymodbus
\ No newline at end of file
+# Dependencies of user defined packages
+# Package dependencies should always be defined before the user defined
+# packages to prevent auto-upgrades of stable dependencies
+bacpypes==0.18.7
+colorama==0.4.6
+
+# User defined packages
+# Required for BACnet protocol tests
+netifaces==0.11.0
+BAC0==23.7.3
+pytz==2024.2
+
+# Required for Modbus protocol tests
+pymodbus==3.7.4
diff --git a/modules/test/protocol/python/src/protocol_bacnet.py b/modules/test/protocol/python/src/protocol_bacnet.py
index a17c9cdd3..9d4399b2b 100644
--- a/modules/test/protocol/python/src/protocol_bacnet.py
+++ b/modules/test/protocol/python/src/protocol_bacnet.py
@@ -82,8 +82,10 @@ def validate_device(self):
for device in self.devices:
object_id = str(device[3]) # BACnet Object ID
LOGGER.info('Checking device: ' + str(device))
- result &= self.validate_bacnet_source(
+ device_valid = self.validate_bacnet_source(
object_id=object_id, device_hw_addr=self.device_hw_addr)
+ if device_valid is not None:
+ result &= device_valid
description = ('BACnet device discovered' if result else
'BACnet device was found but was not device under test')
else:
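
The guard matters because validate_bacnet_source can evidently return None (for example, when the responding BACnet device is not the device under test), and folding None into a boolean with &= raises a TypeError. The failure mode in isolation:

    result = True
    for device_valid in (True, None, True):  # None: inconclusive check
      if device_valid is not None:
        result &= device_valid
    print(result)  # True; without the guard, `True & None` raises TypeError
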
diff --git a/modules/test/protocol/python/src/protocol_modbus.py b/modules/test/protocol/python/src/protocol_modbus.py
index 925e9517a..a722f928e 100644
--- a/modules/test/protocol/python/src/protocol_modbus.py
+++ b/modules/test/protocol/python/src/protocol_modbus.py
@@ -103,7 +103,7 @@ def __init__(self, log, device_ip, config):
self._discrete_input_enabled = False
# Initialize the modbus client
- self.client = ModbusClient(device_ip, self._port)
+ self.client = ModbusClient(host=device_ip, port=self._port)
# Connections created from this method are simple socket connections
# and aren't indicative of valid modbus
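
The switch to keyword arguments tracks the pymodbus 3.x client API (pinned above as pymodbus==3.7.4), where the positional parameter after host is no longer guaranteed to be the port, so ModbusClient(ip, port) could bind the port to the wrong parameter. A hedged usage sketch against a made-up device address:

    from pymodbus.client import ModbusTcpClient

    client = ModbusTcpClient(host='10.10.10.14', port=502)
    if client.connect():  # a bare socket connect; not yet proof of valid Modbus
      rr = client.read_holding_registers(address=0, count=1, slave=1)
      if not rr.isError():
        print(rr.registers)
      client.close()
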
diff --git a/modules/test/protocol/python/src/protocol_module.py b/modules/test/protocol/python/src/protocol_module.py
index 4f7c1a7e7..9d99c91bd 100644
--- a/modules/test/protocol/python/src/protocol_module.py
+++ b/modules/test/protocol/python/src/protocol_module.py
@@ -22,7 +22,7 @@
class ProtocolModule(TestModule):
- """Protocol Test module"""
+ """Protocol test module"""
def __init__(self, module):
self._supports_bacnet = False
diff --git a/modules/test/protocol/python/src/run.py b/modules/test/protocol/python/src/run.py
index d47c81cb6..a2788c833 100644
--- a/modules/test/protocol/python/src/run.py
+++ b/modules/test/protocol/python/src/run.py
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-"""Run Baseline module"""
+"""Run protocol module"""
import argparse
import signal
import sys
diff --git a/modules/test/services/README.md b/modules/test/services/README.md
index eae8a0bd0..f6133cc6e 100644
--- a/modules/test/services/README.md
+++ b/modules/test/services/README.md
@@ -22,6 +22,6 @@ Within the ```python/src``` directory, the below tests are executed.
| security.services.pop | Check POP ports 109 and 110 are disabled and POP is not running on any port | There is no POP service running on any port | Required |
| security.services.imap | Check IMAP port 143 is disabled and IMAP is not running on any port | There is no IMAP service running on any port | Required |
| security.services.snmpv3 | Check SNMP port 161/162 is disabled. If SNMP is an essential service, it should be v3 | Device is unreachable on port 161/162 unless SNMP is essential in which case it is SNMPv3 that is used | Required |
-| security.services.vnc | Check VNS is disabled on any port | Device cannot be accessed via VNC on any port | Required |
+| security.services.vnc | Check VNC is disabled on any port | Device cannot be accessed via VNC on any port | Required |
| security.services.tftp | Check TFTP port 69 is disabled (UDP) | There is no TFTP service running on any port | Required |
| ntp.network.ntp_server | Check NTP port 123 is disabled and the device is not acting as an NTP server | The device does not respond to NTP requests | Required |
\ No newline at end of file
diff --git a/modules/test/services/bin/start_test_module b/modules/test/services/bin/start_test_module
index d8cede486..a42ee4cf0 100644
--- a/modules/test/services/bin/start_test_module
+++ b/modules/test/services/bin/start_test_module
@@ -41,11 +41,8 @@ else
fi
# Create and set permissions on the log files
-LOG_FILE=/runtime/output/$MODULE_NAME.log
RESULT_FILE=/runtime/output/$MODULE_NAME-result.json
-touch $LOG_FILE
touch $RESULT_FILE
-chown $HOST_USER $LOG_FILE
chown $HOST_USER $RESULT_FILE
# Run the python script that will execute the tests for this module
diff --git a/modules/test/services/conf/module_config.json b/modules/test/services/conf/module_config.json
index 5c20b4beb..b37435eda 100644
--- a/modules/test/services/conf/module_config.json
+++ b/modules/test/services/conf/module_config.json
@@ -9,14 +9,13 @@
"docker": {
"depends_on": "base",
"enable_container": true,
- "timeout": 600
+ "timeout": 900
},
"tests": [
{
"name": "security.services.ftp",
"test_description": "Check FTP port 20/21 is disabled and FTP is not running on any port",
"expected_behavior": "There is no FTP service running on any port",
- "required_result": "Required",
"config": {
"services": [
"ftp",
@@ -50,7 +49,6 @@
"name": "security.ssh.version",
"test_description": "If the device is running a SSH server ensure it is SSHv2",
"expected_behavior": "SSH server is not running or server is SSHv2",
- "required_result": "Required",
"config": {
"services": ["ssh"],
"ports": [
@@ -70,7 +68,6 @@
"name": "security.services.telnet",
"test_description": "Check TELNET port 23 is disabled and TELNET is not running on any port",
"expected_behavior": "There is no Telnet service running on any port",
- "required_result": "Required",
"config": {
"services": [
"telnet"
@@ -95,7 +92,6 @@
"name": "security.services.smtp",
"test_description": "Check SMTP ports 25, 465 and 587 are not enabled and SMTP is not running on any port.",
"expected_behavior": "There is no SMTP service running on any port",
- "required_result": "Required",
"config": {
"services": [
"smtp"
@@ -123,7 +119,6 @@
"name": "security.services.http",
"test_description": "Check that there is no HTTP server running on any port",
"expected_behavior": "Device is unreachable on port 80 (or any other port) and only responds to HTTPS requests on port 443 (or any other port if HTTP is used at all)",
- "required_result": "Required",
"config": {
"services": [
"http"
@@ -158,7 +153,6 @@
"name": "security.services.pop",
"test_description": "Check POP ports 109 and 110 are disabled and POP is not running on any port",
"expected_behavior": "There is no POP service running on any port",
- "required_result": "Required",
"config": {
"services": [
"pop2",
@@ -200,7 +194,6 @@
"name": "security.services.imap",
"test_description": "Check IMAP port 143 is disabled and IMAP is not running on any port",
"expected_behavior": "There is no IMAP service running on any port",
- "required_result": "Required",
"config": {
"services": [
"imap",
@@ -250,7 +243,6 @@
"name": "security.services.snmpv3",
"test_description": "Check SNMP port 161/162 is disabled. If SNMP is an essential service, check it supports version 3",
"expected_behavior": "Device is unreachable on port 161 (or any other port) and device is unreachable on port 162 (or any other port) unless SNMP is essential in which case it is SNMPv3 is used.",
- "required_result": "Required",
"config": {
"services": [
"snmp"
@@ -275,7 +267,6 @@
"name": "security.services.vnc",
"test_description": "Check VNC is disabled on any port",
"expected_behavior": "Device cannot be accessed / connected to via VNC on any port",
- "required_result": "Required",
"config": {
"services": [
"vnc",
@@ -319,6 +310,10 @@
{
"number": 5903,
"type": "tcp"
+ },
+ {
+ "number": 6001,
+ "type": "tcp"
}
]
},
@@ -330,7 +325,6 @@
"name": "security.services.tftp",
"test_description": "Check TFTP port 69 is disabled (UDP)",
"expected_behavior": "There is no TFTP service running on any port",
- "required_result": "Required",
"config": {
"services": [
"tftp",
@@ -363,7 +357,6 @@
"name": "ntp.network.ntp_server",
"test_description": "Check NTP port 123 is disabled and the device is not operating as an NTP server",
"expected_behavior": "The device does not respond to NTP requests when it's IP is set as the NTP server on another device",
- "required_result": "Required",
"config": {
"services": [
"ntp"
@@ -379,6 +372,22 @@
"Disable the NTP server",
"Drop traffic entering port 123/udp"
]
+ },
+ {
+ "name": "protocol.services.bacnet",
+ "test_description": "Report whether the device is running a BACnet server",
+ "expected_behavior": "The device may or may not be running a BACnet server",
+ "config": {
+ "services": [
+ "bacnet"
+ ],
+ "ports": [
+ {
+ "number": 47808,
+ "type": "udp"
+ }
+ ]
+ }
}
]
}
diff --git a/modules/test/services/python/requirements.txt b/modules/test/services/python/requirements.txt
index a3fdd1857..02acc1e19 100644
--- a/modules/test/services/python/requirements.txt
+++ b/modules/test/services/python/requirements.txt
@@ -1 +1,6 @@
-xmltodict==0.13.0
\ No newline at end of file
+# Dependencies of user defined packages
+# Package dependencies should always be defined before the user defined
+# packages to prevent auto-upgrades of stable dependencies
+
+# User defined packages
+xmltodict==0.14.2
diff --git a/modules/test/services/python/src/services_module.py b/modules/test/services/python/src/services_module.py
index bfa232c87..1a783e7dc 100644
--- a/modules/test/services/python/src/services_module.py
+++ b/modules/test/services/python/src/services_module.py
@@ -31,14 +31,12 @@ class ServicesModule(TestModule):
def __init__(self,
module,
- log_dir=None,
conf_file=None,
results_dir=None,
run=True,
nmap_scan_results_path=None):
super().__init__(module_name=module,
log_name=LOG_NAME,
- log_dir=log_dir,
conf_file=conf_file,
results_dir=results_dir)
self._scan_tcp_results = None
@@ -83,7 +81,7 @@ def generate_module_report(self):
else:
udp_open += 1
- html_content = '
context: {
header: 'Sorry, there are no reports yet!',
message:
-        'Reports will be generated automatically once a test attempt completes.'
+        'Reports will be generated automatically once a test attempt completes.',
}
">
+ It has been saved as "{{ data.profile.name }}" and can now be attached to
+ reports.
+
+
+ The preliminary risk estimation based on your answers is
+
+ {{ data.profile.risk }} risk
+
+
+ {{ getRiskExplanation(data.profile.risk) }} The full report can be found
+ in the zip file. Please share with the lab to validate this profile and
+ determine next steps.
+
+
+
+ Close
+
+
diff --git a/modules/ui/src/app/pages/risk-assessment/components/success-dialog/success-dialog.component.scss b/modules/ui/src/app/pages/risk-assessment/components/success-dialog/success-dialog.component.scss
new file mode 100644
index 000000000..23badf7a4
--- /dev/null
+++ b/modules/ui/src/app/pages/risk-assessment/components/success-dialog/success-dialog.component.scss
@@ -0,0 +1,73 @@
+/**
+ * Copyright 2023 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+@import '../../../../../theming/colors';
+@import '../../../../../theming/variables';
+
+:host {
+ display: grid;
+ overflow: hidden;
+ width: 570px;
+ padding: 24px 0 8px 0;
+ > * {
+ padding: 0 16px 0 24px;
+ }
+}
+
+.simple-dialog-title {
+ font-family: $font-primary;
+ font-size: 18px;
+ font-weight: 400;
+ line-height: 24px;
+ text-align: left;
+}
+
+.simple-dialog-title + .simple-dialog-content {
+ margin-top: 0;
+ padding-top: 0;
+ border-bottom: 1px solid $lighter-grey;
+}
+
+.simple-dialog-content {
+ font-family: Roboto, sans-serif;
+ font-size: 14px;
+ line-height: 20px;
+ letter-spacing: 0.2px;
+ color: $grey-800;
+ padding: 16px 16px 16px 24px;
+ margin: 0;
+}
+
+.simple-dialog-actions {
+ padding: 0;
+ min-height: 30px;
+}
+
+.simple-dialog-content-risk {
+ font-weight: bold;
+ display: inline-flex;
+ align-items: center;
+}
+
+.profile-item-risk {
+ display: inline-flex;
+ align-items: center;
+ height: 20px;
+ margin-left: 2px;
+ font-family: $font-secondary;
+ font-size: 12px;
+ font-weight: 400;
+ letter-spacing: 0.3px;
+}
diff --git a/modules/ui/src/app/pages/risk-assessment/components/success-dialog/success-dialog.component.spec.ts b/modules/ui/src/app/pages/risk-assessment/components/success-dialog/success-dialog.component.spec.ts
new file mode 100644
index 000000000..b3a40c1bc
--- /dev/null
+++ b/modules/ui/src/app/pages/risk-assessment/components/success-dialog/success-dialog.component.spec.ts
@@ -0,0 +1,78 @@
+/**
+ * Copyright 2023 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+import { ComponentFixture, TestBed } from '@angular/core/testing';
+
+import { SuccessDialogComponent } from './success-dialog.component';
+import { TestRunService } from '../../../../services/test-run.service';
+import { MAT_DIALOG_DATA, MatDialogRef } from '@angular/material/dialog';
+import { of } from 'rxjs';
+import { PROFILE_MOCK } from '../../../../mocks/profile.mock';
+import { ProfileRisk } from '../../../../model/profile';
+
+describe('SuccessDialogComponent', () => {
+ let component: SuccessDialogComponent;
+  let fixture: ComponentFixture<SuccessDialogComponent>;
+ const testRunServiceMock = jasmine.createSpyObj(['getRiskClass']);
+ let compiled: HTMLElement;
+
+ beforeEach(async () => {
+ await TestBed.configureTestingModule({
+ imports: [SuccessDialogComponent],
+ providers: [
+ { provide: TestRunService, useValue: testRunServiceMock },
+ {
+ provide: MatDialogRef,
+ useValue: {
+ keydownEvents: () => of(new KeyboardEvent('keydown', { code: '' })),
+ close: () => ({}),
+ },
+ },
+ { provide: MAT_DIALOG_DATA, useValue: {} },
+ ],
+ }).compileComponents();
+ fixture = TestBed.createComponent(SuccessDialogComponent);
+ component = fixture.componentInstance;
+ component.data = {
+ profile: PROFILE_MOCK,
+ };
+ compiled = fixture.nativeElement as HTMLElement;
+ fixture.detectChanges();
+ });
+
+ it('should create', () => {
+ expect(component).toBeTruthy();
+ });
+
+ it('should close dialog on "cancel" click', () => {
+ const closeSpy = spyOn(component.dialogRef, 'close');
+ const confirmButton = compiled.querySelector(
+ '.confirm-button'
+ ) as HTMLButtonElement;
+
+ confirmButton?.click();
+
+ expect(closeSpy).toHaveBeenCalled();
+
+ closeSpy.calls.reset();
+ });
+
+ it('should return proper text for risk', () => {
+ expect(component.getRiskExplanation(ProfileRisk.LIMITED)).toEqual('');
+ expect(component.getRiskExplanation(ProfileRisk.HIGH)).toEqual(
+ 'An additional assessment may be required.'
+ );
+ });
+});
diff --git a/modules/ui/src/app/pages/risk-assessment/components/success-dialog/success-dialog.component.ts b/modules/ui/src/app/pages/risk-assessment/components/success-dialog/success-dialog.component.ts
new file mode 100644
index 000000000..e7ce23ff3
--- /dev/null
+++ b/modules/ui/src/app/pages/risk-assessment/components/success-dialog/success-dialog.component.ts
@@ -0,0 +1,65 @@
+/**
+ * Copyright 2023 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+import { Component, Inject } from '@angular/core';
+import {
+ MAT_DIALOG_DATA,
+ MatDialogModule,
+ MatDialogRef,
+} from '@angular/material/dialog';
+import { MatButtonModule } from '@angular/material/button';
+import { EscapableDialogComponent } from '../../../../components/escapable-dialog/escapable-dialog.component';
+import {
+ Profile,
+ ProfileRisk,
+ RiskResultClassName,
+} from '../../../../model/profile';
+import { TestRunService } from '../../../../services/test-run.service';
+import { CommonModule } from '@angular/common';
+
+interface DialogData {
+ profile: Profile;
+}
+
+@Component({
+ selector: 'app-success-dialog',
+ templateUrl: './success-dialog.component.html',
+ styleUrls: ['./success-dialog.component.scss'],
+ standalone: true,
+ imports: [MatDialogModule, MatButtonModule, CommonModule],
+})
+export class SuccessDialogComponent extends EscapableDialogComponent {
+ constructor(
+ private readonly testRunService: TestRunService,
+    public override dialogRef: MatDialogRef<SuccessDialogComponent>,
+ @Inject(MAT_DIALOG_DATA) public data: DialogData
+ ) {
+ super(dialogRef);
+ }
+
+ confirm() {
+ this.dialogRef.close();
+ }
+
+ public getRiskClass(riskResult: string): RiskResultClassName {
+ return this.testRunService.getRiskClass(riskResult);
+ }
+
+ getRiskExplanation(risk: string | undefined) {
+ return risk === ProfileRisk.HIGH
+ ? 'An additional assessment may be required.'
+ : '';
+ }
+}
diff --git a/modules/ui/src/app/pages/risk-assessment/profile-form/profile-form.component.html b/modules/ui/src/app/pages/risk-assessment/profile-form/profile-form.component.html
index e5d0ae0cb..8bde474ba 100644
--- a/modules/ui/src/app/pages/risk-assessment/profile-form/profile-form.component.html
+++ b/modules/ui/src/app/pages/risk-assessment/profile-form/profile-form.component.html
@@ -15,11 +15,11 @@
-->
@@ -76,240 +60,19 @@
(click)="onSaveClick(ProfileStatus.DRAFT)">
Save Draft
+
+ Discard
+
+
+ Close
+
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
- {{ description }}
-
- Please, check. “ and \ are not allowed.
-
-
- The field is required
-
-
- The field must be a maximum of
- {{ getControl(formControlName).getError('maxlength').requiredLength }}
- characters.
-
-
-
-
-
-
-
- {{ description }}
-
- Please, check. “ and \ are not allowed.
-
-
- The field is required
-
-
- The field must be a maximum of
- {{ getControl(formControlName).getError('maxlength').requiredLength }}
- characters.
-
-
-
-
-
-
-
- {{ description }}
-
- The field is required
-
-
- Please, check the email address. Valid e-mail can contain only latin
- letters, numbers, @ and . (dot).
-
-
- The field must be a maximum of
- {{ getControl(formControlName).getError('maxlength').requiredLength }}
- characters.
-
-
-
-
-
-
-
-
- {{ option }}
-
-
- {{
- description
- }}
-
-
-
-
-
-
-
- {{ option }}
-
-
- {{
- description
- }}
-
- The field is required
-
-
-
diff --git a/modules/ui/src/app/pages/risk-assessment/profile-form/profile-form.component.scss b/modules/ui/src/app/pages/risk-assessment/profile-form/profile-form.component.scss
index d9e90a2c4..1e4ad721b 100644
--- a/modules/ui/src/app/pages/risk-assessment/profile-form/profile-form.component.scss
+++ b/modules/ui/src/app/pages/risk-assessment/profile-form/profile-form.component.scss
@@ -17,42 +17,32 @@
@import 'src/theming/colors';
@import 'src/theming/variables';
+:host {
+ height: 100%;
+ display: flex;
+ flex-direction: column;
+}
+
.profile-form {
+ overflow: scroll;
+
+ .name-field-label {
+ padding-top: 0;
+ }
.field-container {
display: flex;
flex-direction: column;
align-items: flex-start;
padding: 8px 16px 8px 24px;
}
- .field-label {
- margin: 0;
- color: $grey-800;
- font-size: 18px;
- line-height: 24px;
- padding-top: 24px;
- padding-bottom: 16px;
- &:first-child {
- padding-top: 0;
- }
- &:has(+ .field-select-multiple.ng-invalid.ng-dirty) {
- color: mat.get-color-from-palette($color-warn, 700);
- }
- }
- mat-form-field {
+
+ .profile-form-field {
width: 100%;
}
- .field-hint {
- font-family: $font-secondary;
- font-size: 12px;
- font-weight: 400;
- line-height: 16px;
- text-align: left;
- padding-top: 8px;
- }
}
-.profile-form-field {
- width: 100%;
+.profile-form-field ::ng-deep .mat-mdc-form-field-textarea-control {
+ display: inherit;
}
.form-actions {
@@ -61,23 +51,14 @@
padding: 8px 24px 24px 24px;
}
-.save-draft-button:not(.mat-mdc-button-disabled) {
+.save-draft-button:not(.mat-mdc-button-disabled),
+.discard-button:not(.mat-mdc-button-disabled) {
color: $primary;
}
-.field-select-multiple {
- .field-select-checkbox {
- &:has(::ng-deep .mat-mdc-checkbox-checked) {
- background: mat.get-color-from-palette($color-primary, 50);
- }
- ::ng-deep .mdc-checkbox__ripple {
- display: none;
- }
- &:first-of-type {
- margin-top: 0;
- }
- &:last-of-type {
- margin-bottom: 8px;
- }
- }
+.save-profile-button:not(.mat-mdc-button-disabled),
+.save-draft-button:not(.mat-mdc-button-disabled),
+.discard-button:not(.mat-mdc-button-disabled) {
+ cursor: pointer;
+ pointer-events: auto;
}
diff --git a/modules/ui/src/app/pages/risk-assessment/profile-form/profile-form.component.spec.ts b/modules/ui/src/app/pages/risk-assessment/profile-form/profile-form.component.spec.ts
index 7344dd92c..d279a1f14 100644
--- a/modules/ui/src/app/pages/risk-assessment/profile-form/profile-form.component.spec.ts
+++ b/modules/ui/src/app/pages/risk-assessment/profile-form/profile-form.component.spec.ts
@@ -18,15 +18,17 @@ import { ComponentFixture, TestBed } from '@angular/core/testing';
import { ProfileFormComponent } from './profile-form.component';
import { BrowserAnimationsModule } from '@angular/platform-browser/animations';
import {
+ COPY_PROFILE_MOCK,
NEW_PROFILE_MOCK,
NEW_PROFILE_MOCK_DRAFT,
+ OUTDATED_DRAFT_PROFILE_MOCK,
PROFILE_FORM,
PROFILE_MOCK,
PROFILE_MOCK_2,
PROFILE_MOCK_3,
RENAME_PROFILE_MOCK,
} from '../../../mocks/profile.mock';
-import { FormControlType, ProfileStatus } from '../../../model/profile';
+import { ProfileStatus } from '../../../model/profile';
describe('ProfileFormComponent', () => {
let component: ProfileFormComponent;
@@ -132,7 +134,6 @@ describe('ProfileFormComponent', () => {
name.dispatchEvent(new Event('input'));
component.nameControl.markAsTouched();
- fixture.detectChanges();
fixture.detectChanges();
const nameError = compiled.querySelector('mat-error')?.innerHTML;
@@ -140,180 +141,11 @@ describe('ProfileFormComponent', () => {
expect(error).toBeTruthy();
expect(nameError).toContain(
- 'This Profile name is already used for another Risk Assessment profile'
+ 'This Profile name is already used for another profile'
);
});
});
- PROFILE_FORM.forEach((item, index) => {
-    const uiIndex = index + 1; // as Profile name is at 0 position, the json items start from 1
-
- it(`should have form field with specific type"`, () => {
- const fields = compiled.querySelectorAll('.profile-form-field');
-
- if (item.type === FormControlType.SELECT) {
- const select = fields[uiIndex].querySelector('mat-select');
- expect(select).toBeTruthy();
- } else if (item.type === FormControlType.SELECT_MULTIPLE) {
- const select = fields[uiIndex].querySelector('mat-checkbox');
- expect(select).toBeTruthy();
- } else if (item.type === FormControlType.TEXTAREA) {
- const input = fields[uiIndex]?.querySelector('textarea');
- expect(input).toBeTruthy();
- } else {
- const input = fields[uiIndex]?.querySelector('input');
- expect(input).toBeTruthy();
- }
- });
-
- it('should have label', () => {
- const labels = compiled.querySelectorAll('.field-label');
-      const uiIndex = index + 1; // as Profile name is at 0 position, the json items start from 1
-
- const label = item?.validation?.required
- ? item.question + ' *'
- : item.question;
- expect(labels[uiIndex].textContent?.trim()).toEqual(label);
- });
-
- it('should have hint', () => {
- const fields = compiled.querySelectorAll('.profile-form-field');
-      const uiIndex = index + 1; // as Profile name is at 0 position, the json items start from 1
- const hint = fields[uiIndex].querySelector('mat-hint');
-
- if (item.description) {
- expect(hint?.textContent?.trim()).toEqual(item.description);
- } else {
- expect(hint).toBeNull();
- }
- });
-
- if (item.type === FormControlType.SELECT) {
- describe('select', () => {
- it(`should have default value if provided`, () => {
- const fields = compiled.querySelectorAll('.profile-form-field');
- const select = fields[uiIndex].querySelector('mat-select');
- expect(select?.textContent?.trim()).toEqual(item.default || '');
- });
-
- it('should have "required" error when field is not filled', () => {
- const fields = compiled.querySelectorAll('.profile-form-field');
-
- component.getControl(index).setValue('');
- component.getControl(index).markAsTouched();
-
- fixture.detectChanges();
-
- const error = fields[uiIndex].querySelector('mat-error')?.innerHTML;
-
- expect(error).toContain('The field is required');
- });
- });
- }
-
- if (item.type === FormControlType.SELECT_MULTIPLE) {
- describe('select multiple', () => {
- it(`should mark form group as dirty while tab navigation`, () => {
- const fields = compiled.querySelectorAll('.profile-form-field');
- const checkbox = fields[uiIndex].querySelector(
- '.field-select-checkbox:last-of-type mat-checkbox'
- );
- checkbox?.dispatchEvent(
- new KeyboardEvent('keydown', { key: 'Tab' })
- );
- fixture.detectChanges();
-
- expect(component.getControl(index).dirty).toBeTrue();
- });
- });
- }
-
- if (
- item.type === FormControlType.TEXT ||
- item.type === FormControlType.TEXTAREA ||
- item.type === FormControlType.EMAIL_MULTIPLE
- ) {
- describe('text or text-long or email-multiple', () => {
- if (item.validation?.required) {
- it('should have "required" error when field is not filled', () => {
- const fields = compiled.querySelectorAll('.profile-form-field');
-          const uiIndex = index + 1; // as Profile name is at 0 position, the json items start from 1
- const input = fields[uiIndex].querySelector(
- '.mat-mdc-input-element'
- ) as HTMLInputElement;
- ['', ' '].forEach(value => {
- input.value = value;
- input.dispatchEvent(new Event('input'));
- component.getControl(index).markAsTouched();
- fixture.detectChanges();
- const errors = fields[uiIndex].querySelectorAll('mat-error');
- let hasError = false;
- errors.forEach(error => {
- if (error.textContent === 'The field is required') {
- hasError = true;
- }
- });
-
- expect(hasError).toBeTrue();
- });
- });
- }
-
- it('should have "invalid_format" error when field does not satisfy validation rules', () => {
- const fields = compiled.querySelectorAll('.profile-form-field');
-          const uiIndex = index + 1; // as Profile name is at 0 position, the json items start from 1
- const input: HTMLInputElement = fields[uiIndex].querySelector(
- '.mat-mdc-input-element'
- ) as HTMLInputElement;
- input.value = 'as\\\\\\\\\\""""""""';
- input.dispatchEvent(new Event('input'));
- component.getControl(index).markAsTouched();
- fixture.detectChanges();
- const result =
- item.type === FormControlType.EMAIL_MULTIPLE
- ? 'Please, check the email address. Valid e-mail can contain only latin letters, numbers, @ and . (dot).'
- : 'Please, check. “ and \\ are not allowed.';
- const errors = fields[uiIndex].querySelectorAll('mat-error');
- let hasError = false;
- errors.forEach(error => {
- if (error.textContent === result) {
- hasError = true;
- }
- });
-
- expect(hasError).toBeTrue();
- });
-
- if (item.validation?.max) {
- it('should have "maxlength" error when field is exceeding max length', () => {
- const fields = compiled.querySelectorAll('.profile-form-field');
-          const uiIndex = index + 1; // as Profile name is at 0 position, the json items start from 1
- const input: HTMLInputElement = fields[uiIndex].querySelector(
- '.mat-mdc-input-element'
- ) as HTMLInputElement;
- input.value =
- 'very long value very long value very long value very long value very long value very long value very long value very long value very long value very long value';
- input.dispatchEvent(new Event('input'));
- component.getControl(index).markAsTouched();
- fixture.detectChanges();
-
- const errors = fields[uiIndex].querySelectorAll('mat-error');
- let hasError = false;
- errors.forEach(error => {
- if (
- error.textContent ===
- `The field must be a maximum of ${item.validation?.max} characters.`
- ) {
- hasError = true;
- }
- });
- expect(hasError).toBeTrue();
- });
- }
- });
- }
- });
-
describe('Draft button', () => {
it('should be disabled when profile name is empty', () => {
component.nameControl.setValue('');
@@ -388,9 +220,52 @@ describe('ProfileFormComponent', () => {
});
});
});
+
+ describe('Discard button', () => {
+ beforeEach(() => {
+ fillForm(component);
+ fixture.detectChanges();
+ });
+
+ it('should be enabled when form is filled', () => {
+ const discardButton = compiled.querySelector(
+ '.discard-button'
+ ) as HTMLButtonElement;
+
+ expect(discardButton.disabled).toBeFalse();
+ });
+
+ it('should emit discard', () => {
+ const emitSpy = spyOn(component.discard, 'emit');
+ const discardButton = compiled.querySelector(
+ '.discard-button'
+ ) as HTMLButtonElement;
+ discardButton.click();
+
+ expect(emitSpy).toHaveBeenCalled();
+ });
+ });
});
describe('Class tests', () => {
+ describe('with outdated draft profile', () => {
+ beforeEach(() => {
+ component.selectedProfile = OUTDATED_DRAFT_PROFILE_MOCK;
+ fixture.detectChanges();
+ });
+
+ it('should have an error when uses the name of copy profile', () => {
+ expect(component.profileForm.value).toEqual({
+ 0: '',
+ 1: 'IoT Sensor',
+ 2: '',
+ 3: { 0: false, 1: false, 2: false },
+ 4: '',
+ name: 'Outdated profile',
+ });
+ });
+ });
+
describe('with profile', () => {
beforeEach(() => {
component.selectedProfile = PROFILE_MOCK;
@@ -432,6 +307,15 @@ describe('ProfileFormComponent', () => {
component.nameControl.hasError('has_same_profile_name')
).toBeTrue();
});
+
+ it('should have an error when uses the name of copy profile', () => {
+ component.selectedProfile = COPY_PROFILE_MOCK;
+ component.profiles = [PROFILE_MOCK, PROFILE_MOCK_2, COPY_PROFILE_MOCK];
+
+ expect(
+ component.nameControl.hasError('has_same_profile_name')
+ ).toBeTrue();
+ });
});
describe('with no profile', () => {
diff --git a/modules/ui/src/app/pages/risk-assessment/profile-form/profile-form.component.ts b/modules/ui/src/app/pages/risk-assessment/profile-form/profile-form.component.ts
index a15867ae7..2656221cd 100644
--- a/modules/ui/src/app/pages/risk-assessment/profile-form/profile-form.component.ts
+++ b/modules/ui/src/app/pages/risk-assessment/profile-form/profile-form.component.ts
@@ -16,6 +16,7 @@
import { CdkTextareaAutosize, TextFieldModule } from '@angular/cdk/text-field';
import {
afterNextRender,
+ AfterViewInit,
ChangeDetectionStrategy,
Component,
EventEmitter,
@@ -39,19 +40,19 @@ import {
FormGroup,
ReactiveFormsModule,
ValidatorFn,
- Validators,
} from '@angular/forms';
import { MatInputModule } from '@angular/material/input';
import { DeviceValidators } from '../../devices/components/device-form/device.validators';
import {
- FormControlType,
Profile,
ProfileFormat,
ProfileStatus,
Question,
- Validation,
} from '../../../model/profile';
+import { FormControlType } from '../../../model/question';
import { ProfileValidators } from './profile.validators';
+import { DynamicFormComponent } from '../../../components/dynamic-form/dynamic-form.component';
+import { CdkTrapFocus } from '@angular/cdk/a11y';
@Component({
selector: 'app-profile-form',
@@ -66,22 +67,24 @@ import { ProfileValidators } from './profile.validators';
MatSelectModule,
MatCheckboxModule,
TextFieldModule,
+ DynamicFormComponent,
],
templateUrl: './profile-form.component.html',
styleUrl: './profile-form.component.scss',
+ hostDirectives: [CdkTrapFocus],
changeDetection: ChangeDetectionStrategy.OnPush,
})
-export class ProfileFormComponent implements OnInit {
+export class ProfileFormComponent implements OnInit, AfterViewInit {
private profile: Profile | null = null;
private profileList!: Profile[];
private injector = inject(Injector);
private nameValidator!: ValidatorFn;
- public readonly FormControlType = FormControlType;
public readonly ProfileStatus = ProfileStatus;
profileForm: FormGroup = this.fb.group({});
@ViewChildren(CdkTextareaAutosize)
  autosize!: QueryList<CdkTextareaAutosize>;
@Input() profileFormat!: ProfileFormat[];
+ @Input() isCopyProfile!: boolean;
@Input()
set profiles(profiles: Profile[]) {
this.profileList = profiles;
@@ -105,15 +108,19 @@ export class ProfileFormComponent implements OnInit {
}
@Output() saveProfile = new EventEmitter();
+ @Output() discard = new EventEmitter();
constructor(
private deviceValidators: DeviceValidators,
private profileValidators: ProfileValidators,
private fb: FormBuilder
) {}
ngOnInit() {
- this.profileForm = this.createProfileForm(this.profileFormat);
+ this.profileForm = this.createProfileForm();
+ }
+
+ ngAfterViewInit(): void {
if (this.selectedProfile) {
- this.fillProfileForm(this.profileFormat, this.selectedProfile);
+ this.fillProfileForm(this.profileFormat, this.selectedProfile!);
}
}
@@ -138,7 +145,7 @@ export class ProfileFormComponent implements OnInit {
return this.profileForm.get(name.toString()) as AbstractControl;
}
- createProfileForm(questions: ProfileFormat[]): FormGroup {
+ createProfileForm(): FormGroup {
// eslint-disable-next-line @typescript-eslint/no-explicit-any
const group: any = {};
@@ -153,52 +160,9 @@ export class ProfileFormComponent implements OnInit {
this.nameValidator,
]);
- questions.forEach((question, index) => {
- if (question.type === FormControlType.SELECT_MULTIPLE) {
- group[index] = this.getMultiSelectGroup(question);
- } else {
- const validators = this.getValidators(
- question.type,
- question.validation
- );
- group[index] = new FormControl(question.default || '', validators);
- }
- });
return new FormGroup(group);
}
- getValidators(type: FormControlType, validation?: Validation): ValidatorFn[] {
- const validators: ValidatorFn[] = [];
- if (validation) {
- if (validation.required) {
- validators.push(this.profileValidators.textRequired());
- }
- if (validation.max) {
- validators.push(Validators.maxLength(Number(validation.max)));
- }
- if (type === FormControlType.EMAIL_MULTIPLE) {
- validators.push(this.profileValidators.emailStringFormat());
- }
- if (type === FormControlType.TEXT || type === FormControlType.TEXTAREA) {
- validators.push(this.profileValidators.textFormat());
- }
- }
- return validators;
- }
-
- getMultiSelectGroup(question: ProfileFormat): FormGroup {
- // eslint-disable-next-line @typescript-eslint/no-explicit-any
- const group: any = {};
- question.options?.forEach((option, index) => {
- group[index] = false;
- });
- return this.fb.group(group, {
- validators: question.validation?.required
- ? [this.profileValidators.multiSelectRequired]
- : [],
- });
- }
-
getFormGroup(name: string | number): FormGroup {
return this.profileForm?.controls[name] as FormGroup;
}
@@ -206,18 +170,22 @@ export class ProfileFormComponent implements OnInit {
fillProfileForm(profileFormat: ProfileFormat[], profile: Profile): void {
this.nameControl.setValue(profile.name);
profileFormat.forEach((question, index) => {
+ const answer = profile.questions.find(
+ answers => answers.question === question.question
+ );
if (question.type === FormControlType.SELECT_MULTIPLE) {
question.options?.forEach((item, idx) => {
- if ((profile.questions[index].answer as number[])?.includes(idx)) {
+ if ((answer?.answer as number[])?.includes(idx)) {
this.getFormGroup(index).controls[idx].setValue(true);
} else {
this.getFormGroup(index).controls[idx].setValue(false);
}
});
} else {
- this.getControl(index).setValue(profile.questions[index].answer);
+ this.getControl(index).setValue(answer?.answer || '');
}
});
+ this.nameControl.markAsTouched();
this.triggerResize();
}
@@ -231,14 +199,8 @@ export class ProfileFormComponent implements OnInit {
this.saveProfile.emit(response);
}
- public markSectionAsDirty(
- optionIndex: number,
- optionLength: number,
- formControlName: string
- ) {
- if (optionIndex === optionLength - 1) {
- this.getControl(formControlName).markAsDirty();
- }
+ onDiscardClick() {
+ this.discard.emit();
}
private buildResponseFromForm(
@@ -251,7 +213,7 @@ export class ProfileFormComponent implements OnInit {
const request: any = {
questions: [],
};
- if (profile) {
+ if (profile && !this.isCopyProfile) {
request.name = profile.name;
request.rename = this.nameControl.value?.trim();
} else {
diff --git a/modules/ui/src/app/pages/risk-assessment/profile-form/profile.validators.ts b/modules/ui/src/app/pages/risk-assessment/profile-form/profile.validators.ts
index dcad4b397..10280586d 100644
--- a/modules/ui/src/app/pages/risk-assessment/profile-form/profile.validators.ts
+++ b/modules/ui/src/app/pages/risk-assessment/profile-form/profile.validators.ts
@@ -36,8 +36,14 @@ export class ProfileValidators {
profile: Profile | null
): ValidatorFn {
return (control: AbstractControl): ValidationErrors | null => {
- const value = control.value?.trim();
- if (value && profiles.length && (!profile || profile?.name !== value)) {
+ const value = control.value?.trim().toLowerCase();
+ if (
+ value &&
+ profiles.length &&
+ (!profile ||
+ !profile.created ||
+ (profile.created && profile?.name.toLowerCase() !== value))
+ ) {
const isSameProfileName = this.hasSameProfileName(value, profiles);
return isSameProfileName ? { has_same_profile_name: true } : null;
}
@@ -85,7 +91,8 @@ export class ProfileValidators {
profiles: Profile[]
): boolean {
return (
- profiles.some(profile => profile.name === profileName?.trim()) || false
+ profiles.some(profile => profile.name.toLowerCase() === profileName) ||
+ false
);
}
}
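
Note: the validator change above makes the duplicate-name check case-insensitive — both the control value and the stored profile names are lowercased before comparison. A minimal sketch of the same rule, expressed in Python for illustration (not the shipped TypeScript; profile shape mirrors the fixtures under testing/api/profiles):

```python
# Illustrative sketch of the case-insensitive uniqueness rule that
# hasSameProfileName implements above.
def has_same_profile_name(name: str, profiles: list) -> bool:
    """True if a saved profile already uses this name, ignoring case."""
    candidate = name.strip().lower()
    return any(p["name"].lower() == candidate for p in profiles)

profiles = [{"name": "Valid_Profile"}, {"name": "draft_profile"}]
assert has_same_profile_name("  valid_profile ", profiles)  # duplicate despite casing
assert not has_same_profile_name("new profile", profiles)
```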
diff --git a/modules/ui/src/app/pages/risk-assessment/profile-item/profile-item.component.html b/modules/ui/src/app/pages/risk-assessment/profile-item/profile-item.component.html
index 35850f0ed..41f90de38 100644
--- a/modules/ui/src/app/pages/risk-assessment/profile-item/profile-item.component.html
+++ b/modules/ui/src/app/pages/risk-assessment/profile-item/profile-item.component.html
@@ -13,17 +13,32 @@
See the License for the specific language governing permissions and
limitations under the License.
-->
-
0; else warning_message">
-
- Warning! Testrun requires two ports to operate correctly.
-
-
System settings
- Warning! No ports is detected.
+ Warning! No ports detected.
+ The following recommendations are required only for full device qualification.
+ They are optional for the pilot assessment, but you may find it valuable
+ to understand what will be required in the future
+ and what we recommend for your device.
+
+
+ {% for step in optional_steps_to_resolve %}
+
+
+
+ {{ loop.index }}.
+
+
+ Name
+ {{ step['name'] }}
+
+
+ Description
+ {{ step["description"] }}
+
+
+
+ Steps to resolve
+ {% for recommendation in step['optional_recommendations'] %}
+
+
+ {{ loop.index }}. {{ recommendation }}
+
+ {% endfor %}
+
+
+ {% endfor %}
+
+
+ {% endif %}
+
+
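
Note: the report template above iterates `optional_steps_to_resolve`, where each step carries a `name`, a `description`, and a list of `optional_recommendations`. A minimal jinja2 rendering sketch of that loop (the context shape is taken from the template; the HTML markup is omitted and the sample step is hypothetical):

```python
# Minimal sketch of rendering the loop above; the sample data is invented
# for illustration only.
from jinja2 import Template

template = Template("""
{% for step in optional_steps_to_resolve %}
{{ loop.index }}. {{ step['name'] }} -- {{ step['description'] }}
{% for recommendation in step['optional_recommendations'] %}
    {{ loop.index }}. {{ recommendation }}
{% endfor %}
{% endfor %}
""")

steps = [{
    "name": "example.step",                     # hypothetical
    "description": "Example optional finding",  # hypothetical
    "optional_recommendations": ["Example recommendation"],
}]
print(template.render(optional_steps_to_resolve=steps))
```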
diff --git a/resources/risk_assessment.json b/resources/risk_assessment.json
index b94c5b7b6..d4f2574fb 100644
--- a/resources/risk_assessment.json
+++ b/resources/risk_assessment.json
@@ -1,180 +1,7 @@
[
- {
- "question": "What type of device is this?",
- "type": "select",
- "options": [
- {
- "text": "Building Automation Gateway",
- "risk": "High"
- },
- {
- "text": "IoT Gateway",
- "risk": "High"
- },
- {
- "text": "Controller - AHU",
- "risk": "High"
- },
- {
- "text": "Controller - Boiler",
- "risk": "High"
- },
- {
- "text": "Controller - Chiller",
- "risk": "High"
- },
- {
- "text": "Controller - FCU",
- "risk": "Limited"
- },
- {
- "text": "Controller - Pump",
- "risk": "Limited"
- },
- {
- "text": "Controller - CRAC",
- "risk": "High"
- },
- {
- "text": "Controller - VAV",
- "risk": "Limited"
- },
- {
- "text": "Controller - VRF",
- "risk": "Limited"
- },
- {
- "text": "Controller - Multiple",
- "risk": "High"
- },
- {
- "text": "Controller - Other",
- "risk": "High"
- },
- {
- "text": "Controller - Lighting",
- "risk": "Limited"
- },
- {
- "text": "Controller - Blinds/Facades",
- "risk": "High"
- },
- {
- "text": "Controller - Lifts/Elevators",
- "risk": "High"
- },
- {
- "text": "Controller - UPS",
- "risk": "High"
- },
- {
- "text": "Sensor - Air Quality",
- "risk": "Limited"
- },
- {
- "text": "Sensor - Vibration",
- "risk": "Limited"
- },
- {
- "text": "Sensor - Humidity",
- "risk": "Limited"
- },
- {
- "text": "Sensor - Water",
- "risk": "Limited"
- },
- {
- "text": "Sensor - Occupancy",
- "risk": "High"
- },
- {
- "text": "Sensor - Volume",
- "risk": "Limited"
- },
- {
- "text": "Sensor - Weight",
- "risk": "Limited"
- },
- {
- "text": "Sensor - Weather",
- "risk": "Limited"
- },
- {
- "text": "Sensor - Steam",
- "risk": "High"
- },
- {
- "text": "Sensor - Air Flow",
- "risk": "Limited"
- },
- {
- "text": "Sensor - Lighting",
- "risk": "Limited"
- },
- {
- "text": "Sensor - Other",
- "risk": "High"
- },
- {
- "text": "Sensor - Air Quality",
- "risk": "Limited"
- },
- {
- "text": "Monitoring - Fire System",
- "risk": "Limited"
- },
- {
- "text": "Monitoring - Emergency Lighting",
- "risk": "Limited"
- },
- {
- "text": "Monitoring - Other",
- "risk": "High"
- },
- {
- "text": "Monitoring - UPS",
- "risk": "Limited"
- },
- {
- "text": "Meter - Water",
- "risk": "Limited"
- },
- {
- "text": "Meter - Gas",
- "risk": "Limited"
- },
- {
- "text": "Meter - Electricity",
- "risk": "Limited"
- },
- {
- "text": "Meter - Other",
- "risk": "High"
- },
- {
- "text": "Other",
- "risk": "High"
- },
- {
- "text": "Data - Storage",
- "risk": "High"
- },
- {
- "text": "Data - Processing",
- "risk": "High"
- },
- {
- "text": "Tablet",
- "risk": "High"
- }
- ],
- "validation": {
- "required": true
- }
- },
{
"question": "How will this device be used at Google?",
- "description": "Desribe your use case. Add links to user journey diagrams and TDD if available.",
+ "description": "Describe your use case. Add links to user journey diagrams and TDD if available.",
"type": "text-long",
"validation": {
"max": "512",
@@ -218,33 +45,6 @@
"required": true
}
},
- {
- "category": "Data Collection",
- "question": "Are any of the following statements true about your device?",
- "description": "This tells us about the data your device will collect",
- "type": "select-multiple",
- "options": [
- {
- "text": "The device collects any Personal Identifiable Information (PII) or Personal Health Information (PHI)",
- "risk": "High"
- },
- {
- "text": "The device collects intellectual property and trade secrets, sensitive business data, critical infrastructure data, identity assets",
- "risk": "High"
- },
- {
- "text": "The device streams confidential business data in real-time (seconds)?",
- "risk": "High"
- },
- {
- "text": "None of the above",
- "risk": "Limited"
- }
- ],
- "validation": {
- "required": true
- }
- },
{
"category": "Data Transmission",
"question": "Which of the following statements are true about this device?",
@@ -260,7 +60,7 @@
"risk": "High"
},
{
- "text": "A failure in data transmission would likely have a substantial negative impact (https://www.rra.rocks/docs/standard_levels#levels-definitions)",
+ "text": "A failure in data transmission would likely have a substantial negative impact (https://www.rra.rocks/docs/standard_levels#levels-definitions)",
"risk": "High"
},
{
diff --git a/resources/test_packs/pilot.json b/resources/test_packs/pilot.json
new file mode 100644
index 000000000..587d0a25a
--- /dev/null
+++ b/resources/test_packs/pilot.json
@@ -0,0 +1,169 @@
+{
+ "name": "Pilot Assessment",
+ "language": {
+ "compliant_description": "Your device has met the initial pilot assessment requirements. Please send your Testrun ZIP file to the qualification lab for verification. The lab will then contact you with further instructions.",
+ "non_compliant_description": "Your device didn't quite meet the initial pilot assessment requirements. The Testrun report will provide guidance on how to resolve any issues. If you require further support, please get in touch with the qualification lab."
+ },
+ "tests": [
+ {
+ "name": "connection.port_link",
+ "required_result": "Informational"
+ },
+ {
+ "name": "connection.port_speed",
+ "required_result": "Informational"
+ },
+ {
+ "name": "connection.port_duplex",
+ "required_result": "Informational"
+ },
+ {
+ "name": "connection.switch.arp_inspection",
+ "required_result": "Informational"
+ },
+ {
+ "name": "connection.switch.dhcp_snooping",
+ "required_result": "Informational"
+ },
+ {
+ "name": "connection.dhcp_address",
+ "required_result": "Required"
+ },
+ {
+ "name": "connection.mac_address",
+ "required_result": "Required"
+ },
+ {
+ "name": "connection.mac_oui",
+ "required_result": "Informational"
+ },
+ {
+ "name": "connection.private_address",
+ "required_result": "Informational"
+ },
+ {
+ "name": "connection.shared_address",
+ "required_result": "Informational"
+ },
+ {
+ "name": "connection.single_ip",
+ "required_result": "Informational"
+ },
+ {
+ "name": "connection.target_ping",
+ "required_result": "Informational"
+ },
+ {
+ "name": "connection.ipaddr.ip_change",
+ "required_result": "Informational"
+ },
+ {
+ "name": "connection.ipaddr.dhcp_failover",
+ "required_result": "Informational"
+ },
+ {
+ "name": "connection.ipv6_slaac",
+ "required_result": "Informational"
+ },
+ {
+ "name": "connection.ipv6_ping",
+ "required_result": "Informational"
+ },
+ {
+ "name": "dns.network.hostname_resolution",
+ "required_result": "Informational"
+ },
+ {
+ "name": "dns.network.from_dhcp",
+ "required_result": "Informational"
+ },
+ {
+ "name": "dns.mdns",
+ "required_result": "Informational"
+ },
+ {
+ "name": "ntp.network.ntp_support",
+ "required_result": "Informational"
+ },
+ {
+ "name": "ntp.network.ntp_dhcp",
+ "required_result": "Informational"
+ },
+ {
+ "name": "protocol.valid_bacnet",
+ "required_result": "Informational"
+ },
+ {
+ "name": "protocol.bacnet.version",
+ "required_result": "Informational"
+ },
+ {
+ "name": "protocol.valid_modbus",
+ "required_result": "Informational"
+ },
+ {
+ "name": "security.services.ftp",
+ "required_result": "Informational"
+ },
+ {
+ "name": "security.ssh.version",
+ "required_result": "Informational"
+ },
+ {
+ "name": "security.services.telnet",
+ "required_result": "Informational"
+ },
+ {
+ "name": "security.services.smtp",
+ "required_result": "Informational"
+ },
+ {
+ "name": "security.services.http",
+ "required_result": "Informational"
+ },
+ {
+ "name": "security.services.pop",
+ "required_result": "Informational"
+ },
+ {
+ "name": "security.services.imap",
+ "required_result": "Informational"
+ },
+ {
+ "name": "security.services.snmpv3",
+ "required_result": "Informational"
+ },
+ {
+ "name": "security.services.vnc",
+ "required_result": "Informational"
+ },
+ {
+ "name": "security.services.tftp",
+ "required_result": "Informational"
+ },
+ {
+ "name": "ntp.network.ntp_server",
+ "required_result": "Informational"
+ },
+ {
+ "name": "security.tls.v1_0_client",
+ "required_result": "Required if Applicable"
+ },
+ {
+ "name": "security.tls.v1_2_server",
+ "required_result": "Informational"
+ },
+ {
+ "name": "security.tls.v1_2_client",
+ "required_result": "Informational"
+ },
+ {
+ "name": "security.tls.v1_3_server",
+ "required_result": "Informational"
+ },
+ {
+ "name": "security.tls.v1_3_client",
+ "required_result": "Informational"
+ }
+ ]
+ }
\ No newline at end of file
diff --git a/resources/test_packs/qualification.json b/resources/test_packs/qualification.json
new file mode 100644
index 000000000..967370b4a
--- /dev/null
+++ b/resources/test_packs/qualification.json
@@ -0,0 +1,177 @@
+{
+ "name": "Device Qualification",
+ "language": {
+ "compliant_description": "Your device has met the initial device qualification requirements. Please send your Testrun ZIP file to the qualification lab for verification. The lab will then contact you with further instructions.",
+ "non_compliant_description": "Your device didn't quite meet the initial device qualification requirements. The Testrun report will provide guidance on how to resolve any issues. If you require further support, please get in touch with the qualification lab."
+ },
+ "tests": [
+ {
+ "name": "connection.port_link",
+ "required_result": "Required"
+ },
+ {
+ "name": "connection.port_speed",
+ "required_result": "Required"
+ },
+ {
+ "name": "connection.port_duplex",
+ "required_result": "Required"
+ },
+ {
+ "name": "connection.switch.arp_inspection",
+ "required_result": "Required"
+ },
+ {
+ "name": "connection.switch.dhcp_snooping",
+ "required_result": "Required"
+ },
+ {
+ "name": "connection.dhcp_address",
+ "required_result": "Required"
+ },
+ {
+ "name": "connection.mac_address",
+ "required_result": "Required"
+ },
+ {
+ "name": "connection.mac_oui",
+ "required_result": "Required"
+ },
+ {
+ "name": "connection.private_address",
+ "required_result": "Required"
+ },
+ {
+ "name": "connection.shared_address",
+ "required_result": "Required"
+ },
+ {
+ "name": "connection.dhcp_disconnect",
+ "required_result": "Required"
+ },
+ {
+ "name": "connection.dhcp_disconnect_ip_change",
+ "required_result": "Required"
+ },
+ {
+ "name": "connection.single_ip",
+ "required_result": "Required"
+ },
+ {
+ "name": "connection.target_ping",
+ "required_result": "Required"
+ },
+ {
+ "name": "connection.ipaddr.ip_change",
+ "required_result": "Required"
+ },
+ {
+ "name": "connection.ipaddr.dhcp_failover",
+ "required_result": "Required"
+ },
+ {
+ "name": "connection.ipv6_slaac",
+ "required_result": "Required"
+ },
+ {
+ "name": "connection.ipv6_ping",
+ "required_result": "Required"
+ },
+ {
+ "name": "dns.network.hostname_resolution",
+ "required_result": "Required"
+ },
+ {
+ "name": "dns.network.from_dhcp",
+ "required_result": "Informational"
+ },
+ {
+ "name": "dns.mdns",
+ "required_result": "Informational"
+ },
+ {
+ "name": "ntp.network.ntp_support",
+ "required_result": "Required"
+ },
+ {
+ "name": "ntp.network.ntp_dhcp",
+ "required_result": "Roadmap"
+ },
+ {
+ "name": "protocol.valid_bacnet",
+ "required_result": "Recommended"
+ },
+ {
+ "name": "protocol.bacnet.version",
+ "required_result": "Recommended"
+ },
+ {
+ "name": "protocol.valid_modbus",
+ "required_result": "Recommended"
+ },
+ {
+ "name": "security.services.ftp",
+ "required_result": "Required"
+ },
+ {
+ "name": "security.ssh.version",
+ "required_result": "Required"
+ },
+ {
+ "name": "security.services.telnet",
+ "required_result": "Required"
+ },
+ {
+ "name": "security.services.smtp",
+ "required_result": "Required"
+ },
+ {
+ "name": "security.services.http",
+ "required_result": "Required"
+ },
+ {
+ "name": "security.services.pop",
+ "required_result": "Required"
+ },
+ {
+ "name": "security.services.imap",
+ "required_result": "Required"
+ },
+ {
+ "name": "security.services.snmpv3",
+ "required_result": "Required"
+ },
+ {
+ "name": "security.services.vnc",
+ "required_result": "Required"
+ },
+ {
+ "name": "security.services.tftp",
+ "required_result": "Required"
+ },
+ {
+ "name": "ntp.network.ntp_server",
+ "required_result": "Required"
+ },
+ {
+ "name": "security.tls.v1_0_client",
+ "required_result": "Informational"
+ },
+ {
+ "name": "security.tls.v1_2_server",
+ "required_result": "Required if Applicable"
+ },
+ {
+ "name": "security.tls.v1_2_client",
+ "required_result": "Required if Applicable"
+ },
+ {
+ "name": "security.tls.v1_3_server",
+ "required_result": "Informational"
+ },
+ {
+ "name": "security.tls.v1_3_client",
+ "required_result": "Informational"
+ }
+ ]
+}
\ No newline at end of file
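
Note: both test packs share the same schema — a `name`, a `language` block with compliant/non-compliant descriptions, and a `tests` array pairing each test `name` with a `required_result`. A quick illustrative sketch (not part of the diff) that loads a pack and tallies its requirement levels:

```python
# Sketch: summarise a test pack's requirement levels. Paths and field
# names match the JSON above.
import json
from collections import Counter

with open("resources/test_packs/qualification.json", encoding="utf-8") as f:
    pack = json.load(f)

levels = Counter(t["required_result"] for t in pack["tests"])
print(pack["name"])  # Device Qualification
for level, count in sorted(levels.items()):
    print(f"  {level}: {count}")
```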
diff --git a/testing/api/certificates/WR2.pem b/testing/api/certificates/WR2.pem
new file mode 100644
index 000000000..f82f4d12d
--- /dev/null
+++ b/testing/api/certificates/WR2.pem
@@ -0,0 +1,29 @@
+-----BEGIN CERTIFICATE-----
+MIIFCzCCAvOgAwIBAgIQf/AFoHxM3tEArZ1mpRB7mDANBgkqhkiG9w0BAQsFADBH
+MQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExM
+QzEUMBIGA1UEAxMLR1RTIFJvb3QgUjEwHhcNMjMxMjEzMDkwMDAwWhcNMjkwMjIw
+MTQwMDAwWjA7MQswCQYDVQQGEwJVUzEeMBwGA1UEChMVR29vZ2xlIFRydXN0IFNl
+cnZpY2VzMQwwCgYDVQQDEwNXUjIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK
+AoIBAQCp/5x/RR5wqFOfytnlDd5GV1d9vI+aWqxG8YSau5HbyfsvAfuSCQAWXqAc
++MGr+XgvSszYhaLYWTwO0xj7sfUkDSbutltkdnwUxy96zqhMt/TZCPzfhyM1IKji
+aeKMTj+xWfpgoh6zySBTGYLKNlNtYE3pAJH8do1cCA8Kwtzxc2vFE24KT3rC8gIc
+LrRjg9ox9i11MLL7q8Ju26nADrn5Z9TDJVd06wW06Y613ijNzHoU5HEDy01hLmFX
+xRmpC5iEGuh5KdmyjS//V2pm4M6rlagplmNwEmceOuHbsCFx13ye/aoXbv4r+zgX
+FNFmp6+atXDMyGOBOozAKql2N87jAgMBAAGjgf4wgfswDgYDVR0PAQH/BAQDAgGG
+MB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjASBgNVHRMBAf8ECDAGAQH/
+AgEAMB0GA1UdDgQWBBTeGx7teRXUPjckwyG77DQ5bUKyMDAfBgNVHSMEGDAWgBTk
+rysmcRorSCeFL1JmLO/wiRNxPjA0BggrBgEFBQcBAQQoMCYwJAYIKwYBBQUHMAKG
+GGh0dHA6Ly9pLnBraS5nb29nL3IxLmNydDArBgNVHR8EJDAiMCCgHqAchhpodHRw
+Oi8vYy5wa2kuZ29vZy9yL3IxLmNybDATBgNVHSAEDDAKMAgGBmeBDAECATANBgkq
+hkiG9w0BAQsFAAOCAgEARXWL5R87RBOWGqtY8TXJbz3S0DNKhjO6V1FP7sQ02hYS
+TL8Tnw3UVOlIecAwPJQl8hr0ujKUtjNyC4XuCRElNJThb0Lbgpt7fyqaqf9/qdLe
+SiDLs/sDA7j4BwXaWZIvGEaYzq9yviQmsR4ATb0IrZNBRAq7x9UBhb+TV+PfdBJT
+DhEl05vc3ssnbrPCuTNiOcLgNeFbpwkuGcuRKnZc8d/KI4RApW//mkHgte8y0YWu
+ryUJ8GLFbsLIbjL9uNrizkqRSvOFVU6xddZIMy9vhNkSXJ/UcZhjJY1pXAprffJB
+vei7j+Qi151lRehMCofa6WBmiA4fx+FOVsV2/7R6V2nyAiIJJkEd2nSi5SnzxJrl
+Xdaqev3htytmOPvoKWa676ATL/hzfvDaQBEcXd2Ppvy+275W+DKcH0FBbX62xevG
+iza3F4ydzxl6NJ8hk8R+dDXSqv1MbRT1ybB5W0k8878XSOjvmiYTDIfyc9acxVJr
+Y/cykHipa+te1pOhv7wYPYtZ9orGBV5SGOJm4NrB3K1aJar0RfzxC3ikr7Dyc6Qw
+qDTBU39CluVIQeuQRgwG3MuSxl7zRERDRilGoKb8uY45JzmxWuKxrfwT/478JuHU
+/oTxUFqOl2stKnn7QGTq8z29W+GgBLCXSBxC9epaHM0myFH/FJlniXJfHeytWt0=
+-----END CERTIFICATE-----
diff --git a/testing/api/certificates/crt.pem b/testing/api/certificates/crt.pem
new file mode 100644
index 000000000..410b1f104
--- /dev/null
+++ b/testing/api/certificates/crt.pem
@@ -0,0 +1,31 @@
+-----BEGIN CERTIFICATE-----
+MIIFVzCCAz+gAwIBAgINAgPlk28xsBNJiGuiFzANBgkqhkiG9w0BAQwFADBHMQsw
+CQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExMQzEU
+MBIGA1UEAxMLR1RTIFJvb3QgUjEwHhcNMTYwNjIyMDAwMDAwWhcNMzYwNjIyMDAw
+MDAwWjBHMQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZp
+Y2VzIExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjEwggIiMA0GCSqGSIb3DQEBAQUA
+A4ICDwAwggIKAoICAQC2EQKLHuOhd5s73L+UPreVp0A8of2C+X0yBoJx9vaMf/vo
+27xqLpeXo4xL+Sv2sfnOhB2x+cWX3u+58qPpvBKJXqeqUqv4IyfLpLGcY9vXmX7w
+Cl7raKb0xlpHDU0QM+NOsROjyBhsS+z8CZDfnWQpJSMHobTSPS5g4M/SCYe7zUjw
+TcLCeoiKu7rPWRnWr4+wB7CeMfGCwcDfLqZtbBkOtdh+JhpFAz2weaSUKK0Pfybl
+qAj+lug8aJRT7oM6iCsVlgmy4HqMLnXWnOunVmSPlk9orj2XwoSPwLxAwAtcvfaH
+szVsrBhQf4TgTM2S0yDpM7xSma8ytSmzJSq0SPly4cpk9+aCEI3oncKKiPo4Zor8
+Y/kB+Xj9e1x3+naH+uzfsQ55lVe0vSbv1gHR6xYKu44LtcXFilWr06zqkUspzBmk
+MiVOKvFlRNACzqrOSbTqn3yDsEB750Orp2yjj32JgfpMpf/VjsPOS+C12LOORc92
+wO1AK/1TD7Cn1TsNsYqiA94xrcx36m97PtbfkSIS5r762DL8EGMUUXLeXdYWk70p
+aDPvOmbsB4om3xPXV2V4J95eSRQAogB/mqghtqmxlbCluQ0WEdrHbEg8QOB+DVrN
+VjzRlwW5y0vtOUucxD/SVRNuJLDWcfr0wbrM7Rv1/oFB2ACYPTrIrnqYNxgFlQID
+AQABo0IwQDAOBgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4E
+FgQU5K8rJnEaK0gnhS9SZizv8IkTcT4wDQYJKoZIhvcNAQEMBQADggIBAJ+qQibb
+C5u+/x6Wki4+omVKapi6Ist9wTrYggoGxval3sBOh2Z5ofmmWJyq+bXmYOfg6LEe
+QkEzCzc9zolwFcq1JKjPa7XSQCGYzyI0zzvFIoTgxQ6KfF2I5DUkzps+GlQebtuy
+h6f88/qBVRRiClmpIgUxPoLW7ttXNLwzldMXG+gnoot7TiYaelpkttGsN/H9oPM4
+7HLwEXWdyzRSjeZ2axfG34arJ45JK3VmgRAhpuo+9K4l/3wV3s6MJT/KYnAK9y8J
+ZgfIPxz88NtFMN9iiMG1D53Dn0reWVlHxYciNuaCp+0KueIHoI17eko8cdLiA6Ef
+MgfdG+RCzgwARWGAtQsgWSl4vflVy2PFPEz0tv/bal8xa5meLMFrUKTX5hgUvYU/
+Z6tGn6D/Qqc6f1zLXbBwHSs09dR2CQzreExZBfMzQsNhFRAbd03OIozUhfJFfbdT
+6u9AWpQKXCBfTkBdYiJ23//OYb2MI3jSNwLgjt7RETeJ9r/tSQdirpLsQBqvFAnZ
+0E6yove+7u7Y/9waLd64NnHi/Hm3lCXRSHNboTXns5lndcEZOitHTtNCjv0xyBZm
+2tIMPNuzjsmhDYAPexZ3FL//2wmUspO8IFgV6dtxQ/PeEMMA3KgqlbbC1j+Qa3bb
+bP6MvPJwNQzcmRk13NfIRmPVNnGuV/u3gm3c
+-----END CERTIFICATE-----
diff --git a/testing/api/certificates/invalid.pem b/testing/api/certificates/invalid.pem
new file mode 100644
index 000000000..d3f5a12fa
--- /dev/null
+++ b/testing/api/certificates/invalid.pem
@@ -0,0 +1 @@
+
diff --git a/testing/api/certificates/invalidname1234567891234.pem b/testing/api/certificates/invalidname1234567891234.pem
new file mode 100644
index 000000000..410b1f104
--- /dev/null
+++ b/testing/api/certificates/invalidname1234567891234.pem
@@ -0,0 +1,31 @@
+-----BEGIN CERTIFICATE-----
+MIIFVzCCAz+gAwIBAgINAgPlk28xsBNJiGuiFzANBgkqhkiG9w0BAQwFADBHMQsw
+CQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExMQzEU
+MBIGA1UEAxMLR1RTIFJvb3QgUjEwHhcNMTYwNjIyMDAwMDAwWhcNMzYwNjIyMDAw
+MDAwWjBHMQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZp
+Y2VzIExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjEwggIiMA0GCSqGSIb3DQEBAQUA
+A4ICDwAwggIKAoICAQC2EQKLHuOhd5s73L+UPreVp0A8of2C+X0yBoJx9vaMf/vo
+27xqLpeXo4xL+Sv2sfnOhB2x+cWX3u+58qPpvBKJXqeqUqv4IyfLpLGcY9vXmX7w
+Cl7raKb0xlpHDU0QM+NOsROjyBhsS+z8CZDfnWQpJSMHobTSPS5g4M/SCYe7zUjw
+TcLCeoiKu7rPWRnWr4+wB7CeMfGCwcDfLqZtbBkOtdh+JhpFAz2weaSUKK0Pfybl
+qAj+lug8aJRT7oM6iCsVlgmy4HqMLnXWnOunVmSPlk9orj2XwoSPwLxAwAtcvfaH
+szVsrBhQf4TgTM2S0yDpM7xSma8ytSmzJSq0SPly4cpk9+aCEI3oncKKiPo4Zor8
+Y/kB+Xj9e1x3+naH+uzfsQ55lVe0vSbv1gHR6xYKu44LtcXFilWr06zqkUspzBmk
+MiVOKvFlRNACzqrOSbTqn3yDsEB750Orp2yjj32JgfpMpf/VjsPOS+C12LOORc92
+wO1AK/1TD7Cn1TsNsYqiA94xrcx36m97PtbfkSIS5r762DL8EGMUUXLeXdYWk70p
+aDPvOmbsB4om3xPXV2V4J95eSRQAogB/mqghtqmxlbCluQ0WEdrHbEg8QOB+DVrN
+VjzRlwW5y0vtOUucxD/SVRNuJLDWcfr0wbrM7Rv1/oFB2ACYPTrIrnqYNxgFlQID
+AQABo0IwQDAOBgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4E
+FgQU5K8rJnEaK0gnhS9SZizv8IkTcT4wDQYJKoZIhvcNAQEMBQADggIBAJ+qQibb
+C5u+/x6Wki4+omVKapi6Ist9wTrYggoGxval3sBOh2Z5ofmmWJyq+bXmYOfg6LEe
+QkEzCzc9zolwFcq1JKjPa7XSQCGYzyI0zzvFIoTgxQ6KfF2I5DUkzps+GlQebtuy
+h6f88/qBVRRiClmpIgUxPoLW7ttXNLwzldMXG+gnoot7TiYaelpkttGsN/H9oPM4
+7HLwEXWdyzRSjeZ2axfG34arJ45JK3VmgRAhpuo+9K4l/3wV3s6MJT/KYnAK9y8J
+ZgfIPxz88NtFMN9iiMG1D53Dn0reWVlHxYciNuaCp+0KueIHoI17eko8cdLiA6Ef
+MgfdG+RCzgwARWGAtQsgWSl4vflVy2PFPEz0tv/bal8xa5meLMFrUKTX5hgUvYU/
+Z6tGn6D/Qqc6f1zLXbBwHSs09dR2CQzreExZBfMzQsNhFRAbd03OIozUhfJFfbdT
+6u9AWpQKXCBfTkBdYiJ23//OYb2MI3jSNwLgjt7RETeJ9r/tSQdirpLsQBqvFAnZ
+0E6yove+7u7Y/9waLd64NnHi/Hm3lCXRSHNboTXns5lndcEZOitHTtNCjv0xyBZm
+2tIMPNuzjsmhDYAPexZ3FL//2wmUspO8IFgV6dtxQ/PeEMMA3KgqlbbC1j+Qa3bb
+bP6MvPJwNQzcmRk13NfIRmPVNnGuV/u3gm3c
+-----END CERTIFICATE-----
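
Note: these fixtures back the certificate-upload tests — `WR2.pem` and `crt.pem` are Google Trust Services certificates, `invalid.pem` is deliberately empty, and `invalidname1234567891234.pem` presumably exercises a file-name length limit. Since test_api.py imports `cryptography.x509`, a parse check over one of them could look like this (a sketch, not the shipped test code):

```python
# Sketch of parsing a fixture certificate, using the same cryptography
# imports that testing/api/test_api.py uses.
from cryptography import x509
from cryptography.hazmat.backends import default_backend

with open("testing/api/certificates/WR2.pem", "rb") as f:
    cert = x509.load_pem_x509_certificate(f.read(), default_backend())

print(cert.subject.rfc4514_string())  # e.g. CN=WR2,O=Google Trust Services,C=US
print(cert.not_valid_after)           # expiry (2029 for this certificate)
```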
diff --git a/testing/api/devices/device_1/device_config.json b/testing/api/devices/device_1/device_config.json
new file mode 100644
index 000000000..3be69a082
--- /dev/null
+++ b/testing/api/devices/device_1/device_config.json
@@ -0,0 +1,54 @@
+{
+ "mac_addr": "00:1e:42:28:9e:4a",
+ "manufacturer": "Teltonika",
+ "model": "TRB140",
+ "type": "IoT Gateway",
+ "technology": "Hardware - Access Control",
+ "test_pack": "Device Qualification",
+ "additional_info": [
+ {
+ "question": "What type of device is this?",
+ "answer": "IoT Gateway"
+ },
+ {
+ "question": "Please select the technology this device falls into",
+ "answer": "Hardware - Access Control"
+ },
+ {
+ "question": "Does your device process any sensitive information?",
+ "answer": "Yes"
+ },
+ {
+ "question": "Can all non-essential services be disabled on your device?",
+ "answer": "Yes"
+ },
+ {
+ "question": "Is there a second IP port on the device?",
+ "answer": "Yes"
+ },
+ {
+ "question": "Can the second IP port on your device be disabled?",
+ "answer": "Yes"
+ }
+ ],
+ "test_modules": {
+ "protocol": {
+ "enabled": true
+ },
+ "services": {
+ "enabled": false
+ },
+ "ntp": {
+ "enabled": true
+ },
+ "tls": {
+ "enabled": false
+ },
+ "connection": {
+ "enabled": true
+ },
+ "dns": {
+ "enabled": true
+ }
+ }
+}
diff --git a/testing/api/devices/device_2/device_config.json b/testing/api/devices/device_2/device_config.json
new file mode 100644
index 000000000..177ee23e6
--- /dev/null
+++ b/testing/api/devices/device_2/device_config.json
@@ -0,0 +1,54 @@
+{
+ "mac_addr": "00:1e:42:35:73:c6",
+ "manufacturer": "Google",
+ "model": "First",
+ "type": "IoT Gateway",
+ "technology": "Hardware - Access Control",
+ "test_pack": "Device Qualification",
+ "additional_info": [
+ {
+ "question": "What type of device is this?",
+ "answer": "IoT Gateway"
+ },
+ {
+ "question": "Please select the technology this device falls into",
+ "answer": "Hardware - Access Control"
+ },
+ {
+ "question": "Does your device process any sensitive information?",
+ "answer": "Yes"
+ },
+ {
+ "question": "Can all non-essential services be disabled on your device?",
+ "answer": "Yes"
+ },
+ {
+ "question": "Is there a second IP port on the device?",
+ "answer": "Yes"
+ },
+ {
+ "question": "Can the second IP port on your device be disabled?",
+ "answer": "Yes"
+ }
+ ],
+ "test_modules": {
+ "protocol": {
+ "enabled": true
+ },
+ "services": {
+ "enabled": false
+ },
+ "ntp": {
+ "enabled": true
+ },
+ "tls": {
+ "enabled": false
+ },
+ "connection": {
+ "enabled": true
+ },
+ "dns": {
+ "enabled": true
+ }
+ }
+}
diff --git a/testing/api/profiles/draft_profile.json b/testing/api/profiles/draft_profile.json
new file mode 100644
index 000000000..0f580fb98
--- /dev/null
+++ b/testing/api/profiles/draft_profile.json
@@ -0,0 +1,35 @@
+{
+ "name": "draft_profile",
+ "version": "1.4",
+ "created": "2024-09-03",
+ "questions": [
+ {
+ "question": "How will this device be used at Google?",
+ "answer": "Monitoring"
+ },
+ {
+ "question": "Is this device going to be managed by Google or a third party?",
+ "answer": "Google"
+ },
+ {
+ "question": "Will the third-party device administrator be able to grant access to authorized Google personnel upon request?",
+ "answer": ""
+ },
+ {
+ "question": "Which of the following statements are true about this device?",
+ "answer": []
+ },
+ {
+ "question": "Does the network protocol assure server-to-client identity verification?",
+ "answer": "Yes"
+ },
+ {
+ "question": "Click the statements that best describe the characteristics of this device.",
+ "answer": []
+ },
+ {
+ "question": "Are any of the following statements true about this device?",
+ "answer": []
+ }
+ ]
+}
\ No newline at end of file
diff --git a/testing/api/profiles/valid_profile.json b/testing/api/profiles/valid_profile.json
new file mode 100644
index 000000000..207929f8d
--- /dev/null
+++ b/testing/api/profiles/valid_profile.json
@@ -0,0 +1,39 @@
+{
+ "name": "valid_profile",
+ "version": "1.4",
+ "created": "2024-09-03",
+ "questions": [
+ {
+ "question": "How will this device be used at Google?",
+ "answer": "Monitoring"
+ },
+ {
+ "question": "Is this device going to be managed by Google or a third party?",
+ "answer": "Google"
+ },
+ {
+ "question": "Will the third-party device administrator be able to grant access to authorized Google personnel upon request?",
+ "answer": "N/A"
+ },
+ {
+ "question": "Which of the following statements are true about this device?",
+ "answer": [0]
+ },
+ {
+ "question": "Does the network protocol assure server-to-client identity verification?",
+ "answer": "Yes"
+ },
+ {
+ "question": "Click the statements that best describe the characteristics of this device.",
+ "answer": [0]
+ },
+ {
+ "question": "Are any of the following statements true about this device?",
+ "answer": [0]
+ },
+ {
+ "question": "Comments",
+ "answer": ""
+ }
+ ]
+}
\ No newline at end of file
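
Note: the two profile fixtures differ only in completeness — `draft_profile` leaves several answers empty, while `valid_profile` answers everything except the free-text `Comments` field. A hedged sketch of a completeness check over these fixtures (which questions are actually mandatory is defined by the profile question schema, not here):

```python
# Sketch: list unanswered questions in a profile fixture. An empty string
# or empty list is treated as "unanswered" for illustration purposes.
import json

def unanswered(path: str) -> list:
    with open(path, encoding="utf-8") as f:
        profile = json.load(f)
    return [q["question"] for q in profile["questions"] if q["answer"] in ("", [])]

print(unanswered("testing/api/profiles/draft_profile.json"))  # several entries
print(unanswered("testing/api/profiles/valid_profile.json"))  # only 'Comments'
```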
diff --git a/testing/api/reports/report.json b/testing/api/reports/report.json
new file mode 100644
index 000000000..bd697654d
--- /dev/null
+++ b/testing/api/reports/report.json
@@ -0,0 +1,134 @@
+{
+ "testrun": {
+ "version": "1.3.1"
+ },
+ "mac_addr": null,
+ "device": {
+ "mac_addr": "00:1e:42:35:73:c4",
+ "manufacturer": "Teltonika",
+ "model": "TRB140",
+ "firmware": "1.2.3",
+ "test_modules": {
+ "protocol": {
+ "enabled": true
+ },
+ "services": {
+ "enabled": false
+ },
+ "connection": {
+ "enabled": false
+ },
+ "tls": {
+ "enabled": true
+ },
+ "ntp": {
+ "enabled": true
+ },
+ "dns": {
+ "enabled": true
+ }
+ }
+ },
+ "status": "Non-Compliant",
+ "started": "2024-08-05 13:37:53",
+ "finished": "2024-08-05 13:39:35",
+ "tests": {
+ "total": 12,
+ "results": [
+ {
+ "name": "protocol.valid_bacnet",
+ "description": "BACnet device could not be discovered",
+ "expected_behavior": "BACnet traffic can be seen on the network and packets are valid and not malformed",
+ "required_result": "Recommended",
+ "result": "Feature Not Detected"
+ },
+ {
+ "name": "protocol.bacnet.version",
+ "description": "Device did not respond to BACnet discovery",
+ "expected_behavior": "The BACnet client implements an up to date version of BACnet",
+ "required_result": "Recommended",
+ "result": "Feature Not Detected"
+ },
+ {
+ "name": "protocol.valid_modbus",
+ "description": "Device did not respond to Modbus connection",
+ "expected_behavior": "Any Modbus functionality works as expected and valid Modbus traffic can be observed",
+ "required_result": "Recommended",
+ "result": "Feature Not Detected"
+ },
+ {
+ "name": "security.tls.v1_2_server",
+ "description": "TLS 1.2 certificate is invalid",
+ "expected_behavior": "TLS 1.2 certificate is issued to the web browser client when accessed",
+ "required_result": "Required if Applicable",
+ "result": "Non-Compliant",
+ "recommendations": [
+ "Enable TLS 1.2 support in the web server configuration",
+ "Disable TLS 1.0 and 1.1",
+ "Sign the certificate used by the web server"
+ ]
+ },
+ {
+ "name": "security.tls.v1_2_client",
+ "description": "TLS 1.2 client connections valid",
+ "expected_behavior": "The packet indicates a TLS connection with at least TLS 1.2 and support for ECDH and ECDSA ciphers",
+ "required_result": "Required if Applicable",
+ "result": "Compliant"
+ },
+ {
+ "name": "security.tls.v1_3_server",
+ "description": "TLS 1.3 certificate is invalid",
+ "expected_behavior": "TLS 1.3 certificate is issued to the web browser client when accessed",
+ "required_result": "Informational",
+ "result": "Informational"
+ },
+ {
+ "name": "security.tls.v1_3_client",
+ "description": "TLS 1.3 client connections valid",
+ "expected_behavior": "The packet indicates a TLS connection with at least TLS 1.3",
+ "required_result": "Informational",
+ "result": "Informational"
+ },
+ {
+ "name": "ntp.network.ntp_support",
+ "description": "Device sent NTPv3 packets. NTPv3 is not allowed",
+ "expected_behavior": "The device sends an NTPv4 request to the configured NTP server.",
+ "required_result": "Required",
+ "result": "Non-Compliant",
+ "recommendations": [
+ "Set the NTP version to v4 in the NTP client",
+ "Install an NTP client that supports NTPv4"
+ ]
+ },
+ {
+ "name": "ntp.network.ntp_dhcp",
+ "description": "Device sent NTP request to non-DHCP provided server",
+ "expected_behavior": "Device can accept NTP server address, provided by the DHCP server (DHCP OFFER PACKET)",
+ "required_result": "Roadmap",
+ "result": "Feature Not Detected"
+ },
+ {
+ "name": "dns.network.hostname_resolution",
+ "description": "DNS traffic detected from device",
+ "expected_behavior": "The device sends DNS requests.",
+ "required_result": "Required",
+ "result": "Compliant"
+ },
+ {
+ "name": "dns.network.from_dhcp",
+ "description": "DNS traffic detected only to DHCP provided server",
+ "expected_behavior": "The device sends DNS requests to the DNS server provided by the DHCP server",
+ "required_result": "Informational",
+ "result": "Informational"
+ },
+ {
+ "name": "dns.mdns",
+ "description": "No MDNS traffic detected from the device",
+ "expected_behavior": "Device may send MDNS requests",
+ "required_result": "Informational",
+ "result": "Informational"
+ }
+ ]
+ },
+ "report": "http://localhost:8000/report/Teltonika TRB140/2024-08-05T13:37:53"
+}
\ No newline at end of file
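
Note: the fixture report is marked "Non-Compliant" overall. A plausible reading of the data (an assumption about how the status is derived, not confirmed by this diff) is that any test with a "Required" `required_result` and a "Non-Compliant" `result` fails the device:

```python
# Sketch only: derive an overall status from the fixture report, assuming
# a Required + Non-Compliant result is what fails a device.
import json

with open("testing/api/reports/report.json", encoding="utf-8") as f:
    report = json.load(f)

failed = [
    t["name"] for t in report["tests"]["results"]
    if t["required_result"] == "Required" and t["result"] == "Non-Compliant"
]
print("Non-Compliant" if failed else "Compliant", failed)
# Non-Compliant ['ntp.network.ntp_support']
```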
diff --git a/testing/api/reports/report.pdf b/testing/api/reports/report.pdf
new file mode 100644
index 000000000..0e449f196
Binary files /dev/null and b/testing/api/reports/report.pdf differ
diff --git a/testing/api/system.json b/testing/api/sys_config/system.json
similarity index 100%
rename from testing/api/system.json
rename to testing/api/sys_config/system.json
diff --git a/testing/api/sys_config/updated_system.json b/testing/api/sys_config/updated_system.json
new file mode 100644
index 000000000..95c10642c
--- /dev/null
+++ b/testing/api/sys_config/updated_system.json
@@ -0,0 +1,7 @@
+{
+ "network": {
+ "device_intf": "updated_endev0a",
+ "internet_intf": "updated_dummynet"
+ },
+ "log_level": "DEBUG"
+}
\ No newline at end of file
diff --git a/testing/api/test_api b/testing/api/test_api
index 6751ae0ad..095123a3b 100755
--- a/testing/api/test_api
+++ b/testing/api/test_api
@@ -37,14 +37,15 @@ sudo docker build ./testing/docker/ci_test_device1 -t test-run/ci_device_1 -f .
sudo chown -R $USER local
# Copy configuration to testrun
-sudo cp testing/api/system.json local/system.json
+sudo cp testing/api/sys_config/system.json local/system.json
# Needs to be sudo because this invokes bin/testrun
sudo venv/bin/python3 -m pytest -v testing/api/test_api.py
+return_code=$?
# Clean up network interfaces after use
sudo docker network rm endev0
sudo ip link del dev endev0a
sudo ip link del dev dummynet
-exit $?
\ No newline at end of file
+exit $return_code
\ No newline at end of file
diff --git a/testing/api/test_api.py b/testing/api/test_api.py
index 75811e3bb..e67506a71 100644
--- a/testing/api/test_api.py
+++ b/testing/api/test_api.py
@@ -16,56 +16,81 @@
# pylint: disable=redefined-outer-name
from collections.abc import Callable
-import copy
import json
import os
-from pathlib import Path
import re
import shutil
import signal
import subprocess
import time
-from typing import Iterator
import pytest
import requests
+from cryptography import x509
+from cryptography.hazmat.backends import default_backend
-ALL_DEVICES = "*"
API = "http://127.0.0.1:8000"
LOG_PATH = "/tmp/testrun.log"
TEST_SITE_DIR = ".."
DEVICES_DIRECTORY = "local/devices"
-TESTING_DEVICES = "../device_configs"
-SYSTEM_CONFIG_PATH = "local/system.json"
+TESTING_DEVICES = "../devices"
+PROFILES_DIRECTORY = "local/risk_profiles"
+SYS_CONFIG_FILE = "local/system.json"
+CERTS_DIRECTORY = "local/root_certs"
+
+SYS_CONFIG_PATH = "testing/api/sys_config"
+CERTS_PATH = "testing/api/certificates"
+PROFILES_PATH = "testing/api/profiles"
+REPORTS_PATH = "testing/api/reports"
+DEVICES_PATH = "testing/api/devices"
+DEVICE_1_PATH = "testing/api/devices/device_1"
+DEVICE_2_PATH = "testing/api/devices/device_2"
+
BASELINE_MAC_ADDR = "02:42:aa:00:01:01"
ALL_MAC_ADDR = "02:42:aa:00:00:01"
+DEVICE_PROFILE_QUESTIONS = "resources/devices/device_profile.json"
+
def pretty_print(dictionary: dict):
""" Pretty print dictionary """
print(json.dumps(dictionary, indent=4))
+def query_system_status():
+ """ Query system/status endpoint and returns 'status' value """
-def query_system_status() -> str:
- """Query system status from API and returns this"""
+ # Send the get request
r = requests.get(f"{API}/system/status", timeout=5)
- response = json.loads(r.text)
- return response["status"]
+ # Parse the json response
+ response = r.json()
+
+ # Return the system status
+ return response["status"]
def query_test_count() -> int:
- """Queries status and returns number of test results"""
+ """ Queries status and returns number of test results """
r = requests.get(f"{API}/system/status", timeout=5)
- response = json.loads(r.text)
+ response = r.json()
return len(response["tests"]["results"])
+@pytest.fixture
+def testing_devices():
+ """ Use devices from the testing/devices directory """
+ delete_all_devices()
+ shutil.copytree(
+ os.path.join(os.path.dirname(__file__), TESTING_DEVICES),
+ os.path.join(DEVICES_DIRECTORY),
+ dirs_exist_ok=True,
+ )
+ return get_all_devices()
def start_test_device(
- device_name, mac_address, image_name="test-run/ci_device_1", args=""
+ device_name, mac_addr, image_name="test-run/ci_device_1", args=""
):
""" Start test device container with given name """
cmd = subprocess.run(
- f"docker run -d --network=endev0 --mac-address={mac_address}"
+ f"docker run -d --network=endev0 --mac-address={mac_addr}"
f" --cap-add=NET_ADMIN -v /tmp:/out --privileged --name={device_name}"
f" {image_name} {args}",
shell=True,
@@ -74,7 +99,6 @@ def start_test_device(
)
print(cmd.stdout)
-
def stop_test_device(device_name):
""" Stop docker container with given name """
cmd = subprocess.run(
@@ -88,7 +112,6 @@ def stop_test_device(device_name):
)
print(cmd.stdout)
-
def docker_logs(device_name):
""" Print docker logs from given docker container name """
cmd = subprocess.run(
@@ -97,28 +120,27 @@ def docker_logs(device_name):
)
print(cmd.stdout)
+def load_json(file_name, directory):
+ """ Utility method to load json files """
-@pytest.fixture
-def empty_devices_dir():
- """ Use e,pty devices directory """
- local_delete_devices(ALL_DEVICES)
+ # Construct the base path relative to the main folder
+ base_path = os.path.abspath(os.path.join(__file__, "../../.."))
+ # Construct the full file path
+ file_path = os.path.join(base_path, directory, file_name)
-@pytest.fixture
-def testing_devices():
- """ Use devices from the testing/device_configs directory """
- local_delete_devices(ALL_DEVICES)
- shutil.copytree(
- os.path.join(os.path.dirname(__file__), TESTING_DEVICES),
- os.path.join(DEVICES_DIRECTORY),
- dirs_exist_ok=True,
- )
- return local_get_devices()
+ # Open the file in read mode
+ with open(file_path, "r", encoding="utf-8") as file:
+ # Return the file content
+ return json.load(file)
@pytest.fixture
def testrun(request): # pylint: disable=W0613
- """ Start intstance of testrun """
+ """ Start instance of testrun """
+
+ # Launch the Testrun in a new process group
+ # pylint: disable=W1509
with subprocess.Popen(
"bin/testrun",
stdout=subprocess.PIPE,
@@ -127,44 +149,65 @@ def testrun(request): # pylint: disable=W0613
preexec_fn=os.setsid
) as proc:
+ # Wait until the API is ready to accept requests or timeout
while True:
+
try:
+
+ # Capture the process output
outs = proc.communicate(timeout=1)[0]
+
except subprocess.TimeoutExpired as e:
+
+ # If output is captured during timeout, decode and check
if e.output is not None:
output = e.output.decode("utf-8")
+
+ # Check if the output contains the message indicating the API is ready
if re.search("API waiting for requests", output):
break
+
except Exception:
- pytest.fail("testrun terminated")
+ # Fail if the Testrun process unexpectedly terminates
+ pytest.fail("Testrun terminated")
+ # Wait for two seconds before yielding
time.sleep(2)
yield
+ # Terminate the Testrun process group
os.killpg(os.getpgid(proc.pid), signal.SIGTERM)
try:
+
+ # Wait up to 60 seconds for clean termination of the Testrun process
outs = proc.communicate(timeout=60)[0]
+
+ # If termination exceeds the timeout, force-kill the process
except subprocess.TimeoutExpired as e:
print(e.output)
os.killpg(os.getpgid(proc.pid), signal.SIGKILL)
pytest.exit(
- "waited 60s but Testrun did not cleanly exit .. terminating all tests"
+ "Waited 60s but Testrun did not cleanly exit .. terminating all tests"
)
print(outs)
+ # Stop any remaining Docker containers after the test
cmd = subprocess.run(
"docker stop $(docker ps -a -q)", shell=True,
capture_output=True, check=False
)
+
print(cmd.stdout)
+
+ # Remove the stopped Docker containers
cmd = subprocess.run(
"docker rm $(docker ps -a -q)", shell=True,
capture_output=True, check=False
)
- print(cmd.stdout)
+ print(cmd.stdout)
def until_true(func: Callable, message: str, timeout: int):
""" Blocks until given func returns True
@@ -179,9 +222,8 @@ def until_true(func: Callable, message: str, timeout: int):
time.sleep(1)
raise TimeoutError(f"Timed out waiting {timeout}s for {message}")
-
-def dict_paths(thing: dict, stem: str = "") -> Iterator[str]:
- """Returns json paths (in dot notation) from a given dictionary"""
+def dict_paths(thing: dict, stem: str = ""):
+ """ Returns json paths (in dot notation) from a given dictionary """
for k, v in thing.items():
path = f"{stem}.{k}" if stem else k
if isinstance(v, dict):
@@ -189,770 +231,2820 @@ def dict_paths(thing: dict, stem: str = "") -> Iterator[str]:
else:
yield path
+def get_network_interfaces() -> list:
+ """ Return list of network interfaces on machine
-def get_network_interfaces():
- """return list of network interfaces on machine
-
- uses /sys/class/net rather than inetfaces as test-run uses the latter
+ Uses /sys/class/net rather than netifaces, as Testrun uses the latter
"""
+ # Initialise empty list
ifaces = []
- path = Path("/sys/class/net")
- for i in path.iterdir():
- if not i.is_dir():
+
+ # Path to the directory containing network interfaces
+ path = "/sys/class/net"
+
+ # Iterate over the items in the directory
+ for item in os.listdir(path):
+
+ # Construct the full path
+ full_path = os.path.join(path, item)
+
+ # Skip if the item is not a directory
+ if not os.path.isdir(full_path):
continue
- if i.stem.startswith("en") or i.stem.startswith("eth"):
- ifaces.append(i.stem)
+
+ # Check if the interface name starts with 'en' or 'eth'
+ if item.startswith("en") or item.startswith("eth"):
+ ifaces.append(item)
+
+ # Return the list of network interfaces
return ifaces
+def test_invalid_api_path(testrun): # pylint: disable=W0613
+ """ Test for invalid API path (404)"""
-def local_delete_devices(path):
- """ Deletes all local devices
- """
- for thing in Path(DEVICES_DIRECTORY).glob(path):
- if thing.is_file():
- thing.unlink()
- else:
- shutil.rmtree(thing)
+ # Send the get request to the invalid path
+ r = requests.get(f"{API}/non-existing", timeout=5)
+
+ # Check that the response status code is 404 (Not Found)
+ assert r.status_code == 404
+# Tests for system endpoints
-def local_get_devices():
- """ Returns path to device configs of devices in local/devices directory"""
- return sorted(
- Path(DEVICES_DIRECTORY).glob(
- "*/device_config.json"
- )
- )
+@pytest.fixture()
+def restore_sys_config():
+ """ Restore the original system configuration (system.json) after the test """
+
+ yield
+
+ # Construct the full path for 'system.json'
+ sys_config = os.path.join(SYS_CONFIG_PATH, "system.json")
+
+ # Restore system.json from 'testing/api/sys_config' after the test
+ if os.path.exists(sys_config):
+
+ shutil.copy(sys_config, SYS_CONFIG_FILE)
+
+@pytest.fixture()
+def update_sys_config():
+ """ Update the system configuration (system.json) before the test """
+ # Construct the full path for 'updated_system.json'
+ updated_sys_config = os.path.join(SYS_CONFIG_PATH, "updated_system.json")
-def test_get_system_interfaces(testrun): # pylint: disable=W0613
- """Tests API system interfaces against actual local interfaces"""
+ # Copy 'updated_system.json' over 'local/system.json' before the test
+ if os.path.exists(updated_sys_config):
+
+ shutil.copy(updated_sys_config, SYS_CONFIG_FILE)
+
+def test_get_sys_interfaces(testrun): # pylint: disable=W0613
+ """ Tests API system interfaces against actual local interfaces (200) """
+
+ # Send a GET request to the API to retrieve system interfaces
r = requests.get(f"{API}/system/interfaces", timeout=5)
- response = json.loads(r.text)
+
+ # Check if status code is 200 (OK)
+ assert r.status_code == 200
+
+ # Parse the JSON response
+ response = r.json()
+
+ # Retrieve the actual network interfaces
local_interfaces = get_network_interfaces()
+
+ # Check that the response keys match the local interfaces
assert set(response.keys()) == set(local_interfaces)
- # schema expects a flat list
+ # Ensure that all keys in the response are strings
assert all(isinstance(x, str) for x in response)
+def test_update_sys_config(testrun, restore_sys_config): # pylint: disable=W0613
+ """ Test update system configuration endpoint (200) """
-def test_status_idle(testrun): # pylint: disable=W0613
- until_true(
- lambda: query_system_status().lower() == "idle",
- "system status is `idle`",
- 30,
- )
+ # Load the updated system configuration
+ updated_sys_config = load_json("updated_system.json",
+ directory=SYS_CONFIG_PATH)
-# Currently not working due to blocking during monitoring period
-@pytest.mark.skip()
-def test_status_in_progress(testing_devices, testrun): # pylint: disable=W0613
+ # Assign the values of 'device_intf' and 'internet_intf' from payload
+ updated_device_intf = updated_sys_config["network"]["device_intf"]
+ updated_internet_intf = updated_sys_config["network"]["internet_intf"]
- payload = {"device": {"mac_addr": BASELINE_MAC_ADDR, "firmware": "asd"}}
- r = requests.post(f"{API}/system/start", data=json.dumps(payload), timeout=10)
+ # Send the post request to update the system configuration
+ r = requests.post(f"{API}/system/config",
+ data=json.dumps(updated_sys_config),
+ timeout=5)
+
+ # Check if status code is 200 (OK)
assert r.status_code == 200
- until_true(
- lambda: query_system_status().lower() == "waiting for device",
- "system status is `waiting for device`",
- 30,
- )
+ # Load 'system.json' from 'local' folder
+ local_sys_config = load_json("system.json", directory="local")
- start_test_device("x123", BASELINE_MAC_ADDR)
+ # Assign 'device_intf' and 'internet_intf' values from 'local/system.json'
+ local_device_intf = local_sys_config["network"]["device_intf"]
+ local_internet_intf = local_sys_config["network"]["internet_intf"]
- until_true(
- lambda: query_system_status().lower() == "in progress",
- "system status is `in progress`",
- 600,
- )
+ # Check if 'device_intf' has been updated
+ assert updated_device_intf == local_device_intf
+ # Check if 'internet_intf' has been updated
+ assert updated_internet_intf == local_internet_intf
-@pytest.mark.skip()
-def test_status_non_compliant(testing_devices, testrun): # pylint: disable=W0613
+def test_update_sys_config_invalid_json(testrun): # pylint: disable=W0613
+ """ Test invalid payload for update system configuration (400) """
- r = requests.get(f"{API}/devices", timeout=5)
- all_devices = json.loads(r.text)
- payload = {
- "device": {
- "mac_addr": all_devices[0]["mac_addr"],
- "firmware": "asd"
- }
- }
- r = requests.post(f"{API}/system/start", data=json.dumps(payload),
- timeout=10)
- assert r.status_code == 200
- print(r.text)
+ # Empty payload
+ updated_system_config = {}
- until_true(
- lambda: query_system_status().lower() == "waiting for device",
- "system status is `waiting for device`",
- 30,
- )
+ # Send the post request to update the system configuration
+ r = requests.post(f"{API}/system/config",
+ data=json.dumps(updated_system_config),
+ timeout=5)
- start_test_device("x123", all_devices[0]["mac_addr"])
+ # Check if status code is 400 (Invalid config)
+ assert r.status_code == 400
- until_true(
- lambda: query_system_status().lower() == "non-compliant",
- "system status is `complete",
- 600,
- )
+def test_get_sys_config(testrun): # pylint: disable=W0613
+ """ Tests get system configuration endpoint (200) """
- stop_test_device("x123")
+ # Send a GET request to the API to retrieve system configuration
+ r = requests.get(f"{API}/system/config", timeout=5)
-def test_create_get_devices(empty_devices_dir, testrun): # pylint: disable=W0613
- device_1 = {
- "manufacturer": "Google",
- "model": "First",
- "mac_addr": "00:1e:42:35:73:c4",
- "test_modules": {
- "dns": {"enabled": True},
- "connection": {"enabled": True},
- "ntp": {"enabled": True},
- "baseline": {"enabled": True},
- "nmap": {"enabled": True},
- },
- }
+ # Check if status code is 200 (OK)
+ assert r.status_code == 200
- r = requests.post(f"{API}/device", data=json.dumps(device_1),
- timeout=5)
- print(r.text)
- assert r.status_code == 201
- assert len(local_get_devices()) == 1
-
- device_2 = {
- "manufacturer": "Google",
- "model": "Second",
- "mac_addr": "00:1e:42:35:73:c6",
- "test_modules": {
- "dns": {"enabled": True},
- "connection": {"enabled": True},
- "ntp": {"enabled": True},
- "baseline": {"enabled": True},
- "nmap": {"enabled": True},
- },
+ # Parse the JSON response
+ api_sys_config = r.json()
+
+ # Assign the json response keys and expected types
+ expected_keys = {
+ "network": dict,
+ "log_level": str,
+ "startup_timeout": int,
+ "monitor_period": int,
+ "max_device_reports": int,
+ "api_url": str,
+ "api_port": int,
+ "org_name": str,
}
- r = requests.post(f"{API}/device", data=json.dumps(device_2),
- timeout=5)
- assert r.status_code == 201
- assert len(local_get_devices()) == 2
- # Test that returned devices API endpoint matches expected structure
- r = requests.get(f"{API}/devices", timeout=5)
- all_devices = json.loads(r.text)
- pretty_print(all_devices)
+ # Iterate over the dict keys and values
+ for key, key_type in expected_keys.items():
- with open(
- os.path.join(os.path.dirname(__file__), "mockito/get_devices.json"),
- encoding="utf-8"
- ) as f:
- mockito = json.load(f)
+ # Check if the key is in the JSON response
+ assert key in api_sys_config
- print(mockito)
+ # Check if the key has the expected data type
+ assert isinstance(api_sys_config[key], key_type)
- # Validate structure
- assert all(isinstance(x, dict) for x in all_devices)
+ # Load the local system configuration file 'local/system.json'
+ local_sys_config = load_json("system.json", directory="local")
- # TOOO uncomment when is done
- # assert set(dict_paths(mockito[0])) == set(dict_paths(all_devices[0]))
+ # Assign 'device_intf' and 'internet_intf' values from 'local/system.json'
+ local_device_intf = local_sys_config["network"]["device_intf"]
+ local_internet_intf = local_sys_config["network"]["internet_intf"]
- # Validate contents of given keys matches
- for key in ["mac_addr", "manufacturer", "model"]:
- assert set([all_devices[0][key], all_devices[1][key]]) == set(
- [device_1[key], device_2[key]]
- )
+ # Assign 'device_intf' and 'internet_intf' values from the api response
+ api_device_intf = api_sys_config["network"]["device_intf"]
+ api_internet_intf = api_sys_config["network"]["internet_intf"]
+ # Check if the device interface in the local config matches the API config
+ assert api_device_intf == local_device_intf
-def test_delete_device_success(empty_devices_dir, testrun): # pylint: disable=W0613
- device_1 = {
- "manufacturer": "Google",
- "model": "First",
- "mac_addr": "00:1e:42:35:73:c4",
- "test_modules": {
- "dns": {"enabled": True},
- "connection": {"enabled": True},
- "ntp": {"enabled": True},
- "baseline": {"enabled": True},
- "nmap": {"enabled": True},
- },
- }
+ # Check if the internet interface in the local config matches the API config
+ assert api_internet_intf == local_internet_intf
- # Send create device request
- r = requests.post(f"{API}/device",
- data=json.dumps(device_1),
- timeout=5)
- print(r.text)
+@pytest.fixture()
+def start_test():
+ """ Starts a testrun test using the API """
- # Check device has been created
- assert r.status_code == 201
- assert len(local_get_devices()) == 1
-
- device_2 = {
- "manufacturer": "Google",
- "model": "Second",
- "mac_addr": "00:1e:42:35:73:c6",
- "test_modules": {
- "dns": {"enabled": True},
- "connection": {"enabled": True},
- "ntp": {"enabled": True},
- "baseline": {"enabled": True},
- "nmap": {"enabled": True},
- },
+ # Load the device (payload) using load_json utility method
+ device = load_json("device_config.json", directory=DEVICE_1_PATH)
+
+ # Assign the mac address
+ mac_addr = device["mac_addr"]
+
+ # Assign the test modules
+ test_modules = device["test_modules"]
+
+ # Payload with device details
+ payload = {
+ "device": {
+ "mac_addr": mac_addr,
+ "firmware": "test",
+ "test_modules": test_modules
+ }
}
- r = requests.post(f"{API}/device",
- data=json.dumps(device_2),
- timeout=5)
- assert r.status_code == 201
- assert len(local_get_devices()) == 2
+ # Send the post request (start test)
+ r = requests.post(f"{API}/system/start",
+ data=json.dumps(payload),
+ timeout=10)
- # Test that device_1 deletes
- r = requests.delete(f"{API}/device/",
- data=json.dumps(device_1),
- timeout=5)
- assert r.status_code == 200
- assert len(local_get_devices()) == 1
+ # Exception if status code is not 200
+ if r.status_code != 200:
+ raise ValueError(f"API request failed with code: {r.status_code}")
+@pytest.fixture()
+def stop_test():
+ """ Stops a testrun test using the API """
- # Test that returned devices API endpoint matches expected structure
- r = requests.get(f"{API}/devices", timeout=5)
- all_devices = json.loads(r.text)
- pretty_print(all_devices)
+ # Send the post request to stop the test
+ r = requests.post(f"{API}/system/stop", timeout=10)
- with open(
- os.path.join(os.path.dirname(__file__),
- "mockito/get_devices.json"),
- encoding="utf-8"
- ) as f:
- mockito = json.load(f)
+ # Exception if status code is not 200
+ if r.status_code != 200:
+ raise ValueError(f"API request failed with code: {r.status_code}")
- print(mockito)
+ # Validate system status
- # Validate structure
- assert all(isinstance(x, dict) for x in all_devices)
+@pytest.mark.parametrize("add_devices", [
+ ["device_1"]
+], indirect=True)
+def test_start_testrun_success(empty_devices_dir, add_devices, testrun): # pylint: disable=W0613
+ """ Test for testrun started successfully (200) """
- # TOOO uncomment when is done
- # assert set(dict_paths(mockito[0])) == set(dict_paths(all_devices[0]))
+ # Load the device using load_json utility method
+ device = load_json("device_config.json", directory=DEVICE_1_PATH)
- # Validate contents of given keys matches
- for key in ["mac_addr", "manufacturer", "model"]:
- assert set([all_devices[0][key]]) == set(
- [device_2[key]]
- )
+ # Assign the device mac address
+ mac_addr = device["mac_addr"]
+ # Assign device modules
+ test_modules = device["test_modules"]
-def test_delete_device_not_found(empty_devices_dir, testrun): # pylint: disable=W0613
- device_1 = {
- "manufacturer": "Google",
- "model": "First",
- "mac_addr": "00:1e:42:35:73:c4",
- "test_modules": {
- "dns": {"enabled": True},
- "connection": {"enabled": True},
- "ntp": {"enabled": True},
- "baseline": {"enabled": True},
- "nmap": {"enabled": True},
- },
+ # Payload with device details
+ payload = {
+ "device": {
+ "mac_addr": mac_addr,
+ "firmware": "test",
+ "test_modules": test_modules
+ }
}
- # Send create device request
- r = requests.post(f"{API}/device",
- data=json.dumps(device_1),
- timeout=5)
- print(r.text)
-
- # Check device has been created
- assert r.status_code == 201
- assert len(local_get_devices()) == 1
+ # Send the post request
+ r = requests.post(f"{API}/system/start", data=json.dumps(payload), timeout=10)
- # Test that device_1 deletes
- r = requests.delete(f"{API}/device/",
- data=json.dumps(device_1),
- timeout=5)
+ # Check if the response status code is 200 (OK)
assert r.status_code == 200
- assert len(local_get_devices()) == 0
- # Test that device_1 is not found
- r = requests.delete(f"{API}/device/",
- data=json.dumps(device_1),
- timeout=5)
- assert r.status_code == 404
- assert len(local_get_devices()) == 0
+ # Parse the json response
+ response = r.json()
+ # Check that device is in response
+ assert "device" in response
-def test_delete_device_no_mac(empty_devices_dir, testrun): # pylint: disable=W0613
- device_1 = {
- "manufacturer": "Google",
- "model": "First",
- "mac_addr": "00:1e:42:35:73:c4",
- "test_modules": {
- "dns": {"enabled": True},
- "connection": {"enabled": True},
- "ntp": {"enabled": True},
- "baseline": {"enabled": True},
- "nmap": {"enabled": True},
- },
+ # Assign the json response keys and expected types
+ expected_keys = {
+ "mac_addr": str,
+ "firmware": str,
+ "test_modules": dict
}
- # Send create device request
- r = requests.post(f"{API}/device",
- data=json.dumps(device_1),
- timeout=5)
- print(r.text)
+ # Assign the device properties
+ device = response["device"]
- # Check device has been created
- assert r.status_code == 201
- assert len(local_get_devices()) == 1
+ # Iterate over the 'expected_keys' dict keys and values
+ for key, key_type in expected_keys.items():
- device_1.pop("mac_addr")
+ # Check if the key is in the device
+ assert key in device
- # Test that device_1 can't delete with no mac address
- r = requests.delete(f"{API}/device/",
- data=json.dumps(device_1),
- timeout=5)
- assert r.status_code == 400
- assert len(local_get_devices()) == 1
+ # Check if the key has the expected data type
+ assert isinstance(device[key], key_type)
+def test_start_testrun_invalid_json(testrun): # pylint: disable=W0613
+ """ Test for invalid JSON payload when testrun is started (400) """
-# Currently not working due to blocking during monitoring period
-@pytest.mark.skip()
-def test_delete_device_testrun_running(testing_devices, testrun): # pylint: disable=W0613
+ # Payload empty dict (no device)
+ payload = {}
- payload = {"device": {"mac_addr": BASELINE_MAC_ADDR, "firmware": "asd"}}
+ # Send the post request
r = requests.post(f"{API}/system/start", data=json.dumps(payload), timeout=10)
- assert r.status_code == 200
- until_true(
- lambda: query_system_status().lower() == "waiting for device",
- "system status is `waiting for device`",
- 30,
- )
+ # Check if the response status code is 400 (bad request)
+ assert r.status_code == 400
- start_test_device("x123", BASELINE_MAC_ADDR)
+ # Parse the json response
+ response = r.json()
- until_true(
- lambda: query_system_status().lower() == "in progress",
- "system status is `in progress`",
- 600,
- )
+ # Check if 'error' in response
+ assert "error" in response
- device_1 = {
- "manufacturer": "Google",
- "model": "First",
- "mac_addr": BASELINE_MAC_ADDR,
- "test_modules": {
- "dns": {"enabled": True},
- "connection": {"enabled": True},
- "ntp": {"enabled": True},
- "baseline": {"enabled": True},
- "nmap": {"enabled": True},
- },
- }
- r = requests.delete(f"{API}/device/",
- data=json.dumps(device_1),
- timeout=5)
- assert r.status_code == 403
+@pytest.mark.parametrize("add_devices", [
+ ["device_1"]
+],indirect=True)
+def test_start_testrun_already_started(empty_devices_dir, add_devices, # pylint: disable=W0613
+ testrun, start_test): # pylint: disable=W0613
+ """ Test for testrun already started (409) """
+ # Load the device using load_json utility method
+ device = load_json("device_config.json", directory=DEVICE_1_PATH)
-def test_start_testrun_started_successfully(
- testing_devices, # pylint: disable=W0613
- testrun): # pylint: disable=W0613
- payload = {"device": {"mac_addr": BASELINE_MAC_ADDR, "firmware": "asd"}}
- r = requests.post(f"{API}/system/start", data=json.dumps(payload), timeout=10)
- assert r.status_code == 200
+ # Assign the device mac address
+ mac_addr = device["mac_addr"]
+ # Assign the test modules
+ test_modules = device["test_modules"]
-# Currently not working due to blocking during monitoring period
-@pytest.mark.skip()
-def test_start_testrun_already_in_progress(
- testing_devices, # pylint: disable=W0613
- testrun): # pylint: disable=W0613
- payload = {"device": {"mac_addr": BASELINE_MAC_ADDR, "firmware": "asd"}}
+ # Payload with device details
+ payload = {
+ "device": {
+ "mac_addr": mac_addr,
+ "firmware": "test",
+ "test_modules": test_modules
+ }
+ }
+
+ # Send the post request (start test)
r = requests.post(f"{API}/system/start", data=json.dumps(payload), timeout=10)
- until_true(
- lambda: query_system_status().lower() == "waiting for device",
- "system status is `waiting for device`",
- 30,
- )
+ # Parse the json response
+ response = r.json()
- start_test_device("x123", BASELINE_MAC_ADDR)
+ # Check if the response status code is 409 (Conflict)
+ assert r.status_code == 409
- until_true(
- lambda: query_system_status().lower() == "in progress",
- "system status is `in progress`",
- 600,
- )
+ # Check if 'error' in response
+ assert "error" in response
+
+def test_start_testrun_device_not_found(empty_devices_dir, testrun): # pylint: disable=W0613
+ """ Test for start testrun when device is not found (404) """
+
+ # Payload with an empty mac address so the device cannot be found
+ payload = {"device": {
+ "mac_addr": "",
+ "firmware": "test",
+ "test_modules": {}
+ }}
+
+ # Send the post request
r = requests.post(f"{API}/system/start", data=json.dumps(payload), timeout=10)
- assert r.status_code == 409
-def test_start_system_not_configured_correctly(
- empty_devices_dir, # pylint: disable=W0613
- testrun): # pylint: disable=W0613
- device_1 = {
- "manufacturer": "Google",
- "model": "First",
- "mac_addr": "00:1e:42:35:73:c4",
- "test_modules": {
- "dns": {"enabled": True},
- "connection": {"enabled": True},
- "ntp": {"enabled": True},
- "baseline": {"enabled": True},
- "nmap": {"enabled": True},
- },
- }
+ # Check if the response status code is 404 (not found)
+ assert r.status_code == 404
- # Send create device request
- r = requests.post(f"{API}/device",
- data=json.dumps(device_1),
- timeout=5)
- print(r.text)
+ # Parse the json response
+ response = r.json()
- payload = {"device": {"mac_addr": None, "firmware": "asd"}}
- r = requests.post(f"{API}/system/start",
- data=json.dumps(payload),
- timeout=10)
- assert r.status_code == 500
+ # Check if 'error' in response
+ assert "error" in response
+@pytest.mark.parametrize("add_devices", [
+ ["device_1"]
+],indirect=True)
+def test_start_testrun_error(empty_devices_dir, add_devices, # pylint: disable=W0613
+ update_sys_config, testrun, restore_sys_config): # pylint: disable=W0613
+ """ Test for start testrun internal server error (500) """
-def test_start_device_not_found(empty_devices_dir, # pylint: disable=W0613
- testrun): # pylint: disable=W0613
- device_1 = {
- "manufacturer": "Google",
- "model": "First",
- "mac_addr": "00:1e:42:35:73:c4",
- "test_modules": {
- "dns": {"enabled": True},
- "connection": {"enabled": True},
- "ntp": {"enabled": True},
- "baseline": {"enabled": True},
- "nmap": {"enabled": True},
- },
- }
+ # Load the device using load_json utility method
+ device = load_json("device_config.json", directory=DEVICE_1_PATH)
- # Send create device request
- r = requests.post(f"{API}/device",
- data=json.dumps(device_1),
- timeout=5)
- print(r.text)
+ # Assign the mac address
+ mac_addr = device["mac_addr"]
- r = requests.delete(f"{API}/device/",
- data=json.dumps(device_1),
- timeout=5)
+ # Assign the test modules
+ test_modules = device["test_modules"]
+
+ # Payload with device details
+ payload = { "device":
+ {
+ "mac_addr": mac_addr,
+ "firmware": "test",
+ "test_modules": test_modules
+ }
+ }
+
+ # Send the post request
+ r = requests.post(f"{API}/system/start", data=json.dumps(payload), timeout=10)
+
+ # Parse the json response
+ response = r.json()
+
+ # Check if the response status code is 500
+ assert r.status_code == 500
+
+ # Check if 'error' in response
+ assert "error" in response
+
+@pytest.mark.parametrize("add_devices", [
+ ["device_1"]
+],indirect=True)
+def test_stop_running_testrun(empty_devices_dir, add_devices, # pylint: disable=W0613
+ testrun, start_test): # pylint: disable=W0613
+ """ Test for successfully stop testrun when test is running (200) """
+
+ # Send the post request to stop the test
+ r = requests.post(f"{API}/system/stop", timeout=10)
+
+ # Parse the json response
+ response = r.json()
+
+ # Check if status code is 200 (ok)
assert r.status_code == 200
- payload = {"device": {"mac_addr": device_1["mac_addr"], "firmware": "asd"}}
- r = requests.post(f"{API}/system/start",
- data=json.dumps(payload),
- timeout=10)
+ # Check if 'success' in response
+ assert "success" in response
+
+def test_stop_testrun_not_running(testrun): # pylint: disable=W0613
+ """ Test for stop testrun when is not running (404) """
+
+ # Send the post request to stop the test
+ r = requests.post(f"{API}/system/stop", timeout=10)
+
+ # Parse the json response
+ response = r.json()
+
+ # Check if status code is 404 (not found)
assert r.status_code == 404
+ # Check if error in response
+ assert "error" in response
-def test_start_missing_device_information(
- empty_devices_dir, # pylint: disable=W0613
- testrun): # pylint: disable=W0613
- device_1 = {
- "manufacturer": "Google",
- "model": "First",
+def test_sys_shutdown(testrun): # pylint: disable=W0613
+ """ Test for testrun shutdown endpoint (200) """
+
+ # Send a POST request to initiate the system shutdown
+ r = requests.post(f"{API}/system/shutdown", timeout=5)
+
+ # Parse the json response
+ response = r.json()
+
+ # Check if the response status code is 200 (OK)
+ assert r.status_code == 200
+
+ # Check that the response body is null
+ assert response is None
+
+@pytest.mark.parametrize("add_devices", [
+ ["device_1"]
+],indirect=True)
+def test_sys_shutdown_in_progress(empty_devices_dir, add_devices, # pylint: disable=W0613
+ testrun, start_test): # pylint: disable=W0613
+ """ Test system shutdown during an in-progress test (400) """
+
+ # Attempt to shutdown while the test is running
+ r = requests.post(f"{API}/system/shutdown", timeout=5)
+
+ # Check if the response status code is 400 (test in progress)
+ assert r.status_code == 400
+
+ # Parse the json response
+ response = r.json()
+
+ # Check if 'error' in response
+ assert "error" in response
+
+def test_sys_status_idle(testrun): # pylint: disable=W0613
+ """ Test for system status 'Idle' (200) """
+
+ # Send the get request
+ r = requests.get(f"{API}/system/status", timeout=5)
+
+ # Check if the response status code is 200 (OK)
+ assert r.status_code == 200
+
+ # Parse the json response
+ response = r.json()
+
+ # Check if system status is 'Idle'
+ assert response["status"] == "Idle"
+
+@pytest.mark.parametrize("add_devices", [
+ ["device_1"]
+],indirect=True)
+def test_sys_status_cancelled(empty_devices_dir, add_devices, # pylint: disable=W0613
+ testrun, start_test, stop_test): # pylint: disable=W0613
+ """ Test for system status 'cancelled' (200) """
+
+ # Send the get request to retrieve system status
+ r = requests.get(f"{API}/system/status", timeout=5)
+
+ # Parse the json response
+ response = r.json()
+
+ # Check if status is 'Cancelled'
+ assert response["status"] == "Cancelled"
+
+@pytest.mark.parametrize("add_devices", [
+ ["device_1"]
+],indirect=True)
+def test_sys_status_waiting(empty_devices_dir, add_devices, # pylint: disable=W0613
+ testrun, start_test): # pylint: disable=W0613
+ """ Test for system status 'Waiting for Device' (200) """
+
+ # Send the get request
+ r = requests.get(f"{API}/system/status", timeout=5)
+
+ # Check if the response status code is 200 (OK)
+ assert r.status_code == 200
+
+ # Parse the json response
+ response = r.json()
+
+ # Check if system status is 'Waiting for Device'
+ assert response["status"] == "Waiting for Device"
+
+def test_system_version(testrun): # pylint: disable=W0613
+ """Test for testrun version endpoint"""
+
+ # Send the get request to the API
+ r = requests.get(f"{API}/system/version", timeout=5)
+
+ # Check if status code is 200 (ok)
+ assert r.status_code == 200
+
+ # Parse the response
+ response = r.json()
+
+ # Assign the expected json response keys and expected types
+ expected_keys = {
+ "installed_version": str,
+ "update_available": bool,
+ "latest_version": str,
+ "latest_version_url": str
+ }
+
+ # Iterate over the dict keys and values
+ for key, key_type in expected_keys.items():
+
+ # Check if the key is in the JSON response
+ assert key in response
+
+ # Check if the key has the expected data type
+ assert isinstance(response[key], key_type)
+
+def test_get_test_modules(testrun): # pylint: disable=W0613
+ """ Test the /system/modules endpoint to get the test modules (200) """
+
+ # Send a GET request to the API endpoint
+ r = requests.get(f"{API}/system/modules", timeout=5)
+
+ # Check if status code is 200 (OK)
+ assert r.status_code == 200
+
+ # Parse the JSON response
+ response = r.json()
+
+ # Check if the response is a list
+ assert isinstance(response, list)
+
+# Tests for reports endpoints
+
+def get_timestamp(formatted=False):
+ """ Returns timestamp value from 'started' field from the report
+ found at 'testing/api/reports/report.json'
+ By default it will return the raw time format or iso if formatted=True
+ """
+
+ # Load the report.json using load_json utility method
+ report_json = load_json("report.json", directory="testing/api/reports")
+
+ # Assign the timestamp from report.json
+ timestamp = report_json["started"]
+
+ # If an ISO formatted timestamp was requested
+ if formatted:
+
+ # Return the ISO formatted timestamp
+ return timestamp.replace(" ", "T")
+
+ # Else return the raw timestamp
+ return timestamp
+
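+# For illustration (assuming the report's "started" value looks like
+# "2024-01-01 10:00:00"): get_timestamp() returns it unchanged, while
+# get_timestamp(formatted=True) returns "2024-01-01T10:00:00".
+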
+@pytest.fixture
+def create_report_folder(): # pylint: disable=W0613
+ """ Fixture to create the device reports folder in local/devices """
+
+ # Load the device using load_json utility method
+ device = load_json("device_config.json", directory=DEVICE_1_PATH)
+
+ # Assign the device mac address
+ mac_addr = device["mac_addr"]
+
+ # Assign the device name
+ device_name = f'{device["manufacturer"]} {device["model"]}'
+
+ # Create the device folder path
+ main_folder = os.path.join(DEVICES_DIRECTORY, device_name)
+
+ # Remove the ":" from mac address for the folder structure
+ mac_addr = mac_addr.replace(":", "")
+
+ # Assign the timestamp from get_timestamp utility method
+ timestamp = get_timestamp(formatted=True)
+
+ # Create the report folder path
+ report_folder = os.path.join(main_folder, "reports", timestamp,
+ "test", mac_addr)
+
+ # Ensure the report folder exists
+ os.makedirs(report_folder, exist_ok=True)
+
+ # Iterate over the files from 'testing/api/reports' folder
+ for file in os.listdir(REPORTS_PATH):
+
+ # Construct full path of the file from 'testing/api/reports' folder
+ source_path = os.path.join(REPORTS_PATH, file)
+
+ # Construct full path where the file will be copied
+ target_path = os.path.join(report_folder, file)
+
+ # Copy the file
+ shutil.copy(source_path, target_path)
+
+def test_get_reports_no_reports(empty_devices_dir, testrun): # pylint: disable=W0613
+ """Test get reports when no reports exist"""
+
+ # Set the Origin headers to API address
+ headers = {
+ "Origin": API
+ }
+
+ # Send a GET request to the /reports endpoint
+ r = requests.get(f"{API}/reports", headers=headers, timeout=5)
+
+ # Check if the status code is 200 (OK)
+ assert r.status_code == 200
+
+ # Parse the JSON response
+ response = r.json()
+
+ # Check if the response is a list
+ assert isinstance(response, list)
+
+ # Check if the response is an empty list
+ assert response == []
+
+@pytest.mark.parametrize("add_devices", [
+ ["device_1"]
+],indirect=True)
+def test_get_reports(empty_devices_dir, add_devices, # pylint: disable=W0613
+ create_report_folder, testrun): # pylint: disable=W0613
+ """ Test for get reports when one report is available (200) """
+
+ # Set the Origin headers to API address
+ headers = {
+ "Origin": API
+ }
+
+ # Get request to retrieve the generated reports
+ r = requests.get(f"{API}/reports", headers=headers, timeout=5)
+
+ # Parse the json
+ response = r.json()
+
+ # Check if status code is 200 (ok)
+ assert r.status_code == 200
+
+ # Check if response is a list
+ assert isinstance(response, list)
+
+ # Check if there is one report
+ assert len(response) == 1
+
+ # Assign the report from the response list
+ report = response[0]
+
+ # Assign the expected report properties
+ expected_keys = [
+ "testrun",
+ "mac_addr",
+ "device",
+ "status",
+ "started",
+ "finished",
+ "tests",
+ "report"
+ ]
+
+ # Iterate through the expected_keys
+ for key in expected_keys:
+
+ # Check if the key exists in the report
+ assert key in report
+
+@pytest.mark.parametrize("add_devices", [
+ ["device_1"]
+],indirect=True)
+def test_delete_report(empty_devices_dir, add_devices, # pylint: disable=W0613
+ create_report_folder, testrun): # pylint: disable=W0613
+ """ Test for succesfully delete a report (200) """
+
+ # Load the device using load_json utility method
+ device = load_json("device_config.json", directory=DEVICE_1_PATH)
+
+ # Assign the device mac address
+ mac_addr = device["mac_addr"]
+
+ # Assign the device name
+ device_name = f'{device["manufacturer"]} {device["model"]}'
+
+ # Payload
+ delete_data = {
+ "mac_addr": mac_addr,
+ "timestamp": get_timestamp()
+ }
+
+ # Send a DELETE request to remove the report
+ r = requests.delete(f"{API}/report", data=json.dumps(delete_data), timeout=5)
+
+ # Check if status code is 200 (OK)
+ assert r.status_code == 200
+
+ # Parse the json response
+ response = r.json()
+
+ # Check if "success" in response
+ assert "success" in response
+
+ # Construct the 'reports' folder path
+ reports_folder = os.path.join(DEVICES_DIRECTORY, device_name, "reports")
+
+ # Check if reports folder has been deleted
+ assert not os.path.exists(reports_folder)
+
+@pytest.mark.parametrize("add_devices", [
+ ["device_1"]
+],indirect=True)
+def test_delete_report_no_payload(empty_devices_dir, add_devices, # pylint: disable=W0613
+ create_report_folder, testrun): # pylint: disable=W0613
+ """ Test delete report bad request when the payload is missing (400) """
+
+ # Send a DELETE request to remove the report without the payload
+ r = requests.delete(f"{API}/report", timeout=5)
+
+ # Check if status code is 400 (bad request)
+ assert r.status_code == 400
+
+ # Parse the json response
+ response = r.json()
+
+ # Check if "error" in response
+ assert "error" in response
+
+ # Check if the correct error message is returned
+ assert "Invalid request received, missing body" in response["error"]
+
+@pytest.mark.parametrize("add_devices", [
+ ["device_1"]
+],indirect=True)
+def test_delete_report_invalid_payload(empty_devices_dir, add_devices, # pylint: disable=W0613
+ create_report_folder, testrun): # pylint: disable=W0613
+ """ Test delete report bad request missing mac addr or timestamp (400) """
+
+ # Empty payload
+ delete_data = {}
+
+ # Send a DELETE request to remove the report
+ r = requests.delete(f"{API}/report", data=json.dumps(delete_data), timeout=5)
+
+ # Check if status code is 400 (bad request)
+ assert r.status_code == 400
+
+ # Parse the json response
+ response = r.json()
+
+ # Check if "error" in response
+ assert "error" in response
+
+ # Check if the correct error message is returned
+ assert "Missing mac address or timestamp" in response["error"]
+
+@pytest.mark.parametrize("add_devices", [
+ ["device_1"]
+],indirect=True)
+def test_delete_report_invalid_timestamp(empty_devices_dir, add_devices, # pylint: disable=W0613
+ create_report_folder, testrun): # pylint: disable=W0613
+ """ Test delete report bad request if timestamp format is not valid (400) """
+
+ # Load the device using load_json utility method
+ device = load_json("device_config.json", directory=DEVICE_1_PATH)
+
+ # Assign the device mac address
+ mac_addr = device["mac_addr"]
+
+ # Assign the incorrect timestamp format
+ invalid_timestamp = "2024-01-01 invalid"
+
+ # Payload
+ delete_data = {
+ "mac_addr": mac_addr,
+ "timestamp": invalid_timestamp
+ }
+
+ # Send a DELETE request to remove the report
+ r = requests.delete(f"{API}/report", data=json.dumps(delete_data), timeout=5)
+
+ # Check if status code is 400 (bad request)
+ assert r.status_code == 400
+
+ # Parse the json response
+ response = r.json()
+
+ # Check if "error" in response
+ assert "error" in response
+
+ # Check if the correct error message is returned
+ assert "Incorrect timestamp format" in response["error"]
+
+def test_delete_report_no_device(empty_devices_dir, testrun): # pylint: disable=W0613
+ """ Test delete report when device does not exist (404) """
+
+ # Payload for deleting a report of a non-existing device
+ delete_data = {
"mac_addr": "00:1e:42:35:73:c4",
- "test_modules": {
- "dns": {"enabled": True},
- "connection": {"enabled": True},
- "ntp": {"enabled": True},
- "baseline": {"enabled": True},
- "nmap": {"enabled": True},
- },
+ "timestamp": get_timestamp()
+ }
+
+ # Send the delete request to the endpoint
+ r = requests.delete(f"{API}/report", data=json.dumps(delete_data), timeout=5)
+
+ # Check if status is 404 (not found)
+ assert r.status_code == 404
+
+ # Parse the response json
+ response = r.json()
+
+ # Check if "error" in response
+ assert "error" in response
+
+ # Check if the correct error message is returned
+ assert "Could not find device" in response["error"]
+
+@pytest.mark.parametrize("add_devices", [
+ ["device_1"]
+],indirect=True)
+def test_delete_report_no_report(empty_devices_dir, add_devices, testrun): # pylint: disable=W0613
+ """Test for delete report when report does not exist (404)"""
+
+ # Load the device using load_json utility method
+ device = load_json("device_config.json", directory=DEVICE_1_PATH)
+
+ # Assign the device mac address
+ mac_addr = device["mac_addr"]
+
+ # Prepare the payload for the DELETE request
+ delete_data = {
+ "mac_addr": mac_addr,
+ "timestamp": get_timestamp()
}
- # Send create device request
- r = requests.post(f"{API}/device",
- data=json.dumps(device_1),
+ # Send the delete request to delete the report
+ r = requests.delete(f"{API}/report",
+ data=json.dumps(delete_data),
+ timeout=5)
+
+ # Check if status code is 404 (not found)
+ assert r.status_code == 404
+
+ # Parse the JSON response
+ response = r.json()
+
+ # Check if error is present in the response
+ assert "error" in response
+
+ # Check if the correct error message is returned
+ assert "Report not found" in response["error"]
+
+@pytest.mark.parametrize("add_devices", [
+ ["device_1"]
+],indirect=True)
+def test_get_report_success(empty_devices_dir, add_devices, # pylint: disable=W0613
+ create_report_folder, testrun): # pylint: disable=W0613
+ """Test for successfully get report when report exists (200)"""
+
+ # Load the device using load_json utility method
+ device = load_json("device_config.json", directory=DEVICE_1_PATH)
+
+ # Assign the device name
+ device_name = f'{device["manufacturer"]} {device["model"]}'
+
+ # Assign the timestamp and change the format
+ timestamp = get_timestamp(formatted=True)
+
+ # Send the get request
+ r = requests.get(f"{API}/report/{device_name}/{timestamp}", timeout=5)
+
+ # Check if status code is 200 (ok)
+ assert r.status_code == 200
+
+ # Check if the response is a PDF
+ assert r.headers["Content-Type"] == "application/pdf"
+
+@pytest.mark.parametrize("add_devices", [
+ ["device_1"]
+],indirect=True)
+def test_get_report_not_found(empty_devices_dir, add_devices, testrun): # pylint: disable=W0613
+ """Test get report when report doesn't exist (404)"""
+
+ # Load the device using load_json utility method
+ device = load_json("device_config.json", directory=DEVICE_1_PATH)
+
+ # Assign the device name
+ device_name = f'{device["manufacturer"]} {device["model"]}'
+
+ # Assign the timestamp
+ timestamp = get_timestamp()
+
+ # Send the get request
+ r = requests.get(f"{API}/report/{device_name}/{timestamp}", timeout=5)
+
+ # Check if status code is 404 (not found)
+ assert r.status_code == 404
+
+ # Parse the response json
+ response = r.json()
+
+ # Check if "error" in response
+ assert "error" in response
+
+ # Check if the correct error message is returned
+ assert "Report could not be found" in response["error"]
+
+def test_get_report_device_not_found(empty_devices_dir, testrun): # pylint: disable=W0613
+ """Test getting a report when the device is not found (404)"""
+
+ # Assign device name
+ device_name = "nonexistent_device"
+
+ # Assign the timestamp
+ timestamp = get_timestamp()
+
+ # Send the get request
+ r = requests.get(f"{API}/report/{device_name}/{timestamp}", timeout=5)
+
+ # Check if is 404 (not found)
+ assert r.status_code == 404
+
+ # Parse the json response
+ response = r.json()
+
+ # Check if "error" in response
+ assert "error" in response
+
+ # Check if the correct error message is returned
+ assert "Device not found" in response["error"]
+
+def test_export_report_device_not_found(empty_devices_dir, create_report_folder, # pylint: disable=W0613
+ testrun): # pylint: disable=W0613
+ """Test for export the report result when the device could not be found"""
+
+ # Assign the non-existing device name
+ device_name = "non existing device"
+
+ # Assign the timestamp
+ timestamp = get_timestamp()
+
+ # Send the post request
+ r = requests.post(f"{API}/export/{device_name}/{timestamp}", timeout=5)
+
+ # Check if is 404 (not found)
+ assert r.status_code == 404
+
+ # Parse the json response
+ response = r.json()
+
+ # Check if "error" in response
+ assert "error" in response
+
+ # Check if the correct error message is returned
+ assert "A device with that name could not be found" in response["error"]
+
+@pytest.mark.parametrize("add_devices", [
+ ["device_1"]
+],indirect=True)
+def test_export_report_profile_not_found(empty_devices_dir, add_devices, # pylint: disable=W0613
+ create_report_folder, testrun): # pylint: disable=W0613
+ """Test for export report result when the profile is not found"""
+
+ # Load the device using load_json utility method
+ device = load_json("device_config.json", directory=DEVICE_1_PATH)
+
+ # Assign the device name
+ device_name = f'{device["manufacturer"]} {device["model"]}'
+
+ # Assign the timestamp
+ timestamp = get_timestamp()
+
+ # Add a non existing profile into the payload
+ payload = {"profile": "non_existent_profile"}
+
+ # Send the post request
+ r = requests.post(f"{API}/export/{device_name}/{timestamp}",
+ json=payload,
timeout=5)
- print(r.text)
- payload = {}
- r = requests.post(f"{API}/system/start",
- data=json.dumps(payload),
- timeout=10)
- assert r.status_code == 400
+ # Check if is 404 (not found)
+ assert r.status_code == 404
+
+ # Parse the json response
+ response = r.json()
+
+ # Check if "error" in response
+ assert "error" in response
+
+ # Check if the correct error message is returned
+ assert "A profile with that name could not be found" in response["error"]
+
+@pytest.mark.parametrize("add_devices", [
+ ["device_1"]
+],indirect=True)
+def test_export_report_not_found(empty_devices_dir, add_devices, testrun): # pylint: disable=W0613
+ """Test for export the report result when the report could not be found"""
+
+ # Load the device using load_json utility method
+ device = load_json("device_config.json", directory=DEVICE_1_PATH)
+
+ # Assign the device name
+ device_name = f'{device["manufacturer"]} {device["model"]}'
+
+ # Assign the timestamp
+ timestamp = get_timestamp()
+
+ # Send the post request to trigger the zipping process
+ r = requests.post(f"{API}/export/{device_name}/{timestamp}", timeout=10)
+
+ # Check if status code is 404 (not found)
+ assert r.status_code == 404
+
+ # Parse the json response
+ response = r.json()
+
+ # Check if "error" in response
+ assert "error" in response
+
+ # Check if the correct error message is returned
+ assert "Report could not be found" in response["error"]
+
+@pytest.mark.parametrize("add_devices, add_profiles", [
+ (["device_1"], ["valid_profile.json"])
+], indirect=True)
+def test_export_report_with_profile(empty_devices_dir, add_devices, # pylint: disable=W0613
+ empty_profiles_dir, add_profiles, # pylint: disable=W0613
+ create_report_folder, testrun): # pylint: disable=W0613
+ """Test export results with existing profile when report exists (200)"""
+
+ # Load the profile using load_json utility method
+ profile = load_json("valid_profile.json", directory=PROFILES_PATH)
+
+ # Load the device using load_json utility method
+ device = load_json("device_config.json", directory=DEVICE_1_PATH)
+
+ # Assign the device name
+ device_name = f'{device["manufacturer"]} {device["model"]}'
+
+ # Assign the timestamp and change the format
+ timestamp = get_timestamp(formatted=True)
+
+ # Send the post request
+ r = requests.post(f"{API}/export/{device_name}/{timestamp}",
+ json=profile,
+ timeout=5)
+
+ # Check if status code is 200 (OK)
+ assert r.status_code == 200
+
+ # Check if the response is a zip file
+ assert r.headers["Content-Type"] == "application/zip"
+
+@pytest.mark.parametrize("add_devices", [
+ ["device_1"]
+],indirect=True)
+def test_export_results_with_no_profile(empty_devices_dir, add_devices, # pylint: disable=W0613
+ create_report_folder, testrun): # pylint: disable=W0613
+ """Test export results with no profile when report exists (200)"""
+
+ # Load the device using load_json utility method
+ device = load_json("device_config.json", directory=DEVICE_1_PATH)
+
+ # Assign the device name
+ device_name = f'{device["manufacturer"]} {device["model"]}'
+
+ # Assign the timestamp and change the format
+ timestamp = get_timestamp(formatted=True)
+
+ # Send the post request
+ r = requests.post(f"{API}/export/{device_name}/{timestamp}", timeout=5)
+
+ # Check if status code is 200 (OK)
+ assert r.status_code == 200
+
+ # Check if the response is a zip file
+ assert r.headers["Content-Type"] == "application/zip"
+
+# Tests for device endpoints
+@pytest.fixture()
+def add_devices(request):
+ """ Upload specified device to local/devices """
+
+ # Access the parameter (devices list) provided to the fixture
+ devices = request.param
+
+ # Iterate over the device names provided
+ for device_name in devices:
+
+ # Construct the full path for the device_config.json
+ device_path = os.path.join(DEVICES_PATH, device_name)
+
+ # Load the device configuration using load_json utility method
+ device = load_json("device_config.json", directory=device_path)
+
+ # Assign the device name for the target directory
+ target_device_name = f'{device["manufacturer"]} {device["model"]}'
+
+ # Construct the source path of the device config file
+ source_path = os.path.join(device_path, "device_config.json")
+
+ # Construct the target path where the device config will be copied
+ target_path = os.path.join(DEVICES_DIRECTORY, target_device_name)
+
+ # Create the target directory if it doesn't exist
+ os.makedirs(target_path, exist_ok=True)
+
+ # Copy the device config from source to target
+ shutil.copy(source_path, target_path)
+
+ # Return the list with devices names
+ return devices
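+
+# After this fixture runs, each device config lives at, for example,
+# local/devices/<manufacturer model>/device_config.json (an illustrative
+# layout derived from the copy step above).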
+
+def delete_all_devices():
+ """Utility method to delete all devices from local/devices"""
+
+ # Pre-assign 'item' so the exception handlers below can always reference it
+ item = ""
+
+ try:
+
+ # Check if the device_path (local/devices) exists and is a folder
+ if os.path.exists(DEVICES_DIRECTORY) and os.path.isdir(DEVICES_DIRECTORY):
+
+ # Iterate over all devices from devices folder
+ for item in os.listdir(DEVICES_DIRECTORY):
+
+ # Create the full path
+ item_path = os.path.join(DEVICES_DIRECTORY, item)
+
+ # Check if item is a file
+ if os.path.isfile(item_path):
+
+ # Remove file
+ os.unlink(item_path)
+
+ else:
+
+ # If item is a folder remove it
+ shutil.rmtree(item_path)
+
+ except PermissionError:
+
+ # Permission related issues
+ print(f"Permission Denied: {item}")
+
+ except OSError as err:
+
+ # System related issues
+ print(f"Error removing {item}: {err}")
+
+@pytest.fixture
+def empty_devices_dir():
+ """Delete all devices before and after test"""
+
+ # Empty the directory before the test
+ delete_all_devices()
+
+ yield
+
+ # Empty the directory after the test
+ delete_all_devices()
+
+def get_all_devices():
+ """ Returns list with paths to all devices from local/devices """
+
+ # List to store the paths of all 'device_config.json' files
+ devices = []
+
+ # Loop through each file/folder from 'local/devices'.
+ for device_folder in os.listdir(DEVICES_DIRECTORY):
+
+ # Construct the full path for the file/folder
+ device_path = os.path.join(DEVICES_DIRECTORY, device_folder)
+
+ # Check if the current path is a folder
+ if os.path.isdir(device_path):
+
+ # Construct the full path to 'device_config.json' inside the folder.
+ config_path = os.path.join(device_path, "device_config.json")
+
+ # Check if 'device_config.json' exists in the path.
+ if os.path.exists(config_path):
+
+ # Append the file path to the list.
+ devices.append(config_path)
+
+ # Return all the device_config.json paths
+ return devices
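+
+# With a single registered device this returns, for example,
+# ["local/devices/Google First/device_config.json"] (illustrative path,
+# assuming DEVICES_DIRECTORY points at local/devices).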
+
+def device_exists(device_mac):
+ """ Utility method to check if device exists """
+
+ # Send the get request
+ r = requests.get(f"{API}/devices", timeout=5)
+
+ # Check if status code is not 200 (OK)
+ if r.status_code != 200:
+ raise ValueError(f"Api request failed with code: {r.status_code}")
+
+ # Parse the JSON response to get the list of devices
+ devices = r.json()
+
+ # Return if mac address is in the list of devices
+ return any(p["mac_addr"] == device_mac for p in devices)
+
+@pytest.mark.parametrize(
+ "add_devices",
+ [
+ [],
+ ["device_1"],
+ ["device_1", "device_2"]
+ ],
+ indirect=True
+)
+def test_get_devices(empty_devices_dir, add_devices, testrun): # pylint: disable=W0613
+ """ Test get devices when none, one or two devices are available (200) """
+
+ # Send the get request to retrieve all devices
+ r = requests.get(f"{API}/devices", timeout=5)
+
+ # Check if status code is 200 (Ok)
+ assert r.status_code == 200
+
+ # Parse the json response
+ response = r.json()
+
+ # Check if response is a list
+ assert isinstance(response, list)
+
+ # Check if the number of devices matches the number of devices available
+ assert len(response) == len(add_devices)
+
+ # Assign the expected device fields
+ expected_fields = [
+ "status",
+ "mac_addr",
+ "manufacturer",
+ "model",
+ "type",
+ "technology",
+ "test_pack",
+ "test_modules",
+ ]
+
+ # If devices are in the list
+ if len(add_devices) > 0:
+
+ # Iterate over all expected_fields list
+ for field in expected_fields:
+
+ # Check if devices have the expected fields
+ assert all(field in device for device in response)
+
+def test_create_device(empty_devices_dir, testrun): # pylint: disable=W0613
+ """ Test for successfully create device endpoint (201) """
+
+ # Load the first device using load_json utility method
+ device_1 = load_json("device_config.json", directory=DEVICE_1_PATH)
+
+ # Assign the mac address for the first device
+ mac_addr_1 = device_1["mac_addr"]
+
+ # Send the post request to the '/device' endpoint
+ r = requests.post(f"{API}/device", data=json.dumps(device_1), timeout=5)
+
+ # Check if status code is 201 (Created)
+ assert r.status_code == 201
+
+ # Check if there is one device in 'local/devices'
+ assert len(get_all_devices()) == 1
+
+ # Load the second device using load_json utility method
+ device_2 = load_json("device_config.json", directory=DEVICE_2_PATH)
+
+ # Assign the mac address for the second device
+ mac_addr_2 = device_2["mac_addr"]
+
+ # Send the post request to the '/device' endpoint
+ r = requests.post(f"{API}/device", data=json.dumps(device_2), timeout=5)
+
+ # Check if status code is 201 (Created)
+ assert r.status_code == 201
+
+ # Check if there are two devices in 'local/devices'
+ assert len(get_all_devices()) == 2
+
+ # Send a get request to retrieve created devices
+ r = requests.get(f"{API}/devices", timeout=5)
+
+ # Parse the json response (devices)
+ response = r.json()
+
+ # Filter the response for the two devices that were just created
+ created_devices = [
+ d for d in response
+ if d["mac_addr"] in {mac_addr_1, mac_addr_2}
+ ]
+
+ # Check if both devices have been found
+ assert len(created_devices) == 2
+
+@pytest.mark.parametrize("add_devices", [
+ ["device_1"]
+],indirect=True)
+def test_create_device_already_exists(empty_devices_dir, add_devices, # pylint: disable=W0613
+ testrun): # pylint: disable=W0613
+ """ Test for crete device when device already exists (409) """
+
+ # Error handling if there is not exactly one device in local/devices
+ if len(get_all_devices()) != 1:
+ raise Exception("Expected one device in local/devices")
+
+ # Load the device (payload) using load_json utility method
+ device = load_json("device_config.json", directory=DEVICE_1_PATH)
+
+ # Send the post request to create the device
+ r = requests.post(f"{API}/device", data=json.dumps(device), timeout=5)
+
+ # Check if status code is 409 (conflict)
+ assert r.status_code == 409
+
+ # Parse the json response (devices)
+ response = r.json()
+
+ # Check if 'error' in response
+ assert "error" in response
+
+ # Check if 'local/device' has only one device
+ assert len(get_all_devices()) == 1
+
+def test_create_device_invalid_json(empty_devices_dir, testrun): # pylint: disable=W0613
+ """ Test for create device invalid json payload """
+
+ # Error handling if there are devices in local/devices
+ if len(get_all_devices()) != 0:
+ raise Exception("Expected no device in local/devices")
+
+ # Empty payload
+ device = {}
+
+ # Send the post request
+ r = requests.post(f"{API}/device", data=json.dumps(device), timeout=5)
+
+ # Check if status code is 400 (bad request)
+ assert r.status_code == 400
+
+ # Parse the json response (devices)
+ response = r.json()
+
+ # Check if 'error' in response
+ assert "error" in response
+
+ # Check if 'local/device' has no devices
+ assert len(get_all_devices()) == 0
+
+def test_create_device_invalid_request(empty_devices_dir, testrun): # pylint: disable=W0613
+ """ Test for create device when no payload is added """
+
+ # Send the post request with no payload
+ r = requests.post(f"{API}/device", data=None, timeout=5)
+
+ # Check if status code is 400 (bad request)
+ assert r.status_code == 400
+
+ # Parse the json response (devices)
+ response = r.json()
+
+ # Check if 'error' in response
+ assert "error" in response
+
+ # Check if 'local/device' has no devices
+ assert len(get_all_devices()) == 0
+
+@pytest.mark.parametrize("add_devices", [
+ ["device_1"]
+],indirect=True)
+def test_edit_device(empty_devices_dir, add_devices, # pylint: disable=W0613
+ testrun): # pylint: disable=W0613
+ """ Test for successfully edit device (200) """
+
+ # Error handling if there is not exactly one device in local/devices
+ if len(get_all_devices()) != 1:
+ raise Exception("Expected one device in local/devices")
+
+ # Load the device (payload) using load_json utility method
+ device = load_json("device_config.json", directory=DEVICE_1_PATH)
+
+ # Assign the mac address
+ mac_addr = device["mac_addr"]
+
+ # Update the manufacturer and model values
+ device["manufacturer"] = "Updated Manufacturer"
+ device["model"] = "Updated Model"
+
+ # Payload with the updated device name
+ updated_device = {
+ "mac_addr": mac_addr,
+ "device": device
+ }
+
+ # Exception if the device is not found
+ if not device_exists(mac_addr):
+ raise ValueError(f"Device with mac address:{mac_addr} not found")
+
+ # Send the post request to update the device
+ r = requests.post(
+ f"{API}/device/edit",
+ data=json.dumps(updated_device),
+ timeout=5)
+
+ # Check if status code is 200 (OK)
+ assert r.status_code == 200
+
+ # Check if 'local/device' still has only one device
+ assert len(get_all_devices()) == 1
+
+ # Send a get request to verify device update
+ r = requests.get(f"{API}/devices", timeout=5)
+
+ # Check if status code is 200 (OK)
+ assert r.status_code == 200
+
+ # Parse the response (devices list)
+ response = r.json()
+
+ # Iterate through the devices to find the device based on "mac_addr"
+ updated_device = next(
+ (d for d in response if d["mac_addr"] == mac_addr),
+ None
+ )
+
+ # Error handling if the device is not being found
+ if updated_device is None:
+ raise Exception("The device could not be found")
+
+ # Check if device "manufacturer" was updated
+ assert device["manufacturer"] == updated_device["manufacturer"]
+
+ # Check if device "manufacturer" was updated
+ assert device["model"] == updated_device["model"]
+
+def test_edit_device_not_found(empty_devices_dir, testrun): # pylint: disable=W0613
+ """ Test for edit device when device is not found (404) """
+
+ # Error handling if there are devices in local/devices
+ if len(get_all_devices()) != 0:
+ raise Exception("Expected no device in local/devices")
+
+ # Load the device (payload) using load_json utility method
+ device = load_json("device_config.json", directory=DEVICE_1_PATH)
+
+ # Assign the mac address
+ mac_addr = device["mac_addr"]
+
+ # Update the manufacturer and model values
+ device["manufacturer"] = "Updated manufacturer"
+ device["model"] = "Updated model"
+
+ # Payload with the updated device name
+ updated_device = {
+ "mac_addr": mac_addr,
+ "device": device
+ }
+
+ # Exception if the device is found
+ if device_exists(mac_addr):
+ raise ValueError(f"Device with mac address:{mac_addr} found")
+
+ # Send the post request to update the device
+ r = requests.post(
+ f"{API}/device/edit",
+ data=json.dumps(updated_device),
+ timeout=5)
+
+ # Check if status code is 404 (not found)
+ assert r.status_code == 404
+
+ # Parse the json response (devices)
+ response = r.json()
+
+ # Check if 'error' in response
+ assert "error" in response
+
+ # Check if 'local/device' still has no devices
+ assert len(get_all_devices()) == 0
+
+def test_edit_device_invalid_json(empty_devices_dir, testrun): # pylint: disable=W0613
+ """ Test for edit device invalid json (400) """
+
+ # Empty payload
+ payload = {}
+
+ # Send the post request to update the device
+ r = requests.post(f"{API}/device/edit",
+ data=json.dumps(payload),
+ timeout=5)
+
+ # Check if status code is 400 (bad request)
+ assert r.status_code == 400
+
+ # Parse the json response (devices)
+ response = r.json()
+
+ # Check if 'error' in response
+ assert "error" in response
+
+@pytest.mark.parametrize(
+ "add_devices",
+ [
+ ["device_1", "device_2"]
+ ],
+ indirect=True
+)
+def test_edit_device_mac_already_exists(empty_devices_dir, add_devices, # pylint: disable=W0613
+ testrun): # pylint: disable=W0613
+ """ Test for edit device when the mac address already exists (409) """
+
+ # Load the first device (payload) using load_json utility method
+ device_1 = load_json("device_config.json", directory=DEVICE_1_PATH)
+
+ # Assign the device_1 initial mac address
+ mac_addr_1 = device_1["mac_addr"]
+
+ # Load the second device using load_json utility method
+ device_2 = load_json("device_config.json", directory=DEVICE_2_PATH)
+
+ # Update the device_1 mac address with device_2 mac address
+ device_1["mac_addr"] = device_2["mac_addr"]
+
+ # Payload with the updated device mac address
+ updated_device = {
+ "mac_addr": mac_addr_1,
+ "device": device_1
+ }
+
+ # Exception if the device is not found
+ if not device_exists(mac_addr_1):
+ raise ValueError(f"Device with mac address:{mac_addr_1} not found")
+
+ # Send the post request to update the device
+ r = requests.post(f"{API}/device/edit",
+ data=json.dumps(updated_device),
+ timeout=5)
+
+ # Check if status code is 409 (conflict)
+ assert r.status_code == 409
+
+ # Parse the json response (devices)
+ response = r.json()
+
+ # Check if 'error' in response
+ assert "error" in response
+
+@pytest.mark.parametrize("add_devices", [
+ ["device_1"]
+],indirect=True)
+def test_edit_device_test_in_progress(empty_devices_dir, add_devices, # pylint: disable=W0613
+ testrun, start_test): # pylint: disable=W0613
+ """ Test for edit device when a test is in progress (403) """
+
+ # Load the device (payload) using load_json utility method
+ device = load_json("device_config.json", directory=DEVICE_1_PATH)
+
+ # Assign the mac address
+ mac_addr = device["mac_addr"]
+
+ # Update the manufacturer and model values
+ device["manufacturer"] = "Updated Manufacturer"
+ device["model"] = "Updated Model"
+
+ # Payload with the updated device name
+ updated_device = {
+ "mac_addr": mac_addr,
+ "device": device
+ }
+
+ # Exception if the device is not found
+ if not device_exists(mac_addr):
+ raise ValueError(f"Device with mac address:{mac_addr} not found")
+
+ # Send the post request to update the device
+ r = requests.post(
+ f"{API}/device/edit",
+ data=json.dumps(updated_device),
+ timeout=5)
+
+ # Check if status code is 403 (forbidden)
+ assert r.status_code == 403
+
+ # Send a get request to verify that device was not updated
+ r = requests.get(f"{API}/devices", timeout=5)
+
+ # Exception if status code is not 200
+ if r.status_code != 200:
+ raise ValueError(f"API request failed with code: {r.status_code}")
+
+ # Parse the response (devices list)
+ response = r.json()
+
+ # Iterate through the devices to find the device based on "mac_addr"
+ updated_device = next(
+ (d for d in response if d["mac_addr"] == mac_addr),
+ None
+ )
+
+ # Error handling if the device is not being found
+ if updated_device is None:
+ raise Exception("The device could not be found")
+
+ # Check that device "manufacturer" was not updated
+ assert device["manufacturer"] != updated_device["manufacturer"]
+
+ # Check that device "manufacturer" was not updated
+ assert device["model"] != updated_device["model"]
+
+@pytest.mark.parametrize("add_devices", [
+ ["device_1"]
+],indirect=True)
+def test_edit_device_invalid_manufacturer(empty_devices_dir, add_devices, # pylint: disable=W0613
+ testrun): # pylint: disable=W0613
+ """ Test for edit device invalid chars in 'manufacturer' field (400) """
+
+ # Load the device
+ device = load_json("device_config.json", directory=DEVICE_1_PATH)
+
+ # Modify the "manufacturer" field value with the invalid characters
+ device["manufacturer"] = "/';disallowed characters"
+
+ # Send the post request to update the device
+ r = requests.post(f"{API}/device", data=json.dumps(device),
+ timeout=5)
+
+ # Check if the status code is 400 (bad request)
+ assert r.status_code == 400
+
+ # Parse the json response
+ response = r.json()
+
+ # Check if 'error' in response
+ assert "error" in response
+
+@pytest.mark.parametrize("add_devices", [
+ ["device_1"]
+],indirect=True)
+def test_edit_device_invalid_model(empty_devices_dir, add_devices, testrun): # pylint: disable=W0613
+ """ Test for edit device invalid chars in 'model' field (400) """
+
+ # Load the device
+ device = load_json("device_config.json", directory=DEVICE_1_PATH)
+
+ # Modify the "model" field value with the invalid characters
+ device["model"] = "/';disallowed characters"
+
+ # Send the post request to update the device
+ r = requests.post(f"{API}/device", data=json.dumps(device),
+ timeout=5)
+
+ # Check if the status code is 400 (bad request)
+ assert r.status_code == 400
+
+ # Parse the json response
+ response = r.json()
+
+ # Check if 'error' in response
+ assert "error" in response
+
+def test_edit_long_chars(empty_devices_dir, testrun): # pylint: disable=W0613
+ """ Test for edit a device with model over 28 chars (400) """
+
+ # Load the device
+ device = load_json("device_config.json", directory=DEVICE_1_PATH)
+
+ # Modify the "model" field value with 29 chars
+ device["model"] = "a" * 29
+
+ # Send the post request to edit the device
+ r = requests.post(f"{API}/device", data=json.dumps(device),
+ timeout=5)
+
+ # Check if the status code is 400 (bad request)
+ assert r.status_code == 400
+
+ # Parse the json response
+ response = r.json()
+
+ # Check if 'error' in response
+ assert "error" in response
+
+@pytest.mark.parametrize("add_devices", [
+ ["device_1"]
+],indirect=True)
+def test_delete_device(empty_devices_dir, add_devices, testrun): # pylint: disable=W0613
+ """ Test for succesfully delete device endpoint (200) """
+
+ # Load the device
+ device = load_json("device_config.json", directory=DEVICE_1_PATH)
+
+ # Assign the mac address
+ mac_addr = device["mac_addr"]
+
+ # Assign the payload with device to be deleted
+ payload = { "mac_addr": mac_addr }
+
+ # Send the delete request
+ r = requests.delete(f"{API}/device/",
+ data=json.dumps(payload),
+ timeout=5)
+
+ # Check if status code is 200 (OK)
+ assert r.status_code == 200
+
+ # Parse the JSON response
+ response = r.json()
+
+ # Check if the response contains "success" key
+ assert "success" in response
+
+ # Send the get request to check if the device has been deleted
+ r = requests.get(f"{API}/devices", timeout=5)
+
+ # Exception if status code is not 200
+ if r.status_code != 200:
+ raise ValueError(f"API request failed with code: {r.status_code}")
+
+ # Parse the JSON response (device)
+ device = r.json()
+
+ # Iterate through the devices to find the device based on the 'mac address'
+ deleted_device = next(
+ (d for d in device if d["mac_addr"] == mac_addr),
+ None
+ )
+
+ # Check if device was deleted
+ assert deleted_device is None
+
+def test_delete_device_not_found(empty_devices_dir, testrun): # pylint: disable=W0613
+ """ Test for delete device when the device doesn't exist (404) """
+
+ # Assign the payload with non existing device mac address
+ payload = {"mac_addr" : "non-existing"}
+
+ # Send the delete request for a non-existing device
+ r = requests.delete(f"{API}/device/",
+ data=json.dumps(payload),
+ timeout=5)
+
+ # Check if status code is 404 (not found)
+ assert r.status_code == 404
+
+ # Parse the JSON response
+ response = r.json()
+
+ # Check if error in response
+ assert "error" in response
+
+@pytest.mark.parametrize("add_devices", [
+ ["device_1"]
+],indirect=True)
+def test_delete_device_no_mac(empty_devices_dir, add_devices, testrun): # pylint: disable=W0613
+ """ Test for delete device when no mac address in payload (400) """
+
+ # Assign an empty payload (no mac address)
+ payload = {}
+
+ # Send the delete request
+ r = requests.delete(f"{API}/device/",
+ data=json.dumps(payload),
+ timeout=5)
+
+ # Check if status code is 400 (bad request)
+ assert r.status_code == 400
+
+ # Parse the JSON response
+ response = r.json()
+
+ # Check if 'error' in response
+ assert "error" in response
+
+ # Check that device wasn't deleted from 'local/devices'
+ assert len(get_all_devices()) == 1
+
+@pytest.mark.parametrize("add_devices", [
+ ["device_1"]
+],indirect=True)
+def test_delete_device_testrun_in_progress(empty_devices_dir, add_devices, # pylint: disable=W0613
+ testrun, start_test): # pylint: disable=W0613
+ """ Test for delete device when testrun is in progress (403) """
+
+ # Load the device details
+ device = load_json("device_config.json", directory=DEVICE_1_PATH)
+
+ # Assign the mac address
+ mac_addr = device["mac_addr"]
+
+ # Assign the payload with device to be deleted mac address
+ payload = { "mac_addr": mac_addr }
+
+ # Send the delete request
+ r = requests.delete(f"{API}/device/",
+ data=json.dumps(payload),
+ timeout=5)
+
+ # Check if status code is 403 (forbidden)
+ assert r.status_code == 403
+
+ # Parse the JSON response
+ response = r.json()
+
+ # Check if the response contains "success" key
+ assert "error" in response
+
+def test_create_invalid_manufacturer(empty_devices_dir, testrun): # pylint: disable=W0613
+ """ Test for create device invalid chars in 'manufacturer' field (400) """
+
+ # Load the device
+ device = load_json("device_config.json", directory=DEVICE_1_PATH)
+
+ # Modify the "manufacturer" field value with the invalid characters
+ device["manufacturer"] = "/';disallowed characters"
+
+ # Send the post request to create the device
+ r = requests.post(f"{API}/device", data=json.dumps(device),
+ timeout=5)
+
+ # Check if the status code is 400 (bad request)
+ assert r.status_code == 400
+
+ # Parse the json response
+ response = r.json()
+
+ # Check if 'error' in response
+ assert "error" in response
+
+def test_create_invalid_model(empty_devices_dir, testrun): # pylint: disable=W0613
+ """ Test for create device invalid chars in 'model' field (400) """
+
+ # Load the device
+ device = load_json("device_config.json", directory=DEVICE_1_PATH)
+
+ # Modify the "model" field value with the invalid characters
+ device["model"] = "/';disallowed characters"
+
+ # Send the post request to create the device
+ r = requests.post(f"{API}/device", data=json.dumps(device),
+ timeout=5)
+
+ # Check if the status code is 400 (bad request)
+ assert r.status_code == 400
+
+ # Parse the json response
+ response = r.json()
+
+ # Check if 'error' in response
+ assert "error" in response
+
+def test_create_long_chars(empty_devices_dir, testrun): # pylint: disable=W0613
+ """ Test for create a device with model over 28 chars (400) """
+
+ # Load the device
+ device = load_json("device_config.json", directory=DEVICE_1_PATH)
+
+ # Modify the "model" field value with 29 chars
+ device["model"] = "a" * 29
+
+ # Send the post request to create the device
+ r = requests.post(f"{API}/device", data=json.dumps(device),
+ timeout=5)
+
+ # Check if the status code is 400 (bad request)
+ assert r.status_code == 400
+
+ # Parse the json response
+ response = r.json()
+
+ # Check if 'error' in response
+ assert "error" in response
+
+def test_get_devices_format(testrun): # pylint: disable=W0613
+ """ Test for get devices format (200) """
+
+ # Send the get request
+ r = requests.get(f"{API}/devices/format", timeout=5)
+
+ # Check if status code is 200 (OK)
+ assert r.status_code == 200
+
+ # Parse the JSON response
+ response = r.json()
+
+ # Check if the response is a list
+ assert isinstance(response, list)
+
+ # Store the expected main keys and types
+ response_keys = {
+ "step": int,
+ "title": str,
+ "questions": list
+ }
+
+ # Store the 'questions' field expected keys and types
+ questions_keys = {
+ "id": int,
+ "question": str,
+ "type": str,
+ "options": list
+ }
+
+ # Iterate over the response items
+ for item in response:
+
+ # Iterate over the 'response_keys' dict keys and values
+ for key, key_type in response_keys.items():
+
+ # Check if the key is in the response item
+ assert key in item
+
+ # Check if the key has the expected data type
+ assert isinstance(item[key], key_type)
+
+ # Iterate over the 'questions' field
+ for questions in item["questions"]:
+
+ # Iterate over the 'questions_keys' dict keys and values
+ for key, key_type in questions_keys.items():
+
+ # Check if the key is in 'questions' field
+ assert key in questions
+
+ # Check if the key has the expected data type
+ assert isinstance(questions[key], key_type)
+
+def test_sys_testpacks(testrun): # pylint: disable=W0613
+ """ Test for system testpack endpoint (200) """
+
+ # Send the get request to the API
+ r = requests.get(f"{API}/system/testpacks", timeout=5)
+
+ # Check if status code is 200 (ok)
+ assert r.status_code == 200
+
+ # Parse the response
+ response = r.json()
+
+ # Check if the response is a list
+ assert isinstance(response, list)
+
+# Tests for certificates endpoints
+
+def delete_all_certs():
+ """ Delete all certificates from root_certs folder """
+
+ # Pre-assign 'item' so the exception handlers below can always reference it
+ item = ""
+
+ try:
+
+ # Check if the certs path (local/root_certs) exists and is a folder
+ if os.path.exists(CERTS_DIRECTORY) and os.path.isdir(CERTS_DIRECTORY):
+
+ # Iterate over all certificates from root_certs folder
+ for item in os.listdir(CERTS_DIRECTORY):
+
+ # Combine the directory path with the item name to create the full path
+ item_path = os.path.join(CERTS_DIRECTORY, item)
+
+ # Check if item is a file
+ if os.path.isfile(item_path):
+
+ # Remove the file
+ os.unlink(item_path)
+
+ else:
+
+ # If item is a folder remove it
+ shutil.rmtree(item_path)
+
+ except PermissionError:
+
+ # Permission related issues
+ print(f"Permission Denied: {item}")
+
+ except OSError as err:
+
+ # System related issues
+ print(f"Error removing {item}: {err}")
+
+def load_cert_file(cert_filename):
+ """ Utility method to load a certificate file in binary read mode """
+
+ # Construct the full file path
+ cert_path = os.path.join(CERTS_PATH, cert_filename)
+
+ # Open the certificate file in binary read mode
+ with open(cert_path, "rb") as cert_file:
+
+ # Return the certificate file
+ return cert_file.read()
+
+def extract_name(cert_data):
+ """ Utility method to extract the Common Name (CN) from cert data """
+
+ # Load the cert using the cryptography library
+ cert = x509.load_pem_x509_certificate(cert_data, default_backend())
+
+ # Extract and return the common name value
+ return cert.subject.get_attributes_for_oid(x509.NameOID.COMMON_NAME)[0].value
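+
+# For example, a certificate whose subject contains "CN=Example Root CA"
+# would yield "Example Root CA" (illustrative value, not taken from the
+# test certificates).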
+
+@pytest.fixture()
+def add_certs(request):
+ """ Upload specified certificates to local/root_certs """
+
+ # Access the parameter (certs list) provided to the fixture
+ certs = request.param
+
+ # Iterate over the certificate names provided
+ for cert in certs:
+
+ # Construct the full path for cert from 'testing/api/certificates'
+ source_path = os.path.join(CERTS_PATH, cert)
+
+ # Copy the cert from 'testing/api/certificates' to 'local/root_certs'
+ shutil.copy(source_path, CERTS_DIRECTORY)
+
+ # Return the list with certs name
+ return certs
+
+@pytest.fixture()
+def reset_certs():
+ """ Delete the certificates before and after each test """
+
+ # Delete before the test
+ delete_all_certs()
+
+ yield
+
+ # Delete after the test
+ delete_all_certs()
+
+# Use parametrize to create a test suite for 3 scenarios
+@pytest.mark.parametrize("add_certs", [
+ [],
+ ["crt.pem"],
+ ["crt.pem", "WR2.pem"],
+], indirect=True)
+def test_get_certs(reset_certs, add_certs, testrun): # pylint: disable=W0613
+ """ Test for get certs when none, one or two certs are available (200) """
+
+ # Send the GET request to "/system/config/certs" endpoint
+ r = requests.get(f"{API}/system/config/certs", timeout=5)
+
+ # Check if the status code is 200 (OK)
+ assert r.status_code == 200
+
+ # Parse the response (certificates)
+ response = r.json()
+
+ # Check if response is a list
+ assert isinstance(response, list)
+
+ # Check if the number of certs matches the number of certs available
+ assert len(response) == len(add_certs)
+
+def test_upload_cert(reset_certs, testrun): # pylint: disable=W0613
+ """ Test for upload certificate successfully (200) """
+
+ # Load the first certificate file content using the utility method
+ cert_file = load_cert_file("crt.pem")
+
+ # Send a POST request to the API endpoint to upload the certificate
+ r = requests.post(
+ f"{API}/system/config/certs",
+ files={"file": ("crt.pem", cert_file, "application/x-x509-ca-cert")},
+ timeout=5
+ )
+
+ # Check if status code is 201 (Created)
+ assert r.status_code == 201
+
+ # Parse the response
+ response = r.json()
+
+ # Check if 'filename' field is in the response
+ assert "filename" in response
+
+ # Check if the certificate filename is 'crt.pem'
+ assert response["filename"] == "crt.pem"
+
+ # Load the second certificate file using the utility method
+ cert_file = load_cert_file("WR2.pem")
+
+ # Send a POST request to the API endpoint to upload the second certificate
+ r = requests.post(
+ f"{API}/system/config/certs",
+ files={"file": ("WR2.pem", cert_file, "application/x-x509-ca-cert")},
+ timeout=5
+ )
+
+ # Check if status code is 201 (Created)
+ assert r.status_code == 201
+
+ # Parse the response
+ response = r.json()
+
+ # Check if 'filename' field is in the response
+ assert "filename" in response
+
+ # Check if the certificate filename is 'WR2.pem'
+ assert response["filename"] == "WR2.pem"
+
+ # Send get request to check that the certificates are listed
+ r = requests.get(f"{API}/system/config/certs", timeout=5)
+
+ # Parse the response
+ response = r.json()
+
+ # Check if "crt.pem" exists
+ assert any(cert["filename"] == "crt.pem" for cert in response)
+
+ # Check if "WR2.pem" exists
+ assert any(cert["filename"] == "WR2.pem" for cert in response)
+
+def test_upload_invalid_cert_format(reset_certs, testrun): # pylint: disable=W0613
+ """ Test for upload an invalid certificate format (400) """
+
+ # Load the first certificate file content using the utility method
+ cert_file = load_cert_file("invalid.pem")
+
+ # Send a POST request to the API endpoint to upload the certificate
+ r = requests.post(
+ f"{API}/system/config/certs",
+ files={"file": ("invalid.pem", cert_file, "application/x-x509-ca-cert")},
+ timeout=5
+ )
+
+ # Check if status code is 400 (bad request)
+ assert r.status_code == 400
+
+ # Parse the response
+ response = r.json()
+
+ # Check if "error" key is in response
+ assert "error" in response
+
+def test_upload_invalid_cert_name(reset_certs, testrun): # pylint: disable=W0613
+ """ Test for upload a valid certificate with invalid filename (400) """
+
+ # Assign the invalid certificate name to a variable
+ cert_name = "invalidname1234567891234.pem"
+
+  # Load the certificate file content using the utility method
+ cert_file = load_cert_file(cert_name)
+
+ # Send a POST request to the API endpoint to upload the certificate
+ r = requests.post(
+ f"{API}/system/config/certs",
+ files={"file": (cert_name, cert_file, "application/x-x509-ca-cert")},
+ timeout=5
+ )
+
+ # Check if status code is 400 (bad request)
+ assert r.status_code == 400
+
+ # Parse the response
+ response = r.json()
+
+ # Check if "error" key is in response
+ assert "error" in response
+
+@pytest.mark.parametrize("add_certs", [["crt.pem"]], indirect=True)
+def test_upload_existing_cert(reset_certs, add_certs, testrun): # pylint: disable=W0613
+ """ Test for upload an existing certificate (409) """
+
+ # Load the cert file content using the utility method
+ cert_file = load_cert_file("crt.pem")
+
+  # Send a POST request to upload the same certificate a second time
+ r = requests.post(
+ f"{API}/system/config/certs",
+ files={"file": ("crt.pem", cert_file, "application/x-x509-ca-cert")},
+ timeout=5
+ )
+
+ # Check if status code is 409 (conflict)
+ assert r.status_code == 409
+
+ # Parse the json response
+ response = r.json()
+
+ # Check if "error" key is in response
+ assert "error" in response
+
+@pytest.mark.parametrize("add_certs", [["crt.pem", "WR2.pem"]], indirect=True)
+def test_delete_cert_success(reset_certs, add_certs, testrun): # pylint: disable=W0613
+ """ Test for successfully deleting an existing certificate (200) """
+
+ # Load the first cert details to extract the 'name' value
+ uploaded_cert = load_cert_file("crt.pem")
+
+ # Assign the 'name' value from certificate
+ cert_name = extract_name(uploaded_cert)
+
+ # Assign the payload
+ delete_payload = {"name": cert_name}
+
+ # Send delete certificate request
+ r = requests.delete(f"{API}/system/config/certs",
+ data=json.dumps(delete_payload),
+ timeout=5)
+
+ # Check if status code is 200 (OK)
+ assert r.status_code == 200
+
+ # Send the get request to display all certificates
+ r = requests.get(f"{API}/system/config/certs", timeout=5)
+
+ # Parse the json response
+ response = r.json()
+
+ # Check that the certificate is no longer listed
+ assert not any(cert["filename"] == "crt.pem" for cert in response)
+
+ # Load the second cert details to extract the 'name' value
+ uploaded_cert = load_cert_file("WR2.pem")
+
+ # Assign the 'name' value from certificate
+ cert_name = extract_name(uploaded_cert)
+
+ # Assign the payload
+ delete_payload = {"name": cert_name}
+
+ # Send delete certificate request
+ r = requests.delete(f"{API}/system/config/certs",
+ data=json.dumps(delete_payload),
+ timeout=5)
+
+ # Check if status code is 200 (OK)
+ assert r.status_code == 200
+
+ # Send the get request to display all certificates
+ r = requests.get(f"{API}/system/config/certs", timeout=5)
+
+ # Parse the json response
+ response = r.json()
+
+ # Check that the certificate is no longer listed
+ assert not any(cert["filename"] == "WR2.pem" for cert in response)
+
+@pytest.mark.parametrize("add_certs", [["crt.pem"]], indirect=True)
+def test_delete_cert_bad_request(reset_certs, add_certs, testrun): # pylint: disable=W0613
+ """ Test for delete a certificate without providing the name (400)"""
+
+ # Empty payload
+ delete_payload = {}
+
+ # Send the delete request
+ r = requests.delete(f"{API}/system/config/certs",
+ data=json.dumps(delete_payload),
+ timeout=5)
+
+ # Check if status code is 400 (Bad Request)
+ assert r.status_code == 400
+
+  # Parse the JSON response
+ response = r.json()
+
+ # Check if error in response
+ assert "error" in response
+
+def test_delete_cert_not_found(reset_certs, testrun): # pylint: disable=W0613
+ """ Test for delete certificate when does not exist (404) """
+
+ # Attempt to delete a certificate with a name that doesn't exist
+ delete_payload = {"name": "non existing"}
+
+ # Send the delete request
+ r = requests.delete(f"{API}/system/config/certs",
+ data=json.dumps(delete_payload),
+ timeout=5)
+
+ # Check if status code is 404 (Not Found)
+ assert r.status_code == 404
+
+  # Parse the JSON response
+ response = r.json()
+
+ # Check if error in response
+ assert "error" in response
+
+# Tests for profile endpoints
+
+@pytest.fixture()
+def add_profiles(request):
+ """ Upload specified profile to local/risk_profiles """
+
+ # Access the parameter (profiles list) provided to the fixture
+ profiles = request.param
+
+ # Iterate over the profile names provided
+ for profile in profiles:
+
+ # Construct full path of the file from 'testing/api/profiles' folder
+ source_path = os.path.join(PROFILES_PATH, profile)
+
+ # Copy the file_name from 'testing/api/profiles' to 'local/risk_profiles'
+ shutil.copy(source_path, PROFILES_DIRECTORY)
+
+  # Return the list of profile names
+ return profiles
+
+def delete_all_profiles():
+ """Utility method to delete all profiles from local/risk_profiles"""
+
+  # Check if the profiles path (local/risk_profiles) exists and is a folder
+  if os.path.exists(PROFILES_DIRECTORY) and os.path.isdir(PROFILES_DIRECTORY):
+
+    # Iterate over all profiles from the risk_profiles folder
+    for item in os.listdir(PROFILES_DIRECTORY):
+
+      # Create the full path
+      item_path = os.path.join(PROFILES_DIRECTORY, item)
+
+      # Wrap each removal so one failure doesn't abort the cleanup and so
+      # 'item' is always bound when the handlers reference it
+      try:
+
+        # Check if item is a file
+        if os.path.isfile(item_path):
+
+          # Remove the file
+          os.unlink(item_path)
+
+        else:
+
+          # If item is a folder, remove it recursively
+          shutil.rmtree(item_path)
+
+      except PermissionError:
+
+        # Permission related issues
+        print(f"Permission denied: {item}")
+
+      except OSError as err:
+
+        # System related issues
+        print(f"Error removing {item}: {err}")
+
+@pytest.fixture()
+def empty_profiles_dir():
+ """ Delete all the profiles before and after test """
+
+ # Delete before the test
+ delete_all_profiles()
+
+ yield
+
+ # Delete after the test
+ delete_all_profiles()
+
+def profile_exists(profile_name):
+ """ Utility method to check if profile exists """
+
+ # Send the get request
+ r = requests.get(f"{API}/profiles", timeout=5)
+
+  # Fail fast if the status code is not 200 (OK)
+  if r.status_code != 200:
+    raise ValueError(f"API request failed with code: {r.status_code}")
+
+ # Parse the JSON response to get the list of profiles
+ profiles = r.json()
+
+  # Return whether the name is in the list of profiles
+ return any(p["name"] == profile_name for p in profiles)
+
+@pytest.fixture()
+def remove_risk_assessment():
+ """ Fixture to remove and restore risk_assessment.json """
+
+ # Path to the risk_assessment.json file
+ risk_assessment_path = os.path.join("resources", "risk_assessment.json")
+
+ # Backup path for the risk_assessment.json file
+ backup_path = os.path.join("resources", "risk_assessment_backup.json")
+
+ # Create a backup of the risk_assessment.json file
+ if os.path.exists(risk_assessment_path):
+ shutil.copy(risk_assessment_path, backup_path)
+
+ # Delete the risk_assessment.json file
+ if os.path.exists(risk_assessment_path):
+ os.remove(risk_assessment_path)
+
+ # Run the test
+ yield
+
+ # Restore the risk assessment file after the test
+ if os.path.exists(backup_path):
+ shutil.copy(backup_path, risk_assessment_path)
+ os.remove(backup_path)
+
+def test_get_profiles_format(testrun): # pylint: disable=W0613
+ """ Test for profiles format (200) """
+
+ # Send the get request
+ r = requests.get(f"{API}/profiles/format", timeout=5)
+
+ # Check if status code is 200 (OK)
+ assert r.status_code == 200
+
+ # Parse the response
+ response = r.json()
+
+ # Check if the response is a list
+ assert isinstance(response, list)
+
+  # Check that each item in the response has keys "question" and "type"
+ for item in response:
+ assert "question" in item
+ assert "type" in item
+
+# Use parametrize to create a test suite for 3 scenarios
+@pytest.mark.parametrize("add_profiles", [
+ [],
+ ["valid_profile.json"],
+ ["valid_profile.json", "draft_profile.json"],
+], indirect=True)
+def test_get_profiles(empty_profiles_dir, add_profiles, testrun): # pylint: disable=W0613
+ """ Test get profiles when none, one or two profiles are available (200) """
+
+ # Send get request to the "/profiles" endpoint
+ r = requests.get(f"{API}/profiles", timeout=5)
+
+ # Check if the status code is 200 (OK)
+ assert r.status_code == 200
+
+ # Parse the response (profiles)
+ response = r.json()
+
+ # Check if response is a list
+ assert isinstance(response, list)
+
+ # Check if the number of profiles matches the number of profiles available
+ assert len(response) == len(add_profiles)
+
+ # Assign the expected profile fields
+ expected_fields = [
+ "name", "status", "created", "version", "questions", "risk"
+ ]
+
+  # Check if any profiles exist
+ if len(add_profiles) > 0:
+
+ # Iterate through profiles
+ for profile in response:
+
+ # Iterate through expected_fields list
+ for field in expected_fields:
+
+ # Check if the field is in profile
+ assert field in profile
+
+ # Assign profile["questions"]
+ profile_questions = profile["questions"]
+
+ # Check if "questions" value is a list
+ assert isinstance(profile_questions, list)
+
+ # Check that "questions" value has the expected fields
+ for element in profile_questions:
+
+ # Check if each element is dict
+ assert isinstance(element, dict)
+
+ # Check if "question" key is in dict element
+ assert "question" in element
+
+ # Check if "asnswer" key is in dict element
+ assert "answer" in element
+
+def test_create_profile(testrun): # pylint: disable=W0613
+ """ Test for create profile when profile does not exist (201) """
+
+ # Load the profile
+ new_profile = load_json("valid_profile.json", directory=PROFILES_PATH)
+
+ # Assign the profile name to profile_name
+ profile_name = new_profile["name"]
+
+ # Check if the profile already exists
+ if profile_exists(profile_name):
+ raise ValueError(f"Profile: {profile_name} exists")
+
+ # Send the post request
+ r = requests.post(f"{API}/profiles", data=json.dumps(new_profile), timeout=5)
+
+ # Check if status code is 201 (Created)
+ assert r.status_code == 201
+
+ # Parse the response
+ response = r.json()
+
+ # Check if "success" key in response
+ assert "success" in response
+
+ # Verify profile creation
+ r = requests.get(f"{API}/profiles", timeout=5)
+
+ # Check if status code is 200 (OK)
+ assert r.status_code == 200
+
+ # Parse the response
+ profiles = r.json()
+
+ # Iterate through all the profiles to find the profile based on the "name"
+ created_profile = next(
+ (p for p in profiles if p["name"] == profile_name), None
+ )
+
+ # Check if profile was created
+ assert created_profile is not None
+
+@pytest.mark.parametrize("add_profiles", [
+ ["valid_profile.json"]
+], indirect=True)
+def test_update_profile(empty_profiles_dir, add_profiles, testrun): # pylint: disable=W0613
+ """ Test for update profile when profile already exists (200) """
+
+ # Load the profile using load_json utility method
+ new_profile = load_json("valid_profile.json", directory=PROFILES_PATH)
+
+ # Assign the new_profile name
+ profile_name = new_profile["name"]
+
+ # Assign the profile questions
+ profile_questions = new_profile["questions"]
+
+ # Assign the updated_profile name
+ updated_profile_name = "updated_valid_profile"
+
+ # Payload with the updated device name
+ updated_profile = {
+ "name": profile_name,
+ "rename" : updated_profile_name,
+ "questions": profile_questions
+ }
+
+  # Raise an exception if the profile does not exist
+  if not profile_exists(profile_name):
+    raise ValueError(f"Profile: {profile_name} does not exist")
+
+ # Send the post request to update the profile
+ r = requests.post(
+ f"{API}/profiles",
+ data=json.dumps(updated_profile),
+ timeout=5)
+
+ # Check if status code is 200 (OK)
+ assert r.status_code == 200
+
+ # Parse the response
+ response = r.json()
+
+ # Check if "success" key in response
+ assert "success" in response
+
+ # Get request to verify profile update
+ r = requests.get(f"{API}/profiles", timeout=5)
+
+ # Check if status code is 200 (OK)
+  assert r.status_code == 200
+
+  # Parse the response
+ profiles = r.json()
-def test_create_device_already_exists(
- empty_devices_dir, # pylint: disable=W0613
- testrun): # pylint: disable=W0613
- device_1 = {
- "manufacturer": "Google",
- "model": "First",
- "mac_addr": "00:1e:42:35:73:c4",
- "test_modules": {
- "dns": {"enabled": True},
- "connection": {"enabled": True},
- "ntp": {"enabled": True},
- "baseline": {"enabled": True},
- "nmap": {"enabled": True},
- },
- }
+ # Iterate through the profiles to find the profile based on the updated "name"
+ updated_profile_check = next(
+ (p for p in profiles if p["name"] == updated_profile_name),
+ None
+  )
+
+  # Check if profile was updated
+ assert updated_profile_check is not None
- r = requests.post(f"{API}/device",
- data=json.dumps(device_1),
- timeout=5)
- print(r.text)
- assert r.status_code == 201
- assert len(local_get_devices()) == 1
+def test_update_profile_no_profiles_format(empty_profiles_dir, # pylint: disable=W0613
+ remove_risk_assessment, testrun): # pylint: disable=W0613
+ """Test for profile update when profiles format is not available (501)"""
- r = requests.post(f"{API}/device",
- data=json.dumps(device_1),
+ # Prepare a valid profile update request
+ profile_update = load_json("valid_profile.json", directory=PROFILES_PATH)
+
+ # Send a POST request to update the profile
+ r = requests.post(f"{API}/profiles",
+ data=json.dumps(profile_update),
timeout=5)
- print(r.text)
- assert r.status_code == 409
+ # Check if the response status code is 501 (Not Implemented)
+ assert r.status_code == 501
-def test_create_device_invalid_json(
- empty_devices_dir, # pylint: disable=W0613
- testrun): # pylint: disable=W0613
- device_1 = {
- }
+ # Parse the response
+ response = r.json()
- r = requests.post(f"{API}/device",
- data=json.dumps(device_1),
- timeout=5)
- print(r.text)
- assert r.status_code == 400
+ # Check if "error" key is present in the response
+ assert "error" in response
+ # Check if the error message matches the expected response
+ assert response["error"] == "Risk profiles are not available right now"
-def test_create_device_invalid_request(
- empty_devices_dir, # pylint: disable=W0613
- testrun): # pylint: disable=W0613
+@pytest.mark.parametrize("add_profiles", [
+ ["valid_profile.json"]
+], indirect=True)
+def test_update_profile_invalid_json(empty_profiles_dir, add_profiles, # pylint: disable=W0613
+ testrun): # pylint: disable=W0613
+ """ Test for update profile invalid JSON payload (400) """
- r = requests.post(f"{API}/device",
- data=None,
- timeout=5)
- print(r.text)
+ # Invalid JSON
+ updated_profile = {}
+
+ # Send the post request to update the profile
+ r = requests.post(
+ f"{API}/profiles",
+ data=json.dumps(updated_profile),
+ timeout=5)
+
+ # Parse the response
+ response = r.json()
+
+ # Check if status code is 400 (Bad request)
assert r.status_code == 400
+ # Check if "error" key in response
+ assert "error" in response
-def test_device_edit_device(
- testing_devices, # pylint: disable=W0613
- testrun): # pylint: disable=W0613
- with open(
- testing_devices[1], encoding="utf-8"
- ) as f:
- local_device = json.load(f)
+def test_create_profile_invalid_json(empty_profiles_dir, testrun): # pylint: disable=W0613
+ """ Test for create profile invalid JSON payload (400) """
- mac_addr = local_device["mac_addr"]
- new_model = "Alphabet"
+ # Invalid JSON
+ new_profile = {}
- r = requests.get(f"{API}/devices", timeout=5)
- all_devices = json.loads(r.text)
+  # Send the post request to create the profile
+ r = requests.post(
+ f"{API}/profiles",
+ data=json.dumps(new_profile),
+ timeout=5)
- api_device = next(x for x in all_devices if x["mac_addr"] == mac_addr)
+ # Parse the response
+ response = r.json()
- updated_device = copy.deepcopy(api_device)
- updated_device["model"] = new_model
+ # Check if status code is 400 (Bad request)
+ assert r.status_code == 400
- new_test_modules = {
- k: {"enabled": not v["enabled"]}
- for k, v in updated_device["test_modules"].items()
- }
- updated_device["test_modules"] = new_test_modules
+ # Check if "error" key in response
+ assert "error" in response
- updated_device_payload = {}
- updated_device_payload["device"] = updated_device
- updated_device_payload["mac_addr"] = mac_addr
+@pytest.mark.parametrize("add_profiles", [
+ ["valid_profile.json"]
+], indirect=True)
+def test_delete_profile(empty_profiles_dir, add_profiles, testrun): # pylint: disable=W0613
+ """ Test for successfully delete profile (200) """
- print("updated_device")
- pretty_print(updated_device)
- print("api_device")
- pretty_print(api_device)
+ # Load the profile using load_json utility method
+ profile_to_delete = load_json("valid_profile.json", directory=PROFILES_PATH)
- # update device
- r = requests.post(f"{API}/device/edit",
- data=json.dumps(updated_device_payload),
- timeout=5)
+ # Assign the profile name
+ profile_name = profile_to_delete["name"]
+ # Send the delete request
+ r = requests.delete(
+ f"{API}/profiles",
+ data=json.dumps(profile_to_delete),
+ timeout=5)
+
+ # Check if status code is 200 (OK)
assert r.status_code == 200
- r = requests.get(f"{API}/devices", timeout=5)
- all_devices = json.loads(r.text)
- updated_device_api = next(x for x in all_devices if x["mac_addr"] == mac_addr)
+ # Parse the JSON response
+ response = r.json()
- assert updated_device_api["model"] == new_model
- assert updated_device_api["test_modules"] == new_test_modules
+ # Check if the response contains "success" key
+ assert "success" in response
+ # Check if the profile has been deleted
+ r = requests.get(f"{API}/profiles", timeout=5)
-def test_device_edit_device_not_found(
- empty_devices_dir, # pylint: disable=W0613
- testrun): # pylint: disable=W0613
- device_1 = {
- "manufacturer": "Google",
- "model": "First",
- "mac_addr": "00:1e:42:35:73:c4",
- "test_modules": {
- "dns": {"enabled": True},
- "connection": {"enabled": True},
- "ntp": {"enabled": True},
- "baseline": {"enabled": True},
- "nmap": {"enabled": True},
- },
- }
+ # Check if status code is 200 (OK)
+ assert r.status_code == 200
- r = requests.post(f"{API}/device",
- data=json.dumps(device_1),
- timeout=5)
- print(r.text)
- assert r.status_code == 201
- assert len(local_get_devices()) == 1
+ # Parse the JSON response
+ profiles = r.json()
- updated_device = copy.deepcopy(device_1)
+ # Iterate through the profiles to find the profile based on the "name"
+ deleted_profile = next(
+ (p for p in profiles if p["name"] == profile_name),
+ None
+ )
- updated_device_payload = {}
- updated_device_payload["device"] = updated_device
- updated_device_payload["mac_addr"] = "00:1e:42:35:73:c6"
- updated_device_payload["model"] = "Alphabet"
+ # Check if profile was deleted
+ assert deleted_profile is None
+def test_delete_profile_no_profile(empty_profiles_dir, testrun): # pylint: disable=W0613
+ """ Test delete profile if the profile does not exists (404) """
- r = requests.post(f"{API}/device/edit",
- data=json.dumps(updated_device_payload),
- timeout=5)
+ # Assign the profile to delete
+ profile_to_delete = {"name": "non existing"}
- assert r.status_code == 404
+ # Delete the profile
+ r = requests.delete(
+ f"{API}/profiles",
+ data=json.dumps(profile_to_delete),
+ timeout=5)
+ # Check if status code is 404 (Profile does not exist)
+ assert r.status_code == 404
-def test_device_edit_device_incorrect_json_format(
- empty_devices_dir, # pylint: disable=W0613
- testrun): # pylint: disable=W0613
- device_1 = {
- "manufacturer": "Google",
- "model": "First",
- "mac_addr": "00:1e:42:35:73:c4",
- "test_modules": {
- "dns": {"enabled": True},
- "connection": {"enabled": True},
- "ntp": {"enabled": True},
- "baseline": {"enabled": True},
- "nmap": {"enabled": True},
- },
- }
+ # Parse the response
+ response = r.json()
- r = requests.post(f"{API}/device",
- data=json.dumps(device_1),
- timeout=5)
- print(r.text)
- assert r.status_code == 201
- assert len(local_get_devices()) == 1
+ # Check if "error" key in response
+ assert "error" in response
- updated_device_payload = {}
+def test_delete_profile_invalid_json(empty_profiles_dir, testrun): # pylint: disable=W0613
+ """ Test for delete profile invalid JSON payload (400) """
+ # Invalid payload
+ profile_to_delete = {}
- r = requests.post(f"{API}/device/edit",
- data=json.dumps(updated_device_payload),
- timeout=5)
+ # Delete the profile
+ r = requests.delete(
+ f"{API}/profiles",
+ data=json.dumps(profile_to_delete),
+ timeout=5)
+ # Check if status code is 400 (bad request)
assert r.status_code == 400
+ # Parse the response
+ response = r.json()
-def test_device_edit_device_with_mac_already_exists(
- empty_devices_dir, # pylint: disable=W0613
- testrun): # pylint: disable=W0613
- device_1 = {
- "manufacturer": "Google",
- "model": "First",
- "mac_addr": "00:1e:42:35:73:c4",
- "test_modules": {
- "dns": {"enabled": True},
- "connection": {"enabled": True},
- "ntp": {"enabled": True},
- "baseline": {"enabled": True},
- "nmap": {"enabled": True},
- },
- }
-
- r = requests.post(f"{API}/device",
- data=json.dumps(device_1),
- timeout=5)
- print(r.text)
- assert r.status_code == 201
- assert len(local_get_devices()) == 1
-
- device_2 = {
- "manufacturer": "Google",
- "model": "Second",
- "mac_addr": "00:1e:42:35:73:c6",
- "test_modules": {
- "dns": {"enabled": True},
- "connection": {"enabled": True},
- "ntp": {"enabled": True},
- "baseline": {"enabled": True},
- "nmap": {"enabled": True},
- },
- }
- r = requests.post(f"{API}/device",
- data=json.dumps(device_2),
- timeout=5)
- assert r.status_code == 201
- assert len(local_get_devices()) == 2
-
- updated_device = copy.deepcopy(device_1)
+ # Check if "error" key in response
+ assert "error" in response
- updated_device_payload = {}
- updated_device_payload = {}
- updated_device_payload["device"] = updated_device
- updated_device_payload["mac_addr"] = "00:1e:42:35:73:c6"
- updated_device_payload["model"] = "Alphabet"
+ # Invalid payload
+ profile_to_delete_2 = {"status": "Draft"}
+ # Delete the profile
+ r = requests.delete(
+ f"{API}/profiles",
+ data=json.dumps(profile_to_delete_2),
+ timeout=5)
- r = requests.post(f"{API}/device/edit",
- data=json.dumps(updated_device_payload),
- timeout=5)
+ # Check if status code is 400 (bad request)
+ assert r.status_code == 400
- assert r.status_code == 409
+ # Parse the response
+ response = r.json()
+ # Check if "error" key in response
+ assert "error" in response
-def test_system_latest_version(testrun): # pylint: disable=W0613
- r = requests.get(f"{API}/system/version", timeout=5)
- assert r.status_code == 200
- updated_system_version = json.loads(r.text)["update_available"]
- assert updated_system_version is False
+@pytest.mark.parametrize("add_profiles", [
+ ["valid_profile.json"]
+], indirect=True)
+def test_delete_profile_server_error(empty_profiles_dir, add_profiles, # pylint: disable=W0613
+ testrun): # pylint: disable=W0613
+ """ Test for delete profile causing internal server error (500) """
-def test_get_system_config(testrun): # pylint: disable=W0613
- r = requests.get(f"{API}/system/config", timeout=5)
+ # Assign the profile from the fixture
+ profile_to_delete = load_json("valid_profile.json", directory=PROFILES_PATH)
- with open(
- SYSTEM_CONFIG_PATH,
- encoding="utf-8"
- ) as f:
- local_config = json.load(f)
+ # Assign the profile name to profile_name
+ profile_name = profile_to_delete["name"]
- api_config = json.loads(r.text)
+ # Construct the path to the profile JSON file in local/risk_profiles
+ risk_profile_path = os.path.join(PROFILES_DIRECTORY, f"{profile_name}.json")
- # validate structure
- assert set(dict_paths(api_config)) | set(dict_paths(local_config)) == set(
- dict_paths(api_config)
- )
+ # Delete the profile JSON file before making the DELETE request
+ if os.path.exists(risk_profile_path):
+ os.remove(risk_profile_path)
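+
+  # With the backing file gone, the API's own delete is expected to fail
+  # server-side, which is the 500 path this test exercises.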
- assert (
- local_config["network"]["device_intf"]
- == api_config["network"]["device_intf"]
- )
- assert (
- local_config["network"]["internet_intf"]
- == api_config["network"]["internet_intf"]
- )
+ # Send the DELETE request to delete the profile
+ r = requests.delete(f"{API}/profiles",
+ json={"name": profile_to_delete["name"]},
+ timeout=5)
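+
+  # (json= serialises the payload and sets the Content-Type header; the other
+  # tests pass a pre-serialised body via data=, which the API also accepts)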
+ # Check if status code is 500 (Internal Server Error)
+ assert r.status_code == 500
-def test_invalid_path_get(testrun): # pylint: disable=W0613
- r = requests.get(f"{API}/blah/blah", timeout=5)
- response = json.loads(r.text)
- assert r.status_code == 404
- with open(
- os.path.join(os.path.dirname(__file__), "mockito/invalid_request.json"),
- encoding="utf-8"
- ) as f:
- mockito = json.load(f)
+ # Parse the json response
+ response = r.json()
- # validate structure
- assert set(dict_paths(mockito)) == set(dict_paths(response))
+ # Check if error in response
+ assert "error" in response
+# Skipped tests: currently not working because they block during the monitoring period
@pytest.mark.skip()
-def test_trigger_run(testing_devices, testrun): # pylint: disable=W0613
+def test_delete_device_testrun_running(testing_devices, testrun): # pylint: disable=W0613
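+  """ Test that deleting a device while a testrun is running returns 403 """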
+
payload = {"device": {"mac_addr": BASELINE_MAC_ADDR, "firmware": "asd"}}
r = requests.post(f"{API}/system/start", data=json.dumps(payload), timeout=10)
assert r.status_code == 200
@@ -966,49 +3058,54 @@ def test_trigger_run(testing_devices, testrun): # pylint: disable=W0613
start_test_device("x123", BASELINE_MAC_ADDR)
until_true(
- lambda: query_system_status().lower() == "compliant",
- "system status is `complete`",
+ lambda: query_system_status().lower() == "in progress",
+ "system status is `in progress`",
600,
)
- stop_test_device("x123")
-
- # Validate response
- r = requests.get(f"{API}/system/status", timeout=5)
- response = json.loads(r.text)
- pretty_print(response)
-
- # Validate results
- results = {x["name"]: x for x in response["tests"]["results"]}
- print(results)
- # there are only 3 baseline tests
- assert len(results) == 3
+ device_1 = {
+ "manufacturer": "Google",
+ "model": "First",
+ "mac_addr": BASELINE_MAC_ADDR,
+ "test_modules": {
+ "dns": {"enabled": True},
+ "connection": {"enabled": True},
+ "ntp": {"enabled": True},
+ "baseline": {"enabled": True},
+ "nmap": {"enabled": True},
+ },
+ }
+ r = requests.delete(f"{API}/device/",
+ data=json.dumps(device_1),
+ timeout=5)
+ assert r.status_code == 403
- # Validate structure
- with open(
- os.path.join(
- os.path.dirname(__file__), "mockito/running_system_status.json"
- ), encoding="utf-8"
- ) as f:
- mockito = json.load(f)
+@pytest.mark.skip()
+@pytest.mark.parametrize("add_devices", [
+ ["device_1"]
+],indirect=True)
+def test_stop_running_test(empty_devices_dir, add_devices, testrun): # pylint: disable=W0613
+ """ Test for successfully stop testrun when test is running (200) """
- # validate structure
- assert set(dict_paths(mockito)).issubset(set(dict_paths(response)))
+ # Load the device and mac address using add_device utility method
+ device = load_json("device_config.json", directory=DEVICE_1_PATH)
- # Validate results structure
- assert set(dict_paths(mockito["tests"]["results"][0])).issubset(
- set(dict_paths(response["tests"]["results"][0]))
- )
+ mac_addr = device["mac_addr"]
- # Validate a result
- assert results["baseline.compliant"]["result"] == "Compliant"
+ test_modules = device["test_modules"]
+ # Payload with device details
+ payload = {
+ "device": {
+ "mac_addr": mac_addr,
+ "firmware": "test",
+ "test_modules": test_modules
+ }
+ }
-@pytest.mark.skip()
-def test_stop_running_test(testing_devices, testrun): # pylint: disable=W0613
- payload = {"device": {"mac_addr": ALL_MAC_ADDR, "firmware": "asd"}}
r = requests.post(f"{API}/system/start", data=json.dumps(payload),
timeout=10)
+
assert r.status_code == 200
until_true(
@@ -1029,28 +3126,113 @@ def test_stop_running_test(testing_devices, testrun): # pylint: disable=W0613
# Validate response
r = requests.post(f"{API}/system/stop", timeout=5)
- response = json.loads(r.text)
+ response = r.json()
pretty_print(response)
assert response == {"success": "Testrun stopped"}
time.sleep(1)
# Validate response
r = requests.get(f"{API}/system/status", timeout=5)
- response = json.loads(r.text)
+ response = r.json()
pretty_print(response)
assert response["status"] == "Cancelled"
+@pytest.mark.skip()
+def test_status_in_progress(testing_devices, testrun): # pylint: disable=W0613
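+  """ Test that the system status reaches 'In Progress' once the device starts """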
+
+ payload = {"device": {"mac_addr": BASELINE_MAC_ADDR, "firmware": "asd"}}
+ r = requests.post(f"{API}/system/start", data=json.dumps(payload), timeout=10)
+ assert r.status_code == 200
+
+ until_true(
+ lambda: query_system_status().lower() == "waiting for device",
+ "system status is `waiting for device`",
+ 30,
+ )
+
+ start_test_device("x123", BASELINE_MAC_ADDR)
+
+ until_true(
+ lambda: query_system_status().lower() == "in progress",
+ "system status is `in progress`",
+ 600,
+ )
+
+@pytest.mark.skip()
+def test_start_testrun_already_in_progress(
+ testing_devices, # pylint: disable=W0613
+ testrun): # pylint: disable=W0613
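+  """ Test that starting a testrun while one is in progress returns 409 """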
+ payload = {"device": {"mac_addr": BASELINE_MAC_ADDR, "firmware": "asd"}}
+ r = requests.post(f"{API}/system/start", data=json.dumps(payload), timeout=10)
+
+ until_true(
+ lambda: query_system_status().lower() == "waiting for device",
+ "system status is `waiting for device`",
+ 30,
+ )
+
+ start_test_device("x123", BASELINE_MAC_ADDR)
+
+ until_true(
+ lambda: query_system_status().lower() == "in progress",
+ "system status is `in progress`",
+ 600,
+ )
+ r = requests.post(f"{API}/system/start", data=json.dumps(payload), timeout=10)
+ assert r.status_code == 409
+
+@pytest.mark.skip()
+def test_trigger_run(testing_devices, testrun): # pylint: disable=W0613
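+  """ Test for a complete baseline run reporting compliant results """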
+ payload = {"device": {"mac_addr": BASELINE_MAC_ADDR, "firmware": "asd"}}
+ r = requests.post(f"{API}/system/start", data=json.dumps(payload), timeout=10)
+ assert r.status_code == 200
+
+ until_true(
+ lambda: query_system_status().lower() == "waiting for device",
+ "system status is `waiting for device`",
+ 30,
+ )
+
+ start_test_device("x123", BASELINE_MAC_ADDR)
+
+ until_true(
+ lambda: query_system_status().lower() == "compliant",
+ "system status is `complete`",
+ 600,
+ )
+
+ stop_test_device("x123")
-def test_stop_running_not_running(testrun): # pylint: disable=W0613
# Validate response
- r = requests.post(f"{API}/system/stop",
- timeout=10)
- response = json.loads(r.text)
+ r = requests.get(f"{API}/system/status", timeout=5)
+ response = r.json()
pretty_print(response)
- assert r.status_code == 404
- assert response["error"] == "Testrun is not currently running"
+ # Validate results
+ results = {x["name"]: x for x in response["tests"]["results"]}
+ print(results)
+ # there are only 3 baseline tests
+ assert len(results) == 3
+
+ # Validate structure
+ with open(
+ os.path.join(
+ os.path.dirname(__file__), "mockito/running_system_status.json"
+ ), encoding="utf-8"
+ ) as f:
+ mockito = json.load(f)
+
+ # validate structure
+ assert set(dict_paths(mockito)).issubset(set(dict_paths(response)))
+
+ # Validate results structure
+ assert set(dict_paths(mockito["tests"]["results"][0])).issubset(
+ set(dict_paths(response["tests"]["results"][0]))
+ )
+
+ # Validate a result
+ assert results["baseline.compliant"]["result"] == "Compliant"
@pytest.mark.skip()
def test_multiple_runs(testing_devices, testrun): # pylint: disable=W0613
@@ -1078,7 +3260,7 @@ def test_multiple_runs(testing_devices, testrun): # pylint: disable=W0613
# Validate response
r = requests.get(f"{API}/system/status", timeout=5)
- response = json.loads(r.text)
+ response = r.json()
pretty_print(response)
# Validate results
@@ -1110,28 +3292,34 @@ def test_multiple_runs(testing_devices, testrun): # pylint: disable=W0613
stop_test_device("x123")
+@pytest.mark.skip()
+def test_status_non_compliant(testing_devices, testrun): # pylint: disable=W0613
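+  """ Test that the system status reaches 'Non-Compliant' """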
-def test_create_invalid_chars(empty_devices_dir, testrun): # pylint: disable=W0613
- # local_delete_devices(ALL_DEVICES)
- # We must start test run with no devices in local/devices for this test
- # to function as expected
- assert len(local_get_devices()) == 0
-
- # Test adding device
- device_1 = {
- "manufacturer": "/'disallowed characters///",
- "model": "First",
- "mac_addr": BASELINE_MAC_ADDR,
- "test_modules": {
- "dns": {"enabled": False},
- "connection": {"enabled": True},
- "ntp": {"enabled": True},
- "baseline": {"enabled": True},
- "nmap": {"enabled": True},
- },
+ r = requests.get(f"{API}/devices", timeout=5)
+ all_devices = r.json()
+ payload = {
+ "device": {
+ "mac_addr": all_devices[0]["mac_addr"],
+ "firmware": "asd"
+ }
}
-
- r = requests.post(f"{API}/device", data=json.dumps(device_1),
- timeout=5)
+ r = requests.post(f"{API}/system/start", data=json.dumps(payload),
+ timeout=10)
+ assert r.status_code == 200
print(r.text)
- print(r.status_code)
+
+ until_true(
+ lambda: query_system_status().lower() == "waiting for device",
+ "system status is `waiting for device`",
+ 30,
+ )
+
+ start_test_device("x123", all_devices[0]["mac_addr"])
+
+ until_true(
+ lambda: query_system_status().lower() == "non-compliant",
+ "system status is `complete",
+ 600,
+ )
+
+ stop_test_device("x123")
diff --git a/testing/baseline/test_baseline b/testing/baseline/test_baseline
index dab23620d..44a17d348 100755
--- a/testing/baseline/test_baseline
+++ b/testing/baseline/test_baseline
@@ -22,8 +22,6 @@ ifconfig
sudo apt-get update
sudo apt-get install openvswitch-common openvswitch-switch tcpdump jq moreutils coreutils isc-dhcp-client
-pip3 install pytest==7.4.4
-
# Setup device network
sudo ip link add dev endev0a type veth peer name endev0b
sudo ip link set dev endev0a up
@@ -42,7 +40,7 @@ sudo cp testing/baseline/system.json local/system.json
# Copy device configs to testrun
sudo cp -r testing/device_configs/* local/devices
-sudo bin/testrun --single-intf --no-ui --validate > $TESTRUN_OUT 2>&1 &
+sudo bin/testrun --single-intf --no-ui --target 02:42:aa:00:01:01 -fw 1.0 --validate > $TESTRUN_OUT 2>&1 &
TPID=$!
# Time to wait for testrun to be ready
@@ -74,6 +72,11 @@ echo "Done baseline test"
more $TESTRUN_OUT
-pytest testing/baseline/test_baseline.py
+# Needs to be sudo because this invokes bin/testrun
+sudo venv/bin/python3 -m pytest -v testing/baseline/test_baseline.py
+
+# Clean the device network
+sudo ip link del dev endev0a
+sudo docker network rm endev0
exit $?
\ No newline at end of file
diff --git a/testing/docker/ci_baseline/Dockerfile b/testing/docker/ci_baseline/Dockerfile
index 93ad905f9..2af5ed46a 100644
--- a/testing/docker/ci_baseline/Dockerfile
+++ b/testing/docker/ci_baseline/Dockerfile
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-FROM ubuntu@sha256:e6173d4dc55e76b87c4af8db8821b1feae4146dd47341e4d431118c7dd060a74
+FROM ubuntu@sha256:77d57fd89366f7d16615794a5b53e124d742404e20f035c22032233f1826bd6a
# Update and get all additional requirements not contained in the base image
RUN apt-get update && apt-get -y upgrade
diff --git a/testing/pylint/test_pylint b/testing/pylint/test_pylint
index 1f71482e5..7e102c7f8 100755
--- a/testing/pylint/test_pylint
+++ b/testing/pylint/test_pylint
@@ -14,27 +14,35 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-ERROR_LIMIT=25
-
-sudo cmd/install
+# Install python venv
+python3 -m venv venv
+# Activate the venv
source venv/bin/activate
-sudo pip3 install pylint==3.0.3
+# Install pylint
+pip install pylint==3.2.6
+
+# Declare the applicable files
files=$(find . -path ./venv -prune -o -name '*.py' -print)
+# Define the pylint output file
OUT=pylint.out
-rm -f $OUT && touch $OUT
+# Remove it if it already exists
+rm -f $OUT
+# Run pylint against the target files
+# Change the evaluation to total the number of errors
+# Output to the specified output file
pylint $files -ry --extension-pkg-allow-list=docker --evaluation="error + warning + refactor + convention" 2>/dev/null | tee -a $OUT
-new_errors=$(cat $OUT | grep -oP "(?!=^Your code has been rated at)([0-9]+)(?=\.00/10[ \(]?)" )
+# Obtain the total number of errors from the pylint out file
+errors=$(cat $OUT | grep -oP "(?!=^Your code has been rated at)([0-9]+)(?=\.00/10[ \(]?)" )
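+# (the custom --evaluation above makes pylint's reported score equal the raw
+# issue count, which is what this regex pulls out of "rated at N.00/10")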
-echo "$new_errors > $ERROR_LIMIT?"
-if (( $new_errors > $ERROR_LIMIT)); then
- echo new errors $new_errors > error limit $ERROR_LIMIT
- echo failing ..
+# Check if any errors exist
+if (( $errors > 0 )); then
+ echo "$errors pylint issues have been identified. These must be resolved before merging."
exit 1
fi
diff --git a/testing/tests/test_tests.py b/testing/tests/test_tests.py
index aaae1a09d..21be6b7de 100644
--- a/testing/tests/test_tests.py
+++ b/testing/tests/test_tests.py
@@ -96,7 +96,7 @@ def test_list_tests(capsys, results, test_matrix):
print('============')
print('============')
print('tests seen:')
- print('\n'.join(set([x.name for x in all_tests])))
+ print('\n'.join(set(x.name for x in all_tests)))
print('\ntesting for pass:')
print('\n'.join(ci_pass))
print('\ntesting for fail:')
diff --git a/testing/unit/conn/captures/monitor.pcap b/testing/unit/conn/captures/monitor.pcap
new file mode 100644
index 000000000..0dfb85ff4
Binary files /dev/null and b/testing/unit/conn/captures/monitor.pcap differ
diff --git a/testing/unit/conn/captures/startup.pcap b/testing/unit/conn/captures/startup.pcap
new file mode 100644
index 000000000..dadd2edbc
Binary files /dev/null and b/testing/unit/conn/captures/startup.pcap differ
diff --git a/testing/unit/conn/conn_module_test.py b/testing/unit/conn/conn_module_test.py
index d31a8051f..1e5798df1 100644
--- a/testing/unit/conn/conn_module_test.py
+++ b/testing/unit/conn/conn_module_test.py
@@ -13,13 +13,18 @@
# limitations under the License.
"""Module run all the Connection module related unit tests"""
from port_stats_util import PortStatsUtil
+from connection_module import ConnectionModule
import os
+import sys
import unittest
from common import logger
MODULE = 'conn'
-# Define the file paths
-TEST_FILES_DIR = 'testing/unit/' + MODULE
+# Define the directories
+TEST_FILES_DIR = '/testing/unit/' + MODULE
+OUTPUT_DIR = os.path.join(TEST_FILES_DIR, 'output/')
+CAPTURES_DIR = os.path.join(TEST_FILES_DIR, 'captures/')
+
ETHTOOL_RESULTS_COMPLIANT_FILE = os.path.join(TEST_FILES_DIR, 'ethtool',
'ethtool_results_compliant.txt')
ETHTOOL_RESULTS_NONCOMPLIANT_FILE = os.path.join(
@@ -34,6 +39,11 @@
ETHTOOL_PORT_STATS_POST_NONCOMPLIANT_FILE = os.path.join(
TEST_FILES_DIR, 'ethtool',
'ethtool_port_stats_post_monitor_noncompliant.txt')
+
+# Define the capture files to be used for the test
+STARTUP_CAPTURE_FILE = os.path.join(CAPTURES_DIR, 'startup.pcap')
+MONITOR_CAPTURE_FILE = os.path.join(CAPTURES_DIR, 'monitor.pcap')
+
LOGGER = None
@@ -46,6 +56,9 @@ def setUpClass(cls):
global LOGGER
LOGGER = logger.get_logger('unit_test_' + MODULE)
+ # Set the MAC address for device in capture files
+ os.environ['DEVICE_MAC'] = '98:f0:7b:d1:87:06'
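+    # (the module presumably reads DEVICE_MAC to decide which traffic in the
+    # capture files belongs to the device under test)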
+
# Test the port link status
def connection_port_link_compliant_test(self):
LOGGER.info('connection_port_link_compliant_test')
@@ -117,6 +130,45 @@ def connection_port_speed_autonegotiation_fail_test(self):
LOGGER.info(result)
self.assertEqual(result[0], False)
+ # Test proper filtering for ICMP protocol in DHCP packets
+ def connection_switch_dhcp_snooping_icmp_test(self):
+ LOGGER.info('connection_switch_dhcp_snooping_icmp_test')
+ conn_module = ConnectionModule(module=MODULE,
+ results_dir=OUTPUT_DIR,
+ startup_capture_file=STARTUP_CAPTURE_FILE,
+ monitor_capture_file=MONITOR_CAPTURE_FILE)
+ result = conn_module._connection_switch_dhcp_snooping() # pylint: disable=W0212
+ LOGGER.info(result)
+ self.assertEqual(result[0], True)
+
+ def communication_network_type_test(self):
+ LOGGER.info('communication_network_type_test')
+ conn_module = ConnectionModule(module=MODULE,
+ results_dir=OUTPUT_DIR,
+ startup_capture_file=STARTUP_CAPTURE_FILE,
+ monitor_capture_file=MONITOR_CAPTURE_FILE)
+ result = conn_module._communication_network_type() # pylint: disable=W0212
+ details_expected = {
+ 'mac_address': '98:f0:7b:d1:87:06',
+ 'multicast': {
+ 'from': 11,
+ 'to': 0
+ },
+ 'broadcast': {
+ 'from': 13,
+ 'to': 0
+ },
+ 'unicast': {
+ 'from': 0,
+ 'to': 0
+ }
+ }
+ LOGGER.info(result)
+ self.assertEqual(result[0], 'Informational')
+ self.assertEqual(result[1], 'Packet types detected: Multicast, Broadcast')
+ self.assertEqual(result[2], details_expected)
+
if __name__ == '__main__':
suite = unittest.TestSuite()
@@ -136,5 +188,17 @@ def connection_port_speed_autonegotiation_fail_test(self):
suite.addTest(
ConnectionModuleTest('connection_port_speed_autonegotiation_fail_test'))
+ # DHCP Snooping related tests
+ suite.addTest(
+ ConnectionModuleTest('connection_switch_dhcp_snooping_icmp_test'))
+
+  # Network communication type test
+ suite.addTest(ConnectionModuleTest('communication_network_type_test'))
+
runner = unittest.TextTestRunner()
- runner.run(suite)
+ test_result = runner.run(suite)
+
+ # Check if the tests failed and exit with the appropriate code
+ if not test_result.wasSuccessful():
+ sys.exit(1) # Return a non-zero exit code for failures
+ sys.exit(0) # Return zero for success
diff --git a/testing/unit/dns/dns_module_test.py b/testing/unit/dns/dns_module_test.py
index 6c3dec74d..d530498dd 100644
--- a/testing/unit/dns/dns_module_test.py
+++ b/testing/unit/dns/dns_module_test.py
@@ -16,7 +16,7 @@
import unittest
from scapy.all import rdpcap, DNS, wrpcap
import os
-from testreport import TestReport
+import sys
MODULE = 'dns'
@@ -28,7 +28,6 @@
LOCAL_REPORT = os.path.join(REPORTS_DIR, 'dns_report_local.html')
LOCAL_REPORT_NO_DNS = os.path.join(REPORTS_DIR, 'dns_report_local_no_dns.html')
-CONF_FILE = 'modules/test/' + MODULE + '/conf/module_config.json'
# Define the capture files to be used for the test
DNS_SERVER_CAPTURE_FILE = os.path.join(CAPTURES_DIR, 'dns.pcap')
@@ -44,11 +43,12 @@ def setUpClass(cls):
# Create the output directories and ignore errors if it already exists
os.makedirs(OUTPUT_DIR, exist_ok=True)
+ # Set the MAC address for device in capture files
+ os.environ['DEVICE_MAC'] = '38:d1:35:01:17:fe'
+
# Test the module report generation
def dns_module_report_test(self):
dns_module = DNSModule(module=MODULE,
- log_dir=OUTPUT_DIR,
- conf_file=CONF_FILE,
results_dir=OUTPUT_DIR,
dns_server_capture_file=DNS_SERVER_CAPTURE_FILE,
startup_capture_file=STARTUP_CAPTURE_FILE,
@@ -59,12 +59,6 @@ def dns_module_report_test(self):
# Read the generated report
with open(report_out_path, 'r', encoding='utf-8') as file:
report_out = file.read()
- formatted_report = self.add_formatting(report_out)
-
- # Write back the new formatted_report value
- out_report_path = os.path.join(OUTPUT_DIR, 'dns_report_with_dns.html')
- with open(out_report_path, 'w', encoding='utf-8') as file:
- file.write(formatted_report)
# Read the local good report
with open(LOCAL_REPORT, 'r', encoding='utf-8') as file:
@@ -103,8 +97,6 @@ def dns_module_report_no_dns_test(self):
wrpcap(monitor_cap_file, packets_monitor)
dns_module = DNSModule(module='dns',
- log_dir=OUTPUT_DIR,
- conf_file=CONF_FILE,
results_dir=OUTPUT_DIR,
dns_server_capture_file=dns_server_cap_file,
startup_capture_file=startup_cap_file,
@@ -115,12 +107,6 @@ def dns_module_report_no_dns_test(self):
# Read the generated report
with open(report_out_path, 'r', encoding='utf-8') as file:
report_out = file.read()
- formatted_report = self.add_formatting(report_out)
-
- # Write back the new formatted_report value
- out_report_path = os.path.join(OUTPUT_DIR, 'dns_report_no_dns.html')
- with open(out_report_path, 'w', encoding='utf-8') as file:
- file.write(formatted_report)
# Read the local good report
with open(LOCAL_REPORT_NO_DNS, 'r', encoding='utf-8') as file:
@@ -128,17 +114,6 @@ def dns_module_report_no_dns_test(self):
self.assertEqual(report_out, report_local)
-  def add_formatting(self, body):
-    return f'''
-    <html>
-      {TestReport().generate_head()}
-      <body>
-        {body}
-      </body>
-    </html>
-    '''
diff --git a/testing/unit/dns/reports/dns_report_local.html b/testing/unit/dns/reports/dns_report_local.html
--- a/testing/unit/dns/reports/dns_report_local.html
+++ b/testing/unit/dns/reports/dns_report_local.html
[Expected-report fixture regenerated; HTML markup omitted here. The old "DNS
Module" report summarised 71 requests to the local DNS server, 6 requests to
external DNS servers, 77 total DNS requests and 91 total DNS responses, then
listed every packet in a Source/Destination/Type/URL table of the individual
mqtt.googleapis.com and pool.ntp.org queries and responses. The new report
summarises 71 local requests, 0 external requests, 71 total requests and 84
total responses, and replaces the per-packet listing with an aggregated table:

  Source       Destination  Resolved IP             Type      URL                  Count
  10.10.10.14  10.10.10.4   N/A                     Query     mqtt.googleapis.com  64
  10.10.10.4   10.10.10.14  173.194.195.206         Response  mqtt.googleapis.com  38
  10.10.10.4   10.10.10.14  2607:f8b0:4001:c11::ce  Response  mqtt.googleapis.com  32
  10.10.10.14  10.10.10.4   N/A                     Query     pool.ntp.org         7
  10.10.10.4   10.10.10.14  N/A                     Response  pool.ntp.org         4
  10.10.10.4   10.10.10.14  5.78.89.3               Response  pool.ntp.org         2
  10.10.10.4   10.10.10.14  199.68.201.234          Response  pool.ntp.org         2
  10.10.10.4   10.10.10.14  2607:f8b0:4001:c08::ce  Response  mqtt.googleapis.com  6]
diff --git a/testing/unit/dns/reports/dns_report_local_no_dns.html b/testing/unit/dns/reports/dns_report_local_no_dns.html
index 20f3f7511..f144655f5 100644
--- a/testing/unit/dns/reports/dns_report_local_no_dns.html
+++ b/testing/unit/dns/reports/dns_report_local_no_dns.html
@@ -1,4 +1,4 @@
[Markup-only change around the "DNS Module" heading; the title text is unchanged.]
diff --git a/testing/unit/framework/session_test.py b/testing/unit/framework/session_test.py
new file mode 100644
index 000000000..8c48c6046
--- /dev/null
+++ b/testing/unit/framework/session_test.py
@@ -0,0 +1,57 @@
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Session methods tests"""
+
+from unittest.mock import patch
+from core import session
+
+
+class MockUtil:
+ """mock util functions"""
+
+ @staticmethod
+ def get_sys_interfaces():
+ return {"eth0": "00:1A:2B:3C:4D:5E", "eth1": "66:77:88:99:AA:BB"}
+
+ @staticmethod
+ def diff_dicts(d1, d2): # pylint: disable=W0613
+ return {
+ "items_added": {"eth1": "66:77:88:99:AA:BB"},
+ "items_removed": {"eth2": "00:1B:2C:3D:4E:5F"},
+ }
+
+
+class TestrunSessionMock(session.TestrunSession):
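+  """TestrunSession stub with a fixed interface snapshot"""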
+ def __init__(self): # pylint: disable=W0231
+ self._ifaces = {"eth0": "00:1A:2B:3C:4D:5E", "eth2": "66:77:88:99:AA:BB"}
+
+
+util = MockUtil()
+
+
+@patch("common.util.get_sys_interfaces", side_effect=util.get_sys_interfaces)
+@patch("common.util.diff_dicts", side_effect=util.diff_dicts)
+def test_detect_network_adapters_change(
+    mock_diff_dicts,  # pylint: disable=W0613
+    mock_get_sys_interfaces,  # pylint: disable=W0613
+):
+ testrun_session = TestrunSessionMock()
+
+ # Test added and removed
+ result = testrun_session.detect_network_adapters_change()
+ assert result == {
+ "adapters_added": {"eth1": "66:77:88:99:AA:BB"},
+ "adapters_removed": {"eth2": "00:1B:2C:3D:4E:5F"},
+ }
diff --git a/testing/unit/framework/util_test.py b/testing/unit/framework/util_test.py
new file mode 100644
index 000000000..ec8fd48fc
--- /dev/null
+++ b/testing/unit/framework/util_test.py
@@ -0,0 +1,61 @@
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Util tests"""
+
+from collections import namedtuple
+from unittest.mock import patch
+from common import util
+from net_orc import ip_control
+
+Snicaddr = namedtuple('snicaddr',
+ ['family', 'address'])
+
+mock_addrs = {
+ 'eth0': [Snicaddr(17, '00:1A:2B:3C:4D:5E')],
+ 'wlan0': [Snicaddr(17, '66:77:88:99:AA:BB')],
+ 'enp0s3': [Snicaddr(17, '11:22:33:44:55:66')]
+}
+
+@patch('psutil.net_if_addrs')
+def test_get_sys_interfaces(mock_net_if_addrs):
+ mock_net_if_addrs.return_value = mock_addrs
+ # Expected result
+ expected = {
+ 'eth0': '00:1A:2B:3C:4D:5E',
+ 'enp0s3': '11:22:33:44:55:66'
+ }
+
+ result = ip_control.IPControl.get_sys_interfaces()
+ # Assert the result
+ assert result == expected
+
+
+def test_diff_dicts():
+ d1 = {'a': 1, 'b': 2}
+ d2 = {'a': 1, 'b': 2}
+  # Assert equal dicts
+  assert not util.diff_dicts(d1, d2)
+  d2 = {'a': 1, 'c': 3}
+  expected = {'items_removed': {'b': 2}, 'items_added': {'c': 3}}
+  # Assert items added and removed
+  assert util.diff_dicts(d1, d2) == expected
+ d1 = {'a': 1}
+ d2 = {'b': 2}
+ expected = {
+ 'items_removed': {'a': 1},
+ 'items_added': {'b': 2}
+ }
+  # Assert completely different dicts
+ assert util.diff_dicts(d1, d2) == expected
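For reference, a minimal diff_dicts consistent with these assertions; this is a sketch only, since the actual helper lives in common/util.py and may handle more cases (e.g. changed values):

    def diff_dicts(d1, d2):
        # Keys present in d1 but missing from d2 were removed
        removed = {k: v for k, v in d1.items() if k not in d2}
        # Keys present in d2 but missing from d1 were added
        added = {k: v for k, v in d2.items() if k not in d1}
        result = {}
        if removed:
            result['items_removed'] = removed
        if added:
            result['items_added'] = added
        return result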
diff --git a/testing/unit/ntp/ntp_module_test.py b/testing/unit/ntp/ntp_module_test.py
index 20dd88ef1..ed5934048 100644
--- a/testing/unit/ntp/ntp_module_test.py
+++ b/testing/unit/ntp/ntp_module_test.py
@@ -11,12 +11,12 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-"""Module run all the DNS related unit tests"""
+"""Module run all the NTP related unit tests"""
from ntp_module import NTPModule
import unittest
from scapy.all import rdpcap, NTP, wrpcap
import os
-from testreport import TestReport
+import sys
MODULE = 'ntp'
@@ -28,7 +28,6 @@
LOCAL_REPORT = os.path.join(REPORTS_DIR,'ntp_report_local.html')
LOCAL_REPORT_NO_NTP = os.path.join(REPORTS_DIR,'ntp_report_local_no_ntp.html')
-CONF_FILE = 'modules/test/' + MODULE + '/conf/module_config.json'
# Define the capture files to be used for the test
NTP_SERVER_CAPTURE_FILE = os.path.join(CAPTURES_DIR,'ntp.pcap')
@@ -48,8 +47,6 @@ def setUpClass(cls):
# Test the module report generation
def ntp_module_report_test(self):
ntp_module = NTPModule(module=MODULE,
- log_dir=OUTPUT_DIR,
- conf_file=CONF_FILE,
results_dir=OUTPUT_DIR,
ntp_server_capture_file=NTP_SERVER_CAPTURE_FILE,
startup_capture_file=STARTUP_CAPTURE_FILE,
@@ -60,12 +57,6 @@ def ntp_module_report_test(self):
# Read the generated report
with open(report_out_path, 'r', encoding='utf-8') as file:
report_out = file.read()
- formatted_report = self.add_formatting(report_out)
-
- # Write back the new formatted_report value
- out_report_path = os.path.join(OUTPUT_DIR, 'ntp_report_with_ntp.html')
- with open(out_report_path, 'w', encoding='utf-8') as file:
- file.write(formatted_report)
# Read the local good report
with open(LOCAL_REPORT, 'r', encoding='utf-8') as file:
@@ -104,8 +95,6 @@ def ntp_module_report_no_ntp_test(self):
wrpcap(monitor_cap_file, packets_monitor)
ntp_module = NTPModule(module='dns',
- log_dir=OUTPUT_DIR,
- conf_file=CONF_FILE,
results_dir=OUTPUT_DIR,
ntp_server_capture_file=ntp_server_cap_file,
startup_capture_file=startup_cap_file,
@@ -116,12 +105,6 @@ def ntp_module_report_no_ntp_test(self):
# Read the generated report
with open(report_out_path, 'r', encoding='utf-8') as file:
report_out = file.read()
- formatted_report = self.add_formatting(report_out)
-
- # Write back the new formatted_report value
- out_report_path = os.path.join(OUTPUT_DIR,'ntp_report_no_ntp.html')
- with open(out_report_path, 'w', encoding='utf-8') as file:
- file.write(formatted_report)
# Read the local good report
with open(LOCAL_REPORT_NO_NTP, 'r', encoding='utf-8') as file:
@@ -129,16 +112,6 @@ def ntp_module_report_no_ntp_test(self):
self.assertEqual(report_out, report_local)
- def add_formatting(self, body):
-   return f'''
-   <!DOCTYPE html>
-   <html>
-   {TestReport().generate_head()}
-   <body>
-   {body}
-   </body>
- NTP Module
+
diff --git a/testing/unit/protocol/protocol_module_test.py b/testing/unit/protocol/protocol_module_test.py
index 32a0021cd..9d474ab91 100644
--- a/testing/unit/protocol/protocol_module_test.py
+++ b/testing/unit/protocol/protocol_module_test.py
@@ -15,6 +15,7 @@
from protocol_bacnet import BACnet
import unittest
import os
+import sys
from common import logger
import inspect
@@ -46,7 +47,6 @@ def setUpClass(cls):
BACNET = BACnet(log=LOGGER,
captures_dir=CAPTURES_DIR,
capture_file='bacnet.pcap',
- bin_dir='modules/test/protocol/bin',
device_hw_addr=HW_ADDR)
# Test the BACNet traffic for a matching Object ID and HW address
@@ -103,4 +103,9 @@ def bacnet_protocol_validate_device_fail_test(self):
suite.addTest(ProtocolModuleTest('bacnet_protocol_validate_device_fail_test'))
runner = unittest.TextTestRunner()
- runner.run(suite)
+ test_result = runner.run(suite)
+
+ # Check if the tests failed and exit with the appropriate code
+ if not test_result.wasSuccessful():
+ sys.exit(1) # Return a non-zero exit code for failures
+ sys.exit(0) # Return zero for success
diff --git a/testing/unit/report/report_compliant.json b/testing/unit/report/report_compliant.json
index 17e994d20..08ff585ad 100644
--- a/testing/unit/report/report_compliant.json
+++ b/testing/unit/report/report_compliant.json
@@ -68,7 +68,7 @@
},
{
"name": "connection.switch.arp_inspection",
- "description": "Device uses ARP",
+ "description": "Device uses ARP correctly",
"expected_behavior": "Device continues to operate correctly when ARP inspection is enabled on the switch. No functionality is lost with ARP inspection enabled.",
"required_result": "Required",
"result": "Compliant"
diff --git a/testing/unit/report/report_noncompliant.json b/testing/unit/report/report_noncompliant.json
index 98fbeb284..b3ba74c0d 100644
--- a/testing/unit/report/report_noncompliant.json
+++ b/testing/unit/report/report_noncompliant.json
@@ -77,7 +77,7 @@
},
{
"name": "connection.switch.arp_inspection",
- "description": "Device uses ARP",
+ "description": "Device uses ARP correctly",
"expected_behavior": "Device continues to operate correctly when ARP inspection is enabled on the switch. No functionality is lost with ARP inspection enabled.",
"required_result": "Required",
"result": "Compliant"
diff --git a/testing/unit/report/report_test.py b/testing/unit/report/report_test.py
index f92666b2c..e5c8b61a5 100644
--- a/testing/unit/report/report_test.py
+++ b/testing/unit/report/report_test.py
@@ -16,6 +16,9 @@
from testreport import TestReport
import os
import json
+import shutil
+from jinja2 import Template
+import re
MODULE = 'report'
@@ -24,57 +27,171 @@
TEST_FILES_DIR = os.path.join('testing/unit', MODULE)
OUTPUT_DIR = os.path.join(TEST_FILES_DIR, 'output/')
+REPORT_RESOURCES_DIR = 'resources/report'
+
+CSS_PATH = os.path.join(REPORT_RESOURCES_DIR, 'test_report_styles.css')
+HTML_PATH = os.path.join(REPORT_RESOURCES_DIR, 'test_report_template.html')
class ReportTest(unittest.TestCase):
"""Contains and runs all the unit tests concerning DNS behaviors"""
@classmethod
def setUpClass(cls):
+ """Class-level setup to prepare for tests"""
+
+ # Delete old files from output dir
+ if os.path.exists(OUTPUT_DIR) and os.path.isdir(OUTPUT_DIR):
+ shutil.rmtree(OUTPUT_DIR)
+
# Create the output directories and ignore errors if it already exists
os.makedirs(OUTPUT_DIR, exist_ok=True)
def create_report(self, results_file_path):
+ """Create the HTML report from the JSON file"""
+
+ # Create the TestReport object
report = TestReport()
+
# Load the json report data
with open(results_file_path, 'r', encoding='utf-8') as file:
report_json = json.loads(file.read())
+
+ # Populate the report with JSON data
report.from_json(report_json)
- # Load all module html reports
+
+ # Load each module html report
reports_md = []
- #reports_md.append(self.get_module_html_report('tls'))
reports_md.append(self.get_module_html_report('dns'))
reports_md.append(self.get_module_html_report('services'))
reports_md.append(self.get_module_html_report('ntp'))
+
+ # Add all the module reports to the full report
report.add_module_reports(reports_md)
- # Save report to file
+ # Create the HTML filename based on the JSON name
file_name = os.path.splitext(os.path.basename(results_file_path))[0]
report_out_file = os.path.join(OUTPUT_DIR, file_name + '.html')
+
+ # Save report as HTML file
with open(report_out_file, 'w', encoding='utf-8') as file:
file.write(report.to_html())
def report_compliant_test(self):
+ """Generate a report for the compliant test"""
+
+ # Generate a compliant report based on the 'report_compliant.json' file
self.create_report(os.path.join(TEST_FILES_DIR, 'report_compliant.json'))
def report_noncompliant_test(self):
+ """Generate a report for the non-compliant test"""
+
+ # Generate non-compliant report based on the 'report_noncompliant.json' file
self.create_report(os.path.join(TEST_FILES_DIR, 'report_noncompliant.json'))
+  # Generate formatted reports for each report produced by
+  # the test containers.
+  # Not a unit test, but it can't run from within the test module container
+  # and must be run through the venv. Useful for visually inspecting report
+  # formatting changes without having to re-run a device test.
+ def report_formatting(self):
+ """Apply formatting and generate HTML reports for visual inspection"""
+
+ # List of modules for which to generate formatted reports
+ test_modules = ['conn','dns','ntp','protocol','services','tls']
+
+ # List all items from UNIT_TEST_DIR
+ unit_tests = os.listdir(UNIT_TEST_DIR)
+
+  # Loop through each item in UNIT_TEST_DIR
+ for test in unit_tests:
+
+  # If the module name is in the test_modules list
+ if test in test_modules:
+
+  # Construct the path of the output dir for the module
+ output_dir = os.path.join(UNIT_TEST_DIR,test,'output')
+
+ # Check if output dir exists
+ if os.path.isdir(output_dir):
+  # List all files from the output dir
+ output_files = os.listdir(output_dir)
+
+ # Loop through each file
+ for file in output_files:
+
+  # Check if it is an HTML file
+ if file.endswith('.html'):
+
+  # Construct the full path of the HTML file
+ report_out_path = os.path.join(output_dir,file)
+
+ # Open the html file in read mode
+ with open(report_out_path, 'r', encoding='utf-8') as f:
+ report_out = f.read()
+ # Add the formatting
+ formatted_report = self.add_html_formatting(report_out)
+
+ # Write back the new formatted_report value
+ out_report_dir = os.path.join(OUTPUT_DIR, test)
+ os.makedirs(out_report_dir, exist_ok=True)
+
+ with open(os.path.join(
+ out_report_dir,file), 'w',
+ encoding='utf-8') as f:
+ f.write(formatted_report)
+
+ def add_html_formatting(self, body):
+ """Wrap the raw report inside a complete HTML structure with styles"""
+
+ # Load the css file
+ with open(CSS_PATH, 'r', encoding='UTF-8') as css_file:
+ styles = css_file.read()
+
+ # Load the html file
+ with open(HTML_PATH, 'r', encoding='UTF-8') as html_file:
+ html_content = html_file.read()
+
+ # Search for head content using regex
+  head = re.search(r'<head>.*?</head>', html_content, re.DOTALL).group(0)
+ # Define the html template
+  html_template = f'''
+  <!DOCTYPE html>
+  <html>
+  {head}
+  <body>
+  {body}
+  </body>
+  </html>
+  '''
+ # Create a Jinja2 template from the string
+ template = Template(html_template)
+
+ # Render the template with css styles
+ return template.render(styles=styles, body=body)
+
def get_module_html_report(self, module):
- # Combine the path components using os.path.join
+ """Load the HTML report for a specific module"""
+
+ # Define the path to the module's HTML report file
report_file = os.path.join(
UNIT_TEST_DIR,
os.path.join(module,
os.path.join('reports', module + '_report_local.html')))
+ # Read and return the content of the report file
with open(report_file, 'r', encoding='utf-8') as file:
report = file.read()
return report
if __name__ == '__main__':
+
suite = unittest.TestSuite()
suite.addTest(ReportTest('report_compliant_test'))
suite.addTest(ReportTest('report_noncompliant_test'))
+ # Create html test reports for each module in 'output' dir
+ suite.addTest(ReportTest('report_formatting'))
+
runner = unittest.TextTestRunner()
runner.run(suite)
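The add_html_formatting helper above relies on Jinja2 to inject the stylesheet into an already-assembled HTML string: the f-string substitutes {head} and {body} immediately, so render() only fills Jinja2 placeholders carried over from the loaded template. A minimal illustration, assuming the template's head contains a {{ styles }} placeholder (an assumption; the actual template file is not shown here):

    from jinja2 import Template

    # The {{ styles }} placeholder survives the f-string substitution below
    # because it lives inside the head variable, not the f-string literal.
    head = '<head><style>{{ styles }}</style></head>'
    html_template = f'<html>{head}<body>...</body></html>'
    print(Template(html_template).render(styles='body { margin: 0; }'))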
diff --git a/testing/unit/risk_profile/profiles/risk_profile_valid_high.json b/testing/unit/risk_profile/profiles/risk_profile_valid_high.json
index 338c91abb..c4887535a 100644
--- a/testing/unit/risk_profile/profiles/risk_profile_valid_high.json
+++ b/testing/unit/risk_profile/profiles/risk_profile_valid_high.json
@@ -1,52 +1,62 @@
{
- "name": "Primary profile",
- "version": "1.3-alpha",
- "created": "2024-07-01",
+ "name": "Primary Profile High Risk",
+ "version": "1.4-a",
+ "created": "2024-10-01",
"status": "Valid",
+ "risk": "High",
"questions": [
{
"question": "What type of device is this?",
- "answer": "IoT Gateway"
+ "answer": "IoT Gateway",
+ "risk": "High"
},
{
"question": "How will this device be used at Google?",
- "answer": "sakjdhaskjdh"
+ "answer": "Controlling things"
},
{
"question": "Is this device going to be managed by Google or a third party?",
- "answer": "Google"
+ "answer": "Google",
+ "risk": "Limited"
},
{
"question": "Will the third-party device administrator be able to grant access to authorized Google personnel upon request?",
- "answer": "N/A"
+ "answer": "Yes",
+ "risk": "Limited"
},
{
"question": "Are any of the following statements true about your device?",
"answer": [
- 3
- ]
+ 2
+ ],
+ "risk": "High"
},
{
"question": "Which of the following statements are true about this device?",
"answer": [
- 5
- ]
+ 0,
+ 1
+ ],
+ "risk": "High"
},
{
"question": "Does the network protocol assure server-to-client identity verification?",
- "answer": "Yes"
+ "answer": "No",
+ "risk": "High"
},
{
"question": "Click the statements that best describe the characteristics of this device.",
"answer": [
- 5
- ]
+ 2
+ ],
+ "risk": "High"
},
{
"question": "Are any of the following statements true about this device?",
"answer": [
- 6
- ]
+ 0
+ ],
+ "risk": "High"
},
{
"question": "Comments",
diff --git a/testing/unit/risk_profile/profiles/risk_profile_valid_limited.json b/testing/unit/risk_profile/profiles/risk_profile_valid_limited.json
index fba02d4ba..09905fe1d 100644
--- a/testing/unit/risk_profile/profiles/risk_profile_valid_limited.json
+++ b/testing/unit/risk_profile/profiles/risk_profile_valid_limited.json
@@ -1,52 +1,61 @@
{
- "name": "Primary profile",
- "version": "1.3-alpha",
- "created": "2024-07-01",
+ "name": "Primary Profile Limited Risk",
+ "version": "1.4-a",
+ "created": "2024-10-01",
"status": "Valid",
+ "risk": "Limited",
"questions": [
{
"question": "What type of device is this?",
- "answer": "Sensor - Lighting"
+ "answer": "Controller - Lighting",
+ "risk": "Limited"
},
{
"question": "How will this device be used at Google?",
- "answer": "sakjdhaskjdh"
+ "answer": "Controlling Lights"
},
{
"question": "Is this device going to be managed by Google or a third party?",
- "answer": "Google"
+ "answer": "Google",
+ "risk": "Limited"
},
{
"question": "Will the third-party device administrator be able to grant access to authorized Google personnel upon request?",
- "answer": "N/A"
+ "answer": "N/A",
+ "risk": "Limited"
},
{
"question": "Are any of the following statements true about your device?",
"answer": [
3
- ]
+ ],
+ "risk": "Limited"
},
{
"question": "Which of the following statements are true about this device?",
"answer": [
5
- ]
+ ],
+ "risk": "Limited"
},
{
"question": "Does the network protocol assure server-to-client identity verification?",
- "answer": "Yes"
+ "answer": "Yes",
+ "risk": "Limited"
},
{
"question": "Click the statements that best describe the characteristics of this device.",
"answer": [
5
- ]
+ ],
+ "risk": "Limited"
},
{
"question": "Are any of the following statements true about this device?",
"answer": [
6
- ]
+ ],
+ "risk": "Limited"
},
{
"question": "Comments",
diff --git a/testing/unit/risk_profile/risk_profile_test.py b/testing/unit/risk_profile/risk_profile_test.py
index 23cce43d3..ab5f8f1f2 100644
--- a/testing/unit/risk_profile/risk_profile_test.py
+++ b/testing/unit/risk_profile/risk_profile_test.py
@@ -15,6 +15,7 @@
import unittest
import os
import json
+import sys
from risk_profile import RiskProfile
SECONDS_IN_YEAR = 31536000
@@ -35,9 +36,9 @@ class RiskProfileTest(unittest.TestCase):
def setUpClass(cls):
# Create the output directories and ignore errors if it already exists
os.makedirs(OUTPUT_DIR, exist_ok=True)
- with open(os.path.join('resources',
- 'risk_assessment.json'),
- 'r', encoding='utf-8') as file:
+ with open(os.path.join('resources', 'risk_assessment.json'),
+ 'r',
+ encoding='utf-8') as file:
cls.profile_format = json.loads(file.read())
def risk_profile_high_test(self):
@@ -81,7 +82,6 @@ def risk_profile_rename_test(self):
with open(risk_profile_path, 'r', encoding='utf-8') as file:
risk_profile_json = json.loads(file.read())
-
# Create the RiskProfile object from the json file
risk_profile = RiskProfile(risk_profile_json, self.profile_format)
@@ -158,6 +158,7 @@ def risk_profile_update_risk_test(self):
# Risk should now be limited after update
self.assertEqual(risk_profile.risk, 'Limited')
+
if __name__ == '__main__':
suite = unittest.TestSuite()
@@ -169,4 +170,9 @@ def risk_profile_update_risk_test(self):
suite.addTest(RiskProfileTest('risk_profile_update_risk_test'))
runner = unittest.TextTestRunner()
- runner.run(suite)
+ test_result = runner.run(suite)
+
+ # Check if the tests failed and exit with the appropriate code
+ if not test_result.wasSuccessful():
+ sys.exit(1) # Return a non-zero exit code for failures
+ sys.exit(0) # Return zero for success
diff --git a/testing/unit/run.sh b/testing/unit/run.sh
old mode 100644
new mode 100755
index 72ca9dcb0..90f51ac52
--- a/testing/unit/run.sh
+++ b/testing/unit/run.sh
@@ -14,4 +14,59 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-sudo docker run --rm -it --name unit-test testrun/unit-test /bin/bash ./run_tests.sh
\ No newline at end of file
+# Must be run from the root directory of Testrun
+run_test() {
+ local MODULE_NAME=$1
+ shift
+ local DIRS=("$@")
+
+ # Define the locations of the unit test files
+ local UNIT_TEST_DIR_SRC="$PWD/testing/unit/$MODULE_NAME"
+ local UNIT_TEST_FILE_SRC="$UNIT_TEST_DIR_SRC/${MODULE_NAME}_module_test.py"
+
+ # Define the location in the container to
+ # load the unit test files
+ local UNIT_TEST_DIR_DST="/testing/unit/$MODULE_NAME"
+ local UNIT_TEST_FILE_DST="/testrun/python/src/module_test.py"
+
+ # Build the docker run command
+ local DOCKER_CMD="sudo docker run --rm -it --name ${MODULE_NAME}-unit-test"
+
+ # Add volume mounts for the main test file
+ DOCKER_CMD="$DOCKER_CMD -v $UNIT_TEST_FILE_SRC:$UNIT_TEST_FILE_DST"
+
+ # Add volume mounts for additional directories
+ for DIR in "${DIRS[@]}"; do
+ DOCKER_CMD="$DOCKER_CMD -v $UNIT_TEST_DIR_SRC/$DIR:$UNIT_TEST_DIR_DST/$DIR"
+ done
+
+ # Add the container image and entry point
+ DOCKER_CMD="$DOCKER_CMD testrun/${MODULE_NAME}-test $UNIT_TEST_FILE_DST"
+
+ # Execute the docker command
+ eval $DOCKER_CMD
+}
+
+# Run all test module tests from within their containers
+run_test "conn" "captures" "ethtool" "output"
+run_test "dns" "captures" "reports" "output"
+run_test "ntp" "captures" "reports" "output"
+run_test "protocol" "captures" "output"
+run_test "services" "reports" "results" "output"
+run_test "tls" "captures" "CertAuth" "certs" "reports" "root_certs" "output"
+
+# Activate Python virtual environment
+source venv/bin/activate
+
+# Add the framework sources
+PYTHONPATH="$PWD/framework/python/src:$PWD/framework/python/src/common"
+
+# Set the python path with all sources
+export PYTHONPATH
+
+# Run all host level unit tests from within the venv
+python3 testing/unit/risk_profile/risk_profile_test.py
+python3 testing/unit/report/report_test.py
+
+deactivate
\ No newline at end of file
diff --git a/testing/unit/run_report_test.sh b/testing/unit/run_report_test.sh
new file mode 100644
index 000000000..49f4ca6c2
--- /dev/null
+++ b/testing/unit/run_report_test.sh
@@ -0,0 +1,67 @@
+#!/bin/bash -e
+
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Must be run from the root directory of Testrun
+run_test(){
+
+ local REPORT_TEST_FILE=$1
+
+ # Activate Python virtual environment
+ source venv/bin/activate
+
+ # Add the framework sources
+ PYTHONPATH="$PWD/framework/python/src:$PWD/framework/python/src/common"
+
+ # Set the python path with all sources
+ export PYTHONPATH
+
+ # Temporarily disable 'set -e' to capture exit code
+ set +e
+
+ # Run all host level unit tests from within the venv
+ python3 $REPORT_TEST_FILE
+
+ # Capture the exit code
+ local exit_code=$?
+
+ deactivate
+
+ # Return the captured exit code to the caller
+ return $exit_code
+}
+
+
+# Check if the script received any arguments
+if [[ $# -lt 1 ]]; then
+ echo "Usage: $0 "
+ exit 1
+fi
+
+# Call the run_test function with the provided arguments
+run_test "$@"
+
+# Capture the exit code from the run_test function
+exit_code=$?
+
+# If the exit code is not zero, print an error message
+if [ $exit_code -ne 0 ]; then
+ echo "Tests failed with exit code $exit_code"
+else
+ echo "All tests passed successfully."
+fi
+
+# Exit with the captured exit code
+exit $exit_code
\ No newline at end of file
diff --git a/testing/unit/run_test_module.sh b/testing/unit/run_test_module.sh
new file mode 100644
index 000000000..8e31e6860
--- /dev/null
+++ b/testing/unit/run_test_module.sh
@@ -0,0 +1,82 @@
+#!/bin/bash -e
+
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Must be run from the root directory of Testrun
+
+# Read the JSON file into a variable
+DEVICE_TEST_PACK=$(
+
+# Check if the script received any arguments
+if [[ $# -lt 1 ]]; then
+ echo "Usage: $0 <module_name> [directories...]"
+ exit 1
+fi
+
+# Call the run_test function with the provided arguments
+run_test "$@"
+
+# Capture the exit code from the run_test function
+exit_code=$?
+
+# If the exit code is not zero, print an error message
+if [ $exit_code -ne 0 ]; then
+ echo "Tests failed with exit code $exit_code"
+else
+ echo "All tests passed successfully."
+fi
+
+# Exit with the captured exit code
+exit $exit_code
diff --git a/testing/unit/run_tests.sh b/testing/unit/run_tests.sh
deleted file mode 100644
index 48c667934..000000000
--- a/testing/unit/run_tests.sh
+++ /dev/null
@@ -1,69 +0,0 @@
-#!/bin/bash -e
-
-# Copyright 2023 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# https://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# This script should be run from within the unit_test directory. If
-# it is run outside this directory, paths will not be resolved correctly.
-
-# Move into the root directory of test-run
-pushd ../../ >/dev/null 2>&1
-
-echo "Root dir: $PWD"
-
-# Add the framework sources
-PYTHONPATH="$PWD/framework/python/src:$PWD/framework/python/src/common"
-
-# Add the test module sources
-PYTHONPATH="$PYTHONPATH:$PWD/modules/test/base/python/src"
-PYTHONPATH="$PYTHONPATH:$PWD/modules/test/conn/python/src"
-PYTHONPATH="$PYTHONPATH:$PWD/modules/test/tls/python/src"
-PYTHONPATH="$PYTHONPATH:$PWD/modules/test/dns/python/src"
-PYTHONPATH="$PYTHONPATH:$PWD/modules/test/services/python/src"
-PYTHONPATH="$PYTHONPATH:$PWD/modules/test/ntp/python/src"
-PYTHONPATH="$PYTHONPATH:$PWD/modules/test/protocol/python/src"
-
-
-# Set the python path with all sources
-export PYTHONPATH
-
-# Run the DHCP Unit tests
-python3 -u $PWD/modules/network/dhcp-1/python/src/grpc_server/dhcp_config_test.py
-python3 -u $PWD/modules/network/dhcp-2/python/src/grpc_server/dhcp_config_test.py
-
-# Run the Conn Module Unit Tests
-python3 -u $PWD/testing/unit/conn/conn_module_test.py
-
-# Run the TLS Module Unit Tests
-python3 -u $PWD/testing/unit/tls/tls_module_test.py
-
-# Run the DNS Module Unit Tests
-python3 -u $PWD/testing/unit/dns/dns_module_test.py
-
-# Run the NMAP Module Unit Tests
-python3 -u $PWD/testing/unit/services/services_module_test.py
-
-# Run the NTP Module Unit Tests
-python3 -u $PWD/testing/unit/ntp/ntp_module_test.py
-
-# Run the Report Unit Tests
-python3 -u $PWD/testing/unit/report/report_test.py
-
-# Run the RiskProfile Unit Tests
-python3 -u $PWD/testing/unit/risk_profile/risk_profile_test.py
-
-# Run the RiskProfile Unit Tests
-python3 -u $PWD/testing/unit/protocol/protocol_module_test.py
-
-popd >/dev/null 2>&1
diff --git a/testing/unit/services/output/services.log b/testing/unit/services/output/services.log
deleted file mode 100644
index 7df3f745b..000000000
--- a/testing/unit/services/output/services.log
+++ /dev/null
@@ -1,6 +0,0 @@
-Jun 17 09:23:01 test_services INFO Module report generated at: testing/unit/services/output/services_report.html
-Jun 17 09:23:01 test_services INFO Module report generated at: testing/unit/services/output/services_report.html
-Jun 17 09:23:01 test_services INFO Module report generated at: testing/unit/services/output/services_report.html
-Jun 17 09:32:48 test_services INFO Module report generated at: testing/unit/services/output/services_report.html
-Jun 17 09:32:48 test_services INFO Module report generated at: testing/unit/services/output/services_report.html
-Jun 17 09:32:48 test_services INFO Module report generated at: testing/unit/services/output/services_report.html
diff --git a/testing/unit/services/reports/services_report_all_closed_local.html b/testing/unit/services/reports/services_report_all_closed_local.html
index a726762d4..356a82d35 100644
--- a/testing/unit/services/reports/services_report_all_closed_local.html
+++ b/testing/unit/services/reports/services_report_all_closed_local.html
@@ -1,4 +1,4 @@
-