Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
7 changes: 7 additions & 0 deletions .github/workflows/L2-tests.yml
Original file line number Diff line number Diff line change
Expand Up @@ -220,6 +220,11 @@ jobs:
&&
sudo cmake --install build/rdkservices

- name: Regenerate bundles for cgroupv2 compatibility
working-directory: Dobby/tests/L2_testing/test_runner/bundle/
run: |
python3 regenerate_bundles.py

- name: Run the l2 test
working-directory: Dobby/tests/L2_testing/test_runner/
run: |
Expand All @@ -234,6 +239,7 @@ jobs:
-d $GITHUB_WORKSPACE
&&
lcov
--ignore-errors unused
-r coverage.info
Comment thread
Sonajeya31 marked this conversation as resolved.
'/usr/include/*'
'*/tests/L1_testing/*'
Expand All @@ -254,3 +260,4 @@ jobs:
DobbyL2TestResults.json
l2coverage
if-no-files-found: warn

Original file line number Diff line number Diff line change
Expand Up @@ -401,3 +401,4 @@ static const char* ociJsonTemplate = R"JSON(
{{/ENABLE_RDK_PLUGINS}}
}
)JSON";

Original file line number Diff line number Diff line change
Expand Up @@ -412,3 +412,4 @@ static const char* ociJsonTemplate = R"JSON(
{{/ENABLE_RDK_PLUGINS}}
}
)JSON";

2 changes: 0 additions & 2 deletions client/tool/source/Main.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -100,7 +100,6 @@ void containerStopCallback(int32_t cd, const std::string &containerId,
if (state == IDobbyProxyEvents::ContainerState::Stopped && containerId == *id)
{
AI_LOG_INFO("Container %s has stopped", containerId.c_str());
std::lock_guard<std::mutex> locker(gLock);
promise.set_value();
}
Comment thread
Sonajeya31 marked this conversation as resolved.
Comment on lines 100 to 104
}
Expand All @@ -120,7 +119,6 @@ void containerWaitCallback(int32_t cd, const std::string &containerId,
if (state == wp->state && containerId == wp->containerId)
{
AI_LOG_INFO("Wait complete");
std::lock_guard<std::mutex> locker(gLock);
promise.set_value();
}
}
Expand Down
25 changes: 16 additions & 9 deletions tests/L2_testing/test_runner/annotation_tests.py
Original file line number Diff line number Diff line change
Expand Up @@ -53,17 +53,23 @@ def test_container(container_id, expected_output):
"""
test_utils.print_log("Running %s container test" % container_id, test_utils.Severity.debug)

with test_utils.untar_bundle(container_id) as bundle_path:
command = ["DobbyTool",
"start",
container_id,
bundle_path]
spec_path = test_utils.get_container_spec_path(container_id)

command = ["DobbyTool",
"start",
container_id,
spec_path]

status = test_utils.run_command_line(command)
if "started '" + container_id + "' container" not in status.stdout:
return False, "Container did not launch successfully"

status = test_utils.run_command_line(command)
if "started '" + container_id + "' container" not in status.stdout:
return False, "Container did not launch successfully"
result = validate_annotation(container_id, expected_output)

return validate_annotation(container_id, expected_output)
# Stop the container after the test
test_utils.dobby_tool_command("stop", container_id)

return result


def validate_annotation(container_id, expected_output):
Expand Down Expand Up @@ -126,3 +132,4 @@ def validate_annotation(container_id, expected_output):
if __name__ == "__main__":
test_utils.parse_arguments(__file__, True)
execute_test()

26 changes: 18 additions & 8 deletions tests/L2_testing/test_runner/basic_sanity_tests.py
Original file line number Diff line number Diff line change
Expand Up @@ -69,7 +69,9 @@ def execute_test():
# Test 2
test = tests[2]
stop_dobby_daemon()
result = read_asynchronous(subproc, test.expected_output, 5)
# Some platforms do not emit a deterministic "stopped" log line.
# Verify stop by process absence instead.
result = not check_if_process_present(tests[3].expected_output)
Comment thread
Sonajeya31 marked this conversation as resolved.
output = test_utils.create_simple_test_output(test, result)
output_table.append(output)
test_utils.print_single_result(output)
Expand All @@ -85,17 +87,23 @@ def execute_test():
return test_utils.count_print_results(output_table)


# we need to do this asynchronous as if there is no such string we would end in endless loop
# Uses select() for a true timeout instead of threads — no lingering readers.
# Reads raw bytes via os.read() to avoid Python TextIOWrapper buffering that
# can desynchronise from select()'s kernel-level readiness checks.
def read_asynchronous(proc, string_to_find, timeout):
"""Reads asynchronous from process. Ends when found string or timeout occurred.
"""Reads from process stderr with a real timeout using select().

Unlike a threaded approach, this cannot leak a blocked reader: select()
returns when data is available *or* when the timeout expires, so the
caller always regains control promptly.

Comment on lines +90 to 99
Parameters:
proc (process): process in which we want to read
string_to_find (string): what we want to find in process
proc (process): process whose stderr we read
string_to_find (string): what we want to find in process output
timeout (float): how long we should wait if string not found (seconds)

Returns:
found (bool): True if found string_to_find inside proc.
found (bool): True if string_to_find was found in proc stderr.

"""

Expand Down Expand Up @@ -190,11 +198,13 @@ def stop_dobby_daemon():
"""

test_utils.print_log("Stopping Dobby Daemon", test_utils.Severity.debug)
subproc = test_utils.run_command_line(["sudo", "pkill", "DobbyDaemon"])
sleep(0.2)
subproc = test_utils.run_command_line(["sudo", "pkill", "-9", "DobbyDaemon"])
sleep(1) # Give process time to fully terminate and be reaped
return subproc


if __name__ == "__main__":
test_utils.parse_arguments(__file__, True)
execute_test()


183 changes: 183 additions & 0 deletions tests/L2_testing/test_runner/bundle/regenerate_bundles.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,183 @@
#!/usr/bin/env python3
"""
Script to regenerate L2 test bundles for cgroupv2 compatibility.

This script:
1. Extracts each .tar.gz bundle
2. Patches config.json to remove cgroupv2-incompatible settings
3. Repacks the bundle

Changes made for cgroupv2 compatibility:
- Removes 'swappiness' from memory resources (not supported in cgroupv2)
- Sets realtimeRuntime and realtimePeriod to valid values or removes them
- Updates rootfsPropagation to 'slave' for better compatibility
Comment thread
Sonajeya31 marked this conversation as resolved.
Comment thread
Sonajeya31 marked this conversation as resolved.
Comment thread
Sonajeya31 marked this conversation as resolved.
Comment thread
Sonajeya31 marked this conversation as resolved.
Comment thread
Sonajeya31 marked this conversation as resolved.
"""

import json
import shutil
import sys
import tarfile
from pathlib import Path


def patch_config_for_cgroupv2(config: dict, bundle_name: str = "") -> dict:
"""Patch OCI config.json for cgroupv2 compatibility."""

# Remove swappiness from memory resources (not supported in cgroupv2)
if 'linux' in config and 'resources' in config['linux']:
resources = config['linux']['resources']

if 'memory' in resources:
memory = resources['memory']
if 'swappiness' in memory:
del memory['swappiness']
print(" - Removed 'swappiness' from memory resources")

# Fix cpu realtime settings - remove null values
if 'cpu' in resources:
cpu = resources['cpu']
if cpu.get('realtimeRuntime') is None:
del cpu['realtimeRuntime']
print(" - Removed null 'realtimeRuntime'")
if cpu.get('realtimePeriod') is None:
del cpu['realtimePeriod']
Comment thread
Sonajeya31 marked this conversation as resolved.
print(" - Removed null 'realtimePeriod'")
# Remove cpu section entirely if empty
if not cpu:
del resources['cpu']
print(" - Removed empty 'cpu' section")

# Remove rootfsPropagation entirely - it causes "make rootfs private" errors
# in user namespace environments like GitHub Actions
if 'linux' in config and 'rootfsPropagation' in config['linux']:
del config['linux']['rootfsPropagation']
print(" - Removed linux.rootfsPropagation")

# Remove top-level rootfsPropagation as well
if 'rootfsPropagation' in config:
del config['rootfsPropagation']
print(" - Removed top-level rootfsPropagation")
Comment thread
Sonajeya31 marked this conversation as resolved.

# Remove user namespace - causes issues in GitHub Actions which already uses user namespaces
if 'linux' in config:
# Remove uidMappings and gidMappings
if 'uidMappings' in config['linux']:
del config['linux']['uidMappings']
print(" - Removed uidMappings")
if 'gidMappings' in config['linux']:
del config['linux']['gidMappings']
print(" - Removed gidMappings")

# Remove 'user' from namespaces list
if 'namespaces' in config['linux']:
namespaces = config['linux']['namespaces']
original_len = len(namespaces)
config['linux']['namespaces'] = [ns for ns in namespaces if ns.get('type') != 'user']
if len(config['linux']['namespaces']) < original_len:
print(" - Removed 'user' namespace")

# Fix filelogging bundle - needs terminal: true for logging plugin to capture stdout
if 'filelogging' in bundle_name:
if 'process' in config:
if not config['process'].get('terminal', False):
config['process']['terminal'] = True
print(" - Set 'terminal' to true for logging plugin stdout capture")

return config


def process_bundle(bundle_tarball: Path, backup: bool = True):
"""Extract, patch, and repack a bundle tarball."""

print(f"\nProcessing: {bundle_tarball.name}")

bundle_dir = bundle_tarball.parent
bundle_name = bundle_tarball.name.replace('.tar.gz', '')
extract_path = bundle_dir / bundle_name

# Backup original
if backup:
backup_path = bundle_tarball.with_suffix('.tar.gz.bak')
Comment thread
Sonajeya31 marked this conversation as resolved.
Comment thread
Sonajeya31 marked this conversation as resolved.
Comment thread
Sonajeya31 marked this conversation as resolved.
if not backup_path.exists():
shutil.copy2(bundle_tarball, backup_path)
print(f" Backed up to: {backup_path.name}")

Comment thread
Sonajeya31 marked this conversation as resolved.
# Clean up any stale extraction directory left behind by a prior run
# to avoid mixing old files into the repacked bundle.
if extract_path.exists():
print(f" Removing stale extraction directory: {extract_path.name}")
shutil.rmtree(extract_path)

# Extract (with path-traversal protection)
print(f" Extracting...")
with tarfile.open(bundle_tarball, 'r:gz') as tar:
# Reject members that escape the target directory via absolute paths
# or '..' components to prevent path-traversal attacks.
for member in tar.getmembers():
member_path = (bundle_dir / member.name).resolve()
if not str(member_path).startswith(str(bundle_dir.resolve())):
raise RuntimeError(
f"Tarball member '{member.name}' would escape extraction "
f"directory '{bundle_dir}' — aborting for safety"
)
tar.extractall(path=bundle_dir)

# Find and patch config.json
config_path = extract_path / 'config.json'
if not config_path.exists():
print(f" ERROR: config.json not found at {config_path}")
return False

print(f" Patching config.json...")
with open(config_path, 'r') as f:
config = json.load(f)

patched_config = patch_config_for_cgroupv2(config, bundle_name)

with open(config_path, 'w') as f:
json.dump(patched_config, f, indent=4)

# Repack
print(f" Repacking...")
with tarfile.open(bundle_tarball, 'w:gz') as tar:
tar.add(extract_path, arcname=bundle_name)

# Cleanup extracted folder
shutil.rmtree(extract_path)
print(f" Done!")

return True


def main():
bundle_dir = Path(__file__).parent

# Find all bundle tarballs
bundles = list(bundle_dir.glob('*_bundle.tar.gz'))

if not bundles:
print("No bundles found!")
return 1

print(f"Found {len(bundles)} bundles to process:")
for b in bundles:
print(f" - {b.name}")

# Process each bundle
success_count = 0
for bundle in bundles:
try:
if process_bundle(bundle):
success_count += 1
except Exception as e:
print(f" ERROR processing {bundle.name}: {e}")

print(f"\n{'='*50}")
print(f"Processed {success_count}/{len(bundles)} bundles successfully")

return 0 if success_count == len(bundles) else 1


if __name__ == '__main__':
sys.exit(main())

Loading
Loading