diff --git a/.gitignore b/.gitignore index dbbbbc3..e6037df 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,6 @@ +# Generated certificate chain (private keys – never commit) +examples/certs/ + # Byte-compiled / optimized / DLL files *__pycache__/* *.py[cod] @@ -169,3 +172,6 @@ doc/_build/ gpsd_fake.py src\flexstack\facilities\local_dynamic_map\todo.txt storage.json +*.pdf +*.txt +acceptance_tests/ \ No newline at end of file diff --git a/examples/cam_sender_and_receiver.py b/examples/cam_sender_and_receiver.py index 511a2bc..10e3843 100644 --- a/examples/cam_sender_and_receiver.py +++ b/examples/cam_sender_and_receiver.py @@ -173,7 +173,7 @@ def ldm_subscription_callback(data: RequestDataObjectsResp) -> None: ) location_service.add_callback( ca_basic_service.cam_transmission_management.location_service_callback) - + ca_basic_service.start() # Instantiate a Link Layer btp_router.freeze_callbacks() @@ -188,6 +188,7 @@ def ldm_subscription_callback(data: RequestDataObjectsResp) -> None: except KeyboardInterrupt: print("Exiting...") + ca_basic_service.stop() location_service.stop_event.set() location_service.location_service_thread.join() link_layer.sock.close() diff --git a/examples/generate_certificate_chain.py b/examples/generate_certificate_chain.py new file mode 100644 index 0000000..4006f16 --- /dev/null +++ b/examples/generate_certificate_chain.py @@ -0,0 +1,299 @@ +""" +Generate a C-ITS certificate chain: Root CA → Authorization Authority → two Authorization Tickets. + +The script creates four certificates that form the chain of trust required to sign and +verify C-ITS (ITS-S) messages between two independent ITS stations: + +- **Root Certificate Authority (Root CA)**: self-signed, grants all issue permissions + with a chain depth of 2 so it can issue AA certificates. 
+- **Authorization Authority (AA)**: issued by the Root CA, grants issue permissions for + CAM (psid 36), DENM (psid 37) and VAM (psid 638) with a chain depth of 1 so it can + issue AT certificates. +- **Authorization Ticket 1 (AT1)**: issued by the AA for the first ITS station. +- **Authorization Ticket 2 (AT2)**: issued by the AA for the second ITS station. + +Both ATs share the same Root CA and AA so they trust each other's signed messages. +Each AT is the end-entity certificate used by one ITS station to sign outgoing messages. + +Each certificate is saved to a ``certs/`` sub-directory of the directory where this +script lives. Two files are created for each entity: + +- ``.cert`` – OER-encoded certificate (EtsiTs103097Certificate format). +- ``.pem`` – EC private key in PEM format (PKCS #8 / SEC 1 encoding as produced + by the *ecdsa* library). + +These files are consumed by the :mod:`secured_cam_sender_and_receiver` example which +accepts a ``--at {1,2}`` flag to select which AT the station uses for signing. 
+ +Usage:: + + python examples/generate_certificate_chain.py + +References +---------- +- ETSI TS 103 097 V2.1.1 – Security header and certificate formats for ITS +- ETSI EN 302 636-4-1 V1.4.1 – GeoNetworking +""" + +import os +import sys +import time + +# Ensure ../src (relative to this file) is on PYTHONPATH so local modules can be imported +_this_dir = os.path.dirname(os.path.abspath(__file__)) +_src_dir = os.path.normpath(os.path.join(_this_dir, "..", "src")) +if _src_dir not in sys.path: + sys.path.insert(0, _src_dir) + +from flexstack.security.certificate import OwnCertificate +from flexstack.security.ecdsa_backend import PythonECDSABackend + +# --------------------------------------------------------------------------- +# Constants +# --------------------------------------------------------------------------- + +# ITS-AID / PSID values covered by the generated certificates +PSID_CAM = 36 +PSID_DENM = 37 +PSID_VAM = 638 + +# Duration choice and value for the certificate validity period. +# Uses the "years" choice of the ETSI TS 103 097 Duration CHOICE type, which accepts +# a Uint16 (0–65535) representing the number of years. +VALIDITY_DURATION = ("years", 10) + +# ITS epoch: 2004-01-01 00:00:00 UTC in Unix seconds. +# ValidityPeriod.start is a Time32 counting seconds *since* this epoch. +ITS_EPOCH = 1072915200 + +# CRACA identifier – must be 0x000000 per §6 of ETSI TS 103 097 (not revoked via 1609.2 mechanisms) +CRACA_ID = b"\x00\x00\x00" + +# Directory where certificate and key files are written +CERTS_DIR = os.path.join(_this_dir, "certs") + + +# --------------------------------------------------------------------------- +# Helper – TBS certificate dictionaries +# --------------------------------------------------------------------------- + +def _current_its_time() -> int: + """ + Return the current UTC time as a Time32 value (seconds since the ITS epoch). 
+ """ + return int(time.time()) - ITS_EPOCH + + +def _make_root_ca_tbs() -> dict: + """ + Return the ToBeSignedCertificate dict for a Root Certificate Authority. + + The Root CA is self-signed and grants *all* subject issue permissions with a + ``minChainLength`` of 2, which allows it to issue authorization authority + certificates (chain depth 1) that can in turn issue authorization tickets. + """ + return { + "id": ("name", "root-ca.example"), + "cracaId": CRACA_ID, + "crlSeries": 0, + "validityPeriod": {"start": _current_its_time(), "duration": VALIDITY_DURATION}, + "certIssuePermissions": [ + { + "subjectPermissions": ("all", None), + "minChainLength": 2, + "chainLengthRange": 0, + "eeType": (b"\x00", 1), + } + ], + "verifyKeyIndicator": ( + "verificationKey", + ("ecdsaNistP256", ("fill", None)), + ), + } + + +def _make_aa_tbs() -> dict: + """ + Return the ToBeSignedCertificate dict for an Authorization Authority. + + The AA is issued by the Root CA via :func:`OwnCertificate.initialize_certificate`. + It carries explicit issue permissions for CAM, DENM and VAM PSIDs so that only + authorization tickets for those services can be issued from this AA. The + ``minChainLength`` of 1 means the AA can issue end-entity (AT) certificates. + """ + return { + "id": ("name", "aa.example"), + "cracaId": CRACA_ID, + "crlSeries": 0, + "validityPeriod": {"start": _current_its_time(), "duration": VALIDITY_DURATION}, + "certIssuePermissions": [ + { + "subjectPermissions": ( + "explicit", + [ + {"psid": PSID_CAM}, + {"psid": PSID_DENM}, + {"psid": PSID_VAM}, + ], + ), + "minChainLength": 1, + "chainLengthRange": 0, + "eeType": (b"\x00", 1), + } + ], + "verifyKeyIndicator": ( + "verificationKey", + ("ecdsaNistP256", ("fill", None)), + ), + } + + +def _make_at_tbs(index: int) -> dict: + """ + Return the ToBeSignedCertificate dict for an Authorization Ticket. + + The AT is the end-entity certificate used by an ITS station to sign outgoing + messages. 
It carries *application* permissions (``appPermissions``) listing the + ITS-AID / PSID values that messages signed with this certificate may carry. + + Parameters + ---------- + index : int + Station index (1 or 2) embedded in the certificate id to distinguish + the two authorization tickets issued from the same AA. + """ + return { + "id": ("none", None), + "cracaId": CRACA_ID, + "crlSeries": 0, + "validityPeriod": {"start": _current_its_time(), "duration": VALIDITY_DURATION}, + "appPermissions": [ + {"psid": PSID_CAM}, + {"psid": PSID_DENM}, + {"psid": PSID_VAM}, + ], + "verifyKeyIndicator": ( + "verificationKey", + ("ecdsaNistP256", ("fill", None)), + ), + } + + +# --------------------------------------------------------------------------- +# Main +# --------------------------------------------------------------------------- + +def main() -> None: + """ + Generate the certificate chain and write the resulting files to ``certs/``. + + The four certificates (Root CA, AA, AT1, AT2) are generated in order. Each + one is signed either by itself (Root CA) or by the preceding certificate in + the chain. After generation the OER-encoded certificate bytes and the + PEM-encoded private key are written to disk. + + AT1 and AT2 share the same Root CA and AA so either station can verify + messages sent by the other. + """ + os.makedirs(CERTS_DIR, exist_ok=True) + + backend = PythonECDSABackend() + + # ------------------------------------------------------------------ + # 1. 
Root CA – self-signed + # ------------------------------------------------------------------ + print("Generating Root CA certificate...") + root_ca_tbs = _make_root_ca_tbs() + root_ca = OwnCertificate.initialize_certificate( + backend=backend, + to_be_signed_certificate=root_ca_tbs, + issuer=None, # self-signed + ) + assert root_ca.verify(backend), "Root CA certificate failed self-verification" + + _write_cert(root_ca, backend, "root_ca") + print(f" -> certs/root_ca.cert ({len(root_ca.encode())} bytes)") + print(f" -> certs/root_ca.pem") + + # ------------------------------------------------------------------ + # 2. Authorization Authority – issued by the Root CA + # ------------------------------------------------------------------ + print("Generating Authorization Authority certificate...") + aa_tbs = _make_aa_tbs() + aa = OwnCertificate.initialize_certificate( + backend=backend, + to_be_signed_certificate=aa_tbs, + issuer=root_ca, + ) + assert aa.verify(backend), "AA certificate failed verification" + + _write_cert(aa, backend, "aa") + print(f" -> certs/aa.cert ({len(aa.encode())} bytes)") + print(f" -> certs/aa.pem") + + # ------------------------------------------------------------------ + # 3. Authorization Ticket 1 – issued by the AA (station 1) + # ------------------------------------------------------------------ + print("Generating Authorization Ticket 1 certificate...") + at1_tbs = _make_at_tbs(index=1) + at1_cert = OwnCertificate.initialize_certificate( + backend=backend, + to_be_signed_certificate=at1_tbs, + issuer=aa, + ) + assert at1_cert.verify(backend), "AT1 certificate failed verification" + + _write_cert(at1_cert, backend, "at1") + print(f" -> certs/at1.cert ({len(at1_cert.encode())} bytes)") + print(f" -> certs/at1.pem") + + # ------------------------------------------------------------------ + # 4. 
Authorization Ticket 2 – issued by the AA (station 2) + # ------------------------------------------------------------------ + print("Generating Authorization Ticket 2 certificate...") + at2_tbs = _make_at_tbs(index=2) + at2_cert = OwnCertificate.initialize_certificate( + backend=backend, + to_be_signed_certificate=at2_tbs, + issuer=aa, + ) + assert at2_cert.verify(backend), "AT2 certificate failed verification" + + _write_cert(at2_cert, backend, "at2") + print(f" -> certs/at2.cert ({len(at2_cert.encode())} bytes)") + print(f" -> certs/at2.pem") + + print("\nCertificate chain generated successfully.") + print(f"Files are located in: {CERTS_DIR}") + print() + print("Run the example with:") + print(" Terminal 1: python examples/secured_cam_sender_and_receiver.py --at 1") + print(" Terminal 2: python examples/secured_cam_sender_and_receiver.py --at 2") + + +def _write_cert(cert: OwnCertificate, backend: PythonECDSABackend, name: str) -> None: + """ + Persist a certificate and its associated private key to files. + + Parameters + ---------- + cert : OwnCertificate + The certificate to persist. + backend : PythonECDSABackend + The ECDSA backend that holds the private key referenced by + ``cert.key_id``. + name : str + Base name used for the output files (without extension). The + certificate is written to ``.cert`` and the private key to + ``.pem`` inside :data:`CERTS_DIR`. + """ + cert_path = os.path.join(CERTS_DIR, f"{name}.cert") + key_path = os.path.join(CERTS_DIR, f"{name}.pem") + with open(cert_path, "wb") as f: + f.write(cert.encode()) + with open(key_path, "wb") as f: + f.write(backend.export_signing_key(cert.key_id)) + + +if __name__ == "__main__": + main() diff --git a/examples/secured_cam_sender_and_receiver.py b/examples/secured_cam_sender_and_receiver.py new file mode 100644 index 0000000..d55ad21 --- /dev/null +++ b/examples/secured_cam_sender_and_receiver.py @@ -0,0 +1,559 @@ +""" +Send and receive CAM (Cooperative Awareness Messages) with C-ITS security. 
+ +This example is an extension of :mod:`cam_sender_and_receiver` that enables the +GeoNetworking security layer so that every outgoing CAM is ETSI TS 103 097-signed +and every incoming secured packet is verified before being delivered to the upper +layers. + +Prerequisites +------------- +Run :mod:`generate_certificate_chain` first to create the certificate files:: + + python examples/generate_certificate_chain.py + +The script expects the following files inside an ``examples/certs/`` directory +(relative to this file): + +- ``root_ca.cert`` / ``root_ca.pem`` – Root CA OER certificate and private key. +- ``aa.cert`` / ``aa.pem`` – Authorization Authority OER cert and key. +- ``at1.cert`` / ``at1.pem`` – Authorization Ticket for station 1. +- ``at2.cert`` / ``at2.pem`` – Authorization Ticket for station 2. + +If any file is missing the script prints an informative message and exits. + +Two-station setup +----------------- +Run two terminals simultaneously to see cross-station CAM verification:: + + # Terminal 1 + python examples/secured_cam_sender_and_receiver.py --at 1 + + # Terminal 2 + python examples/secured_cam_sender_and_receiver.py --at 2 + +Each instance signs outgoing CAMs with its own AT while keeping both AT1 and AT2 +in its :class:`~flexstack.security.certificate_library.CertificateLibrary` as +*known* authorization tickets. This allows every station to verify messages from +either peer, regardless of whether the message carries a full certificate or only +a digest signer. 
+ +Architecture +------------ +The security objects are wired into the GeoNetworking router as follows:: + + CertificateLibrary (trusted root CA + AA, own ATn, known AT1 + AT2) ─┐ + ↓ │ + SignService ─────────────────────────────────────────────────┤ + GNRouter (sign_service, verify_service) + VerifyService ─────────────────────────────────────────────────┘ + +References +---------- +- ETSI TS 103 097 V2.1.1 – Security header and certificate formats for ITS +- ETSI EN 302 636-4-1 V1.4.1 – GeoNetworking (SECURED_PACKET handling) +- ETSI TS 102 723-8 V1.1.1 – ITS-S security services (SN-SAP) +""" + +import os +import sys + +# Ensure ../src (relative to this file) is on PYTHONPATH so local modules can be imported +_this_dir = os.path.dirname(os.path.abspath(__file__)) +_src_dir = os.path.normpath(os.path.join(_this_dir, "..", "src")) +if _src_dir not in sys.path: + sys.path.insert(0, _src_dir) + +import argparse +import datetime +import math +import random +import time +import logging + +from flexstack.facilities.local_dynamic_map.ldm_classes import ComparisonOperators +from flexstack.facilities.ca_basic_service.cam_transmission_management import VehicleData +from flexstack.facilities.ca_basic_service.ca_basic_service import CooperativeAwarenessBasicService +from flexstack.facilities.local_dynamic_map.ldm_constants import CAM +from flexstack.facilities.local_dynamic_map.ldm_classes import ( + AccessPermission, + Circle, + Filter, + FilterStatement, + GeometricArea, + Location, + OrderTupleValue, + OrderingDirection, + SubscribeDataobjectsReq, + SubscribeDataObjectsResp, + RegisterDataConsumerReq, + RegisterDataConsumerResp, + RequestDataObjectsResp, + SubscribeDataobjectsResult, + TimestampIts, +) +from flexstack.facilities.local_dynamic_map.factory import LDMFactory +from flexstack.btp.router import Router as BTPRouter +from flexstack.geonet.gn_address import GNAddress, M, ST, MID +from flexstack.geonet.mib import MIB, GnSecurity +from flexstack.geonet.router import 
Router as GNRouter +from flexstack.linklayer.raw_link_layer import RawLinkLayer +from flexstack.security.certificate import Certificate, OwnCertificate +from flexstack.security.certificate_library import CertificateLibrary +from flexstack.security.ecdsa_backend import PythonECDSABackend +from flexstack.security.sign_service import SignService +from flexstack.security.verify_service import VerifyService + +logging.basicConfig(level=logging.INFO) + +# --------------------------------------------------------------------------- +# Constants +# --------------------------------------------------------------------------- + +POSITION_COORDINATES = [41.386931, 2.112104] +CERTS_DIR = os.path.join(_this_dir, "certs") + + +def generate_random_mac_address(locally_administered: bool = True, multicast: bool = False) -> bytes: + """ + Generate a randomized 6-byte MAC address. + + Parameters + ---------- + locally_administered : bool + If *True* (default) the locally-administered bit is set. + multicast : bool + If *True* the multicast bit is set; otherwise the address is unicast. + + Returns + ------- + bytes + Six-byte MAC address. + """ + octets = [random.randint(0x00, 0xFF) for _ in range(6)] + first = octets[0] + if multicast: + first |= 0b00000001 + else: + first &= 0b11111110 + if locally_administered: + first |= 0b00000010 + else: + first &= 0b11111101 + octets[0] = first + return bytes(octets) + + +MAC_ADDRESS = generate_random_mac_address() +STATION_ID = random.randint(1, 2147483647) + + +# --------------------------------------------------------------------------- +# Certificate loading +# --------------------------------------------------------------------------- + +def _cert_path(name: str) -> str: + """Return the absolute path to a certificate file inside :data:`CERTS_DIR`.""" + return os.path.join(CERTS_DIR, name) + + +def _check_cert_files() -> None: + """ + Check that all required certificate files are present. 
+ + Raises + ------ + SystemExit + If any required file is missing, a descriptive message is printed and + the process exits with code 1. + """ + required = [ + "root_ca.cert", "root_ca.pem", + "aa.cert", "aa.pem", + "at1.cert", "at1.pem", + "at2.cert", "at2.pem", + ] + missing = [f for f in required if not os.path.isfile(_cert_path(f))] + if missing: + print("Error: the following certificate files are missing:") + for f in missing: + print(f" {_cert_path(f)}") + print("\nRun the certificate generation script first:") + print(" python examples/generate_certificate_chain.py") + sys.exit(1) + + +def _load_at_as_own( + backend: PythonECDSABackend, index: int, aa: Certificate +) -> OwnCertificate: + """ + Load an Authorization Ticket from disk and return it as an :class:`OwnCertificate`. + + The private key stored in ``at.pem`` is imported into *backend* so + that the returned certificate can be used for signing. + + Parameters + ---------- + backend : PythonECDSABackend + The ECDSA backend that will hold the imported private key. + index : int + AT index (1 or 2) that determines which ``at.cert`` / + ``at.pem`` file pair is read. + aa : Certificate + The Authorization Authority certificate that issued this AT, used to + reconstruct the issuer chain. + + Returns + ------- + OwnCertificate + The AT certificate with a valid ``key_id`` pointing to the imported key. + """ + key_pem = open(_cert_path(f"at{index}.pem"), "rb").read() + key_id = backend.import_signing_key(key_pem) + cert_bytes = open(_cert_path(f"at{index}.cert"), "rb").read() + base = Certificate().decode(cert_bytes, issuer=aa) + return OwnCertificate(certificate=base.certificate, issuer=aa, key_id=key_id) + + +def _load_at_as_known(index: int, aa: Certificate) -> Certificate: + """ + Load an Authorization Ticket from disk as a plain :class:`Certificate`. 
+ + Used to populate the ``known_authorization_tickets`` of the library so that + incoming messages signed with a *digest* signer referencing this AT can be + verified. + + Parameters + ---------- + index : int + AT index (1 or 2) selecting which ``at.cert`` file is read. + aa : Certificate + The Authorization Authority certificate that issued this AT. + + Returns + ------- + Certificate + The AT certificate linked to its issuer chain. + """ + cert_bytes = open(_cert_path(f"at{index}.cert"), "rb").read() + return Certificate().decode(cert_bytes, issuer=aa) + + +def build_security_stack(at_index: int) -> tuple: + """ + Load the certificate chain from disk and construct the security objects. + + Reads the OER certificates (Root CA, AA, AT1, AT2) and the selected AT + private key from :data:`CERTS_DIR` and assembles a + :class:`~flexstack.security.certificate_library.CertificateLibrary`, + a :class:`~flexstack.security.sign_service.SignService` and a + :class:`~flexstack.security.verify_service.VerifyService`. + + Both AT1 and AT2 are registered as *known* authorization tickets so that + this station can verify digest-signed messages from either peer without + waiting for a full-certificate transmission. + + Parameters + ---------- + at_index : int + Which AT (1 or 2) this station uses for signing outgoing messages. + + Returns + ------- + tuple[SignService, VerifyService] + The configured sign and verify services ready to be passed to the + :class:`~flexstack.geonet.router.Router`. + + Raises + ------ + SystemExit + If any certificate file is missing (see :func:`_check_cert_files`). 
+ """ + _check_cert_files() + + backend = PythonECDSABackend() + + # ------------------------------------------------------------------ + # Load Root CA (self-signed) + # ------------------------------------------------------------------ + root_ca_bytes = open(_cert_path("root_ca.cert"), "rb").read() + root_ca = Certificate().decode(root_ca_bytes, issuer=None) + + # ------------------------------------------------------------------ + # Load Authorization Authority (issued by Root CA) + # ------------------------------------------------------------------ + aa_bytes = open(_cert_path("aa.cert"), "rb").read() + aa = Certificate().decode(aa_bytes, issuer=root_ca) + + # ------------------------------------------------------------------ + # Load the signing AT (OwnCertificate with private key) and both ATs + # as known authorization tickets for peer verification. + # ------------------------------------------------------------------ + own_at = _load_at_as_own(backend, at_index, aa) + peer_index = 2 if at_index == 1 else 1 + peer_at = _load_at_as_known(peer_index, aa) + + # ------------------------------------------------------------------ + # Build CertificateLibrary with the full chain of trust. 
+ # - known_authorization_tickets: both AT1 and AT2 (for digest verification) + # - own_certificates: the station's own AT (for signing) + # ------------------------------------------------------------------ + cert_library = CertificateLibrary( + ecdsa_backend=backend, + root_certificates=[root_ca], + aa_certificates=[aa], + at_certificates=[own_at, peer_at], + ) + cert_library.add_own_certificate(own_at) + + sign_service = SignService(backend=backend, certificate_library=cert_library) + verify_service = VerifyService(backend=backend, certificate_library=cert_library, sign_service=sign_service) + + return sign_service, verify_service + + +# --------------------------------------------------------------------------- +# Randomized moving location service +# --------------------------------------------------------------------------- + +class _RandomTrajectoryLocationService: + """ + Thread-based location service that simulates a moving vehicle. + + Updates are emitted every 80 ms (below T_CheckCamGen = 100 ms) so the + CA Basic Service timer always has fresh data. The heading changes by a + random ±5–15 ° on every step, which consistently exceeds the 4 ° Condition-1 + threshold and sustains 10 Hz CAM generation. 
+ """ + + _PERIOD_S: float = 0.08 # 80 ms — below T_CheckCamGen + _BASE_SPEED_MPS: float = 10.0 # ~36 km/h + _EARTH_R: float = 6_371_000.0 + + def __init__(self, start_lat: float, start_lon: float) -> None: + from flexstack.utils.location_service import LocationService + # Reuse the callbacks list from LocationService without inheriting fully + self._callbacks: list = [] + self._lat = start_lat + self._lon = start_lon + self._heading = random.uniform(0.0, 360.0) + self._speed = self._BASE_SPEED_MPS + self.stop_event = __import__('threading').Event() + self.location_service_thread = __import__('threading').Thread( + target=self._run, daemon=True + ) + self.location_service_thread.start() + + def add_callback(self, callback) -> None: + self._callbacks.append(callback) + + def _send(self, tpv: dict) -> None: + for cb in self._callbacks: + cb(tpv) + + def _step(self) -> None: + """Advance the simulated position by one time step.""" + dt = self._PERIOD_S + + # Heading: random signed change guaranteed to exceed the 4 deg threshold + delta = random.uniform(5.0, 15.0) * random.choice((-1, 1)) + self._heading = (self._heading + delta) % 360.0 + + # Speed: small random walk within [5, 20] m/s + self._speed = max(5.0, min(20.0, self._speed + random.uniform(-0.5, 0.5))) + + # Position update (flat-Earth approximation; step sizes are < 2 m) + d = self._speed * dt + heading_r = math.radians(self._heading) + lat_r = math.radians(self._lat) + self._lat += math.degrees(d * math.cos(heading_r) / self._EARTH_R) + self._lon += math.degrees( + d * math.sin(heading_r) / (self._EARTH_R * math.cos(lat_r)) + ) + + def _tpv(self) -> dict: + ts = datetime.datetime.now(datetime.timezone.utc).isoformat()[:-9] + "Z" + return { + "class": "TPV", + "device": "/dev/ttyACM0", + "mode": 3, + "time": ts, + "ept": 0.005, + "lat": self._lat, + "lon": self._lon, + "alt": 0.0, + "epx": 1.0, + "epy": 1.0, + "epv": 5.0, + "track": self._heading, + "speed": self._speed, + "climb": 0.0, + "eps": 0.01, + } + 
+ def _run(self) -> None: + while not self.stop_event.is_set(): + self._step() + self._send(self._tpv()) + time.sleep(self._PERIOD_S) + + +# --------------------------------------------------------------------------- +# Main +# --------------------------------------------------------------------------- + +def main() -> None: + """ + Run the secured CAM sender/receiver loop. + + Parses the ``--at {1,2}`` command-line argument to select which Authorization + Ticket this station uses for signing. Sets up the full ITS-S stack (location + service, GN router with security, BTP router, LDM and CA Basic Service) and then + blocks, sending a CAM every second and printing any received CAMs from the peer + station until a ``KeyboardInterrupt`` is raised. + """ + parser = argparse.ArgumentParser( + description="Secured CAM sender/receiver (select station 1 or 2 via --at)" + ) + parser.add_argument( + "--at", + type=int, + choices=[1, 2], + required=True, + help="Authorization Ticket index for this station (1 or 2)", + ) + args = parser.parse_args() + + print(f"Starting station with AT{args.at}...") + sign_service, verify_service = build_security_stack(args.at) + + # Instantiate a moving location service that drives condition-1 at 10 Hz + location_service = _RandomTrajectoryLocationService( + start_lat=POSITION_COORDINATES[0], + start_lon=POSITION_COORDINATES[1], + ) + + # Instantiate a GN router with security enabled + mib = MIB( + itsGnLocalGnAddr=GNAddress( + m=M.GN_MULTICAST, + st=ST.CYCLIST, + mid=MID(MAC_ADDRESS), + ), + itsGnSecurity=GnSecurity.ENABLED, + ) + gn_router = GNRouter( + mib=mib, + sign_service=sign_service, + verify_service=verify_service, + ) + location_service.add_callback(gn_router.refresh_ego_position_vector) + + # Instantiate a BTP router + btp_router = BTPRouter(gn_router) + gn_router.register_indication_callback(btp_router.btp_data_indication) + + # Instantiate a Local Dynamic Map (LDM) + ldm_location = Location.initializer( + 
latitude=int(POSITION_COORDINATES[0] * 10 ** 7), + longitude=int(POSITION_COORDINATES[1] * 10 ** 7), + ) + + ldm_area = GeometricArea( + circle=Circle(radius=5000), + rectangle=None, + ellipse=None, + ) + ldm_factory = LDMFactory() + ldm = ldm_factory.create_ldm( + ldm_location, + ldm_maintenance_type="Reactive", + ldm_service_type="Reactive", + ldm_database_type="Dictionary", + ) + location_service.add_callback(ldm_location.location_service_callback) + + # Subscribe to LDM to print received CAMs + register_resp: RegisterDataConsumerResp = ldm.if_ldm_4.register_data_consumer( + RegisterDataConsumerReq( + application_id=CAM, + access_permisions=(AccessPermission.CAM,), + area_of_interest=ldm_area, + ) + ) + if register_resp.result == 2: + sys.exit(1) + + def ldm_subscription_callback(data: RequestDataObjectsResp) -> None: + print( + f'Received CAM from: {data.data_objects[0]["dataObject"]["header"]["stationId"]}' + ) + + subscribe_resp: SubscribeDataObjectsResp = ldm.if_ldm_4.subscribe_data_consumer( + SubscribeDataobjectsReq( + application_id=CAM, + data_object_type=(CAM,), + priority=1, + filter=Filter( + filter_statement_1=FilterStatement( + "header.stationId", + ComparisonOperators.NOT_EQUAL, + STATION_ID, + ) + ), + notify_time=TimestampIts(0), + multiplicity=1, + order=( + OrderTupleValue( + attribute="cam.generationDeltaTime", + ordering_direction=OrderingDirection.ASCENDING, + ), + ), + ), + ldm_subscription_callback, + ) + if subscribe_resp.result != SubscribeDataobjectsResult.SUCCESSFUL: + sys.exit(1) + + # Instantiate a CA Basic Service + vehicle_data = VehicleData( + station_id=STATION_ID, + station_type=5, + drive_direction="forward", + vehicle_length={ + "vehicleLengthValue": 1023, + "vehicleLengthConfidenceIndication": "unavailable", + }, + vehicle_width=62, + ) + ca_basic_service = CooperativeAwarenessBasicService( + btp_router=btp_router, + vehicle_data=vehicle_data, + ldm=ldm, + ) + location_service.add_callback( + 
ca_basic_service.cam_transmission_management.location_service_callback + ) + + # Instantiate a Link Layer and start the main loop + btp_router.freeze_callbacks() + link_layer = RawLinkLayer( + "lo", MAC_ADDRESS, receive_callback=gn_router.gn_data_indicate + ) + gn_router.link_layer = link_layer + ca_basic_service.start() + + try: + while True: + time.sleep(1) + except KeyboardInterrupt: + print("Exiting...") + + ca_basic_service.stop() + location_service.stop_event.set() + location_service.location_service_thread.join() + link_layer.sock.close() + + +if __name__ == "__main__": + main() diff --git a/examples/secured_vam_sender_and_receiver.py b/examples/secured_vam_sender_and_receiver.py new file mode 100644 index 0000000..66b4339 --- /dev/null +++ b/examples/secured_vam_sender_and_receiver.py @@ -0,0 +1,477 @@ +""" +Send and receive VAM (VRU Awareness Messages) with C-ITS security. + +This example is an extension of :mod:`cam_sender_and_receiver` that enables the +GeoNetworking security layer so that every outgoing VAM is ETSI TS 103 097-signed +and every incoming secured packet is verified before being delivered to the upper +layers. It simulates two pedestrian VRU ITS-Ss exchanging VAMs over a loopback +interface with VBS clustering support enabled. + +Prerequisites +------------- +Run :mod:`generate_certificate_chain` first to create the certificate files:: + + python examples/generate_certificate_chain.py + +The script expects the following files inside an ``examples/certs/`` directory +(relative to this file): + +- ``root_ca.cert`` / ``root_ca.pem`` – Root CA OER certificate and private key. +- ``aa.cert`` / ``aa.pem`` – Authorization Authority OER cert and key. +- ``at1.cert`` / ``at1.pem`` – Authorization Ticket for station 1. +- ``at2.cert`` / ``at2.pem`` – Authorization Ticket for station 2. + +If any file is missing the script prints an informative message and exits. 
+ +Two-station setup +----------------- +Run two terminals simultaneously to see cross-station VAM verification:: + + # Terminal 1 + python examples/secured_vam_sender_and_receiver.py --at 1 + + # Terminal 2 + python examples/secured_vam_sender_and_receiver.py --at 2 + +Each instance signs outgoing VAMs with its own AT while keeping both AT1 and AT2 +in its :class:`~flexstack.security.certificate_library.CertificateLibrary` as +*known* authorization tickets. This allows every station to verify messages from +either peer, regardless of whether the message carries a full certificate or only +a digest signer. + +VBS Clustering +-------------- +Both stations start with ``cluster_support=True`` and ``own_vru_profile="pedestrian"``. +The VBS clustering state machine (ETSI TS 103 300-3 V2.3.1, clause 5.4) will +automatically negotiate cluster leader/follower roles based on received VAMs. + +Architecture +------------ +The security objects are wired into the GeoNetworking router as follows:: + + CertificateLibrary (trusted root CA + AA, own ATn, known AT1 + AT2) ─┐ + ↓ │ + SignService ─────────────────────────────────────────────────┤ + GNRouter (sign_service, verify_service) + VerifyService ─────────────────────────────────────────────────┘ + +References +---------- +- ETSI TS 103 300-3 V2.3.1 – VRU Awareness Basic Service (VAM specification) +- ETSI TS 103 097 V2.1.1 – Security header and certificate formats for ITS +- ETSI EN 302 636-4-1 V1.4.1 – GeoNetworking (SECURED_PACKET handling) +- ETSI TS 102 723-8 V1.1.1 – ITS-S security services (SN-SAP) +""" + +import os +import sys + +# Ensure ../src (relative to this file) is on PYTHONPATH so local modules can be imported +_this_dir = os.path.dirname(os.path.abspath(__file__)) +_src_dir = os.path.normpath(os.path.join(_this_dir, "..", "src")) +if _src_dir not in sys.path: + sys.path.insert(0, _src_dir) + +import argparse +import datetime +import math +import random +import time +import logging + +from 
flexstack.facilities.local_dynamic_map.ldm_classes import ComparisonOperators +from flexstack.facilities.vru_awareness_service.vam_transmission_management import DeviceDataProvider +from flexstack.facilities.vru_awareness_service.vru_awareness_service import VRUAwarenessService +from flexstack.facilities.local_dynamic_map.ldm_constants import VAM +from flexstack.facilities.local_dynamic_map.ldm_classes import ( + AccessPermission, + Circle, + Filter, + FilterStatement, + GeometricArea, + Location, + OrderTupleValue, + OrderingDirection, + SubscribeDataobjectsReq, + SubscribeDataObjectsResp, + RegisterDataConsumerReq, + RegisterDataConsumerResp, + RequestDataObjectsResp, + SubscribeDataobjectsResult, + TimestampIts, +) +from flexstack.facilities.local_dynamic_map.factory import LDMFactory +from flexstack.btp.router import Router as BTPRouter +from flexstack.geonet.gn_address import GNAddress, M, ST, MID +from flexstack.geonet.mib import MIB, GnSecurity +from flexstack.geonet.router import Router as GNRouter +from flexstack.linklayer.raw_link_layer import RawLinkLayer +from flexstack.security.certificate import Certificate, OwnCertificate +from flexstack.security.certificate_library import CertificateLibrary +from flexstack.security.ecdsa_backend import PythonECDSABackend +from flexstack.security.sign_service import SignService +from flexstack.security.verify_service import VerifyService + +logging.basicConfig(level=logging.INFO) + +# --------------------------------------------------------------------------- +# Constants +# --------------------------------------------------------------------------- + +POSITION_COORDINATES = [41.386931, 2.112104] +CERTS_DIR = os.path.join(_this_dir, "certs") + + +def generate_random_mac_address(locally_administered: bool = True, multicast: bool = False) -> bytes: + """ + Generate a randomized 6-byte MAC address. + + Parameters + ---------- + locally_administered: + Set the locally-administered bit (bit 1 of byte 0). Default: True. 
+ multicast: + Set the multicast/group bit (bit 0 of byte 0). Default: False. + """ + addr = [random.randint(0x00, 0xFF) for _ in range(6)] + if locally_administered: + addr[0] |= 0x02 + else: + addr[0] &= ~0x02 + if multicast: + addr[0] |= 0x01 + else: + addr[0] &= ~0x01 + return bytes(addr) + + +MAC_ADDRESS = generate_random_mac_address() +STATION_ID = random.randint(1, 2147483647) + + +# --------------------------------------------------------------------------- +# Certificate helpers +# --------------------------------------------------------------------------- + +def _cert_path(name: str) -> str: + """Return the absolute path to a certificate file inside CERTS_DIR.""" + return os.path.join(CERTS_DIR, name) + + +def _check_cert_files() -> None: + """Exit with a helpful message if any required certificate file is missing.""" + required = [ + "root_ca.cert", "root_ca.pem", + "aa.cert", "aa.pem", + "at1.cert", "at1.pem", + "at2.cert", "at2.pem", + ] + missing = [f for f in required if not os.path.isfile(_cert_path(f))] + if missing: + print( + "Missing certificate file(s):\n" + + "\n".join(f" {_cert_path(f)}" for f in missing) + + "\n\nRun: python examples/generate_certificate_chain.py" + ) + sys.exit(1) + + +def _load_at_as_own( + backend: PythonECDSABackend, + index: int, + aa: Certificate, +) -> OwnCertificate: + """Load AT *index* together with its private key as the *own* certificate.""" + key_pem = open(_cert_path(f"at{index}.pem"), "rb").read() + key_id = backend.import_signing_key(key_pem) + cert_bytes = open(_cert_path(f"at{index}.cert"), "rb").read() + base = Certificate().decode(cert_bytes, issuer=aa) + return OwnCertificate(certificate=base.certificate, issuer=aa, key_id=key_id) + + +def _load_at_as_known(index: int, aa: Certificate) -> Certificate: + """Load AT *index* as a known (peer) certificate.""" + cert_bytes = open(_cert_path(f"at{index}.cert"), "rb").read() + return Certificate().decode(cert_bytes, issuer=aa) + + +def 
build_security_stack(at_index: int) -> tuple[SignService, VerifyService]: + """ + Build and return a ``(SignService, VerifyService)`` pair for station *at_index*. + + Both AT1 and AT2 are added to the certificate library so that each station + can verify VAMs from either peer. + + Parameters + ---------- + at_index: + Which Authorization Ticket this station owns (1 or 2). + """ + _check_cert_files() + + backend = PythonECDSABackend() + + # Root CA (self-signed) + root_ca = Certificate().decode(open(_cert_path("root_ca.cert"), "rb").read(), issuer=None) + + # Authorization Authority (issued by root CA) + aa = Certificate().decode(open(_cert_path("aa.cert"), "rb").read(), issuer=root_ca) + + # Own AT and peer AT + peer_index = 2 if at_index == 1 else 1 + own_at = _load_at_as_own(backend, at_index, aa) + peer_at = _load_at_as_known(peer_index, aa) + + # ------------------------------------------------------------------ + cert_library = CertificateLibrary( + ecdsa_backend=backend, + root_certificates=[root_ca], + aa_certificates=[aa], + at_certificates=[own_at, peer_at], + ) + cert_library.add_own_certificate(own_at) + + sign_service = SignService(backend=backend, certificate_library=cert_library) + verify_service = VerifyService(backend=backend, certificate_library=cert_library, sign_service=sign_service) + + return sign_service, verify_service + + +# --------------------------------------------------------------------------- +# Randomized moving location service (pedestrian) +# --------------------------------------------------------------------------- + +class _RandomTrajectoryLocationService: + """ + Thread-based location service that simulates a moving pedestrian VRU. + + Updates are emitted every 100 ms (at T_GenVamMin) so the VRU Awareness Basic + Service timer always has fresh data. 
The heading changes by a random ±5–15 ° + on every step, which consistently exceeds the 4 ° heading-change threshold + (ETSI TS 103 300-3 V2.3.1, clause 6.4.1 condition 1) and sustains regular + VAM generation. Speed is randomised in a pedestrian range of 0.5–3.0 m/s. + """ + + _PERIOD_S: float = 0.10 # 100 ms — T_GenVamMin + _BASE_SPEED_MPS: float = 1.4 # typical walking pace (~5 km/h) + _EARTH_R: float = 6_371_000.0 + + def __init__(self, start_lat: float, start_lon: float) -> None: + self._callbacks: list = [] + self._lat = start_lat + self._lon = start_lon + self._heading = random.uniform(0.0, 360.0) + self._speed = self._BASE_SPEED_MPS + self.stop_event = __import__('threading').Event() + self.location_service_thread = __import__('threading').Thread( + target=self._run, daemon=True + ) + self.location_service_thread.start() + + def add_callback(self, callback) -> None: + self._callbacks.append(callback) + + def _send(self, tpv: dict) -> None: + for cb in self._callbacks: + cb(tpv) + + def _step(self) -> None: + """Advance the simulated position by one time step.""" + dt = self._PERIOD_S + + # Heading: random signed change guaranteed to exceed the 4 ° threshold + delta = random.uniform(5.0, 15.0) * random.choice((-1, 1)) + self._heading = (self._heading + delta) % 360.0 + + # Speed: small random walk in pedestrian range [0.5, 3.0] m/s + self._speed = max(0.5, min(3.0, self._speed + random.uniform(-0.1, 0.1))) + + # Position update (flat-Earth approximation; step sizes are < 0.3 m) + d = self._speed * dt + heading_r = math.radians(self._heading) + lat_r = math.radians(self._lat) + self._lat += math.degrees(d * math.cos(heading_r) / self._EARTH_R) + self._lon += math.degrees( + d * math.sin(heading_r) / (self._EARTH_R * math.cos(lat_r)) + ) + + def _tpv(self) -> dict: + ts = datetime.datetime.now(datetime.timezone.utc).isoformat()[:-9] + "Z" + return { + "class": "TPV", + "device": "/dev/ttyACM0", + "mode": 3, + "time": ts, + "ept": 0.005, + "lat": self._lat, + 
"lon": self._lon, + "alt": 0.0, + "epx": 1.0, + "epy": 1.0, + "epv": 5.0, + "track": self._heading, + "speed": self._speed, + "climb": 0.0, + "eps": 0.01, + } + + def _run(self) -> None: + while not self.stop_event.is_set(): + self._step() + self._send(self._tpv()) + time.sleep(self._PERIOD_S) + + +# --------------------------------------------------------------------------- +# Main +# --------------------------------------------------------------------------- + +def main() -> None: + """ + Run the secured VAM sender/receiver loop. + + Parses the ``--at {1,2}`` command-line argument to select which Authorization + Ticket this station uses for signing. Sets up the full ITS-S stack (location + service, GN router with security, BTP router, LDM and VRU Awareness Basic + Service) and then blocks, sending VAMs and printing any received VAMs from the + peer station until a ``KeyboardInterrupt`` is raised. + """ + parser = argparse.ArgumentParser( + description="Secured VAM sender/receiver (select station 1 or 2 via --at)" + ) + parser.add_argument( + "--at", + type=int, + choices=[1, 2], + required=True, + help="Authorization Ticket index for this station (1 or 2)", + ) + args = parser.parse_args() + + print(f"Starting VRU station with AT{args.at}...") + sign_service, verify_service = build_security_stack(args.at) + + # Instantiate a moving location service that simulates a pedestrian VRU + location_service = _RandomTrajectoryLocationService( + start_lat=POSITION_COORDINATES[0], + start_lon=POSITION_COORDINATES[1], + ) + + # Instantiate a GN router with security enabled + mib = MIB( + itsGnLocalGnAddr=GNAddress( + m=M.GN_MULTICAST, + st=ST.PEDESTRIAN, + mid=MID(MAC_ADDRESS), + ), + itsGnSecurity=GnSecurity.ENABLED, + ) + gn_router = GNRouter( + mib=mib, + sign_service=sign_service, + verify_service=verify_service, + ) + location_service.add_callback(gn_router.refresh_ego_position_vector) + + # Instantiate a BTP router + btp_router = BTPRouter(gn_router) + 
gn_router.register_indication_callback(btp_router.btp_data_indication) + + # Instantiate a Local Dynamic Map (LDM) + ldm_location = Location.initializer( + latitude=int(POSITION_COORDINATES[0] * 10 ** 7), + longitude=int(POSITION_COORDINATES[1] * 10 ** 7), + ) + + ldm_area = GeometricArea( + circle=Circle(radius=5000), + rectangle=None, + ellipse=None, + ) + ldm_factory = LDMFactory() + ldm = ldm_factory.create_ldm( + ldm_location, + ldm_maintenance_type="Reactive", + ldm_service_type="Reactive", + ldm_database_type="Dictionary", + ) + location_service.add_callback(ldm_location.location_service_callback) + + # Subscribe to LDM to print received VAMs + register_resp: RegisterDataConsumerResp = ldm.if_ldm_4.register_data_consumer( + RegisterDataConsumerReq( + application_id=VAM, + access_permisions=(AccessPermission.VAM,), + area_of_interest=ldm_area, + ) + ) + if register_resp.result == 2: + sys.exit(1) + + def ldm_subscription_callback(data: RequestDataObjectsResp) -> None: + print( + f'Received VAM from: {data.data_objects[0]["dataObject"]["header"]["stationId"]}' + ) + + subscribe_resp: SubscribeDataObjectsResp = ldm.if_ldm_4.subscribe_data_consumer( + SubscribeDataobjectsReq( + application_id=VAM, + data_object_type=(VAM,), + priority=1, + filter=Filter( + filter_statement_1=FilterStatement( + "header.stationId", + ComparisonOperators.NOT_EQUAL, + STATION_ID, + ) + ), + notify_time=TimestampIts(0), + multiplicity=1, + order=( + OrderTupleValue( + attribute="vam.generationDeltaTime", + ordering_direction=OrderingDirection.ASCENDING, + ), + ), + ), + ldm_subscription_callback, + ) + if subscribe_resp.result != SubscribeDataobjectsResult.SUCCESSFUL: + sys.exit(1) + + # Instantiate the VRU Awareness Basic Service + device_data_provider = DeviceDataProvider( + station_id=STATION_ID, + station_type=1, # pedestrian (ETSI TS 102 894-2, ITSStationType) + ) + vru_awareness_service = VRUAwarenessService( + btp_router=btp_router, + device_data_provider=device_data_provider, 
+ ldm=ldm, + cluster_support=True, + own_vru_profile="pedestrian", + ) + location_service.add_callback( + vru_awareness_service.vam_transmission_management.location_service_callback + ) + + # Instantiate a Link Layer and start the main loop + btp_router.freeze_callbacks() + link_layer = RawLinkLayer( + "lo", MAC_ADDRESS, receive_callback=gn_router.gn_data_indicate + ) + gn_router.link_layer = link_layer + + try: + while True: + time.sleep(1) + except KeyboardInterrupt: + print("Exiting...") + + location_service.stop_event.set() + location_service.location_service_thread.join() + link_layer.sock.close() + + +if __name__ == "__main__": + main() diff --git a/pyproject.toml b/pyproject.toml index 531d6f4..4bddc87 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta" [project] name = "v2xflexstack" -version = "0.10.11" +version = "0.11.0" authors = [ { name = "Jordi Marias-i-Parella", email = "jordi.marias@i2cat.net" }, { name = "Daniel Ulied Guevara", email = "daniel.ulied@i2cat.net" }, diff --git a/src/flexstack/btp/readme.md b/src/flexstack/btp/readme.md index bc57e62..addd43f 100644 --- a/src/flexstack/btp/readme.md +++ b/src/flexstack/btp/readme.md @@ -1,3 +1,3 @@ # Basic Transport Protocol (BTP) Implmentation -The present BTP implementation is based on the ETSI standard ETSI EN 302 636-5-1 V2.1.0 (2017-05). +The present BTP implementation is based on the ETSI standard ETSI EN 302 636-5-1 V2.2.1 (2019-05). 
diff --git a/src/flexstack/btp/router.py b/src/flexstack/btp/router.py index 8003486..5c8ab46 100644 --- a/src/flexstack/btp/router.py +++ b/src/flexstack/btp/router.py @@ -3,7 +3,7 @@ from types import MappingProxyType import logging -from .btp_header import BTPBHeader +from .btp_header import BTPAHeader, BTPBHeader from .service_access_point import BTPDataIndication, BTPDataRequest from ..geonet.common_header import CommonNH from ..geonet.service_access_point import GNDataIndication, GNDataRequest @@ -80,15 +80,40 @@ def btp_data_request(self, request: BTPDataRequest) -> None: area=request.gn_area, communication_profile=request.communication_profile, traffic_class=request.traffic_class, + security_profile=request.security_profile, + its_aid=request.its_aid, + security_permissions=request.security_permissions, data=data, length=len(data), - max_hop_limit=request.max_hop_limit, + max_hop_limit=request.gn_max_hop_limit, + max_packet_lifetime=request.gn_max_packet_lifetime, ) self.logging.debug( "Sending BTP Data Request: %s", gn_data_request.data) self.gn_router.gn_data_request(gn_data_request) elif request.btp_type == CommonNH.BTP_A: - raise NotImplementedError("BTPADataRequest not implemented") + header = BTPAHeader( + destination_port=request.destination_port, + source_port=request.source_port, + ) + data = header.encode() + request.data + gn_data_request = GNDataRequest( + upper_protocol_entity=request.btp_type, + packet_transport_type=request.gn_packet_transport_type, + area=request.gn_area, + communication_profile=request.communication_profile, + traffic_class=request.traffic_class, + security_profile=request.security_profile, + its_aid=request.its_aid, + security_permissions=request.security_permissions, + data=data, + length=len(data), + max_hop_limit=request.gn_max_hop_limit, + max_packet_lifetime=request.gn_max_packet_lifetime, + ) + self.logging.debug( + "Sending BTP-A Data Request: %s", gn_data_request.data) + 
self.gn_router.gn_data_request(gn_data_request) else: raise ValueError("Unknown BTP Header Type") @@ -108,7 +133,7 @@ def btp_b_data_indication(self, gn_data_indication: GNDataIndication) -> None: destination_port=header.destination_port, destination_port_info=header.destination_port_info ) - if self.indication_callbacks: + if self.indication_callbacks is not None: callback = self.indication_callbacks.get( indication.destination_port) if callback: @@ -125,7 +150,27 @@ def btp_a_data_indication(self, gn_data_indication: GNDataIndication) -> None: gn_data_indication : GNDataIndication GNDataIndication to handle. """ - raise NotImplementedError("BTPADataIndication not implemented") + base = BTPDataIndication.initialize_with_gn_data_indication( + gn_data_indication) + header = BTPAHeader.decode(gn_data_indication.data) + indication = BTPDataIndication( + source_port=header.source_port, + destination_port=header.destination_port, + destination_port_info=0, + gn_packet_transport_type=base.gn_packet_transport_type, + gn_destination_address=base.gn_destination_address, + gn_source_position_vector=base.gn_source_position_vector, + gn_traffic_class=base.gn_traffic_class, + length=base.length, + data=base.data, + ) + if self.indication_callbacks is not None: + callback = self.indication_callbacks.get( + indication.destination_port) + if callback: + callback(indication) + else: + raise RuntimeError("Indication callbacks not frozen") def btp_data_indication(self, gn_data_indication: GNDataIndication) -> None: """ diff --git a/src/flexstack/btp/service_access_point.py b/src/flexstack/btp/service_access_point.py index 7cadf76..4c21cf9 100644 --- a/src/flexstack/btp/service_access_point.py +++ b/src/flexstack/btp/service_access_point.py @@ -1,5 +1,6 @@ from base64 import b64encode, b64decode from dataclasses import dataclass, field +from typing import Optional from ..geonet.gn_address import GNAddress from ..geonet.service_access_point import ( Area, @@ -10,32 +11,61 @@ CommonNH, 
) from ..geonet.position_vector import LongPositionVector +from ..security.security_profiles import SecurityProfile @dataclass(frozen=True) class BTPDataRequest: """ - GN Data Request class. As specified in - ETSI EN 302 636-5-1 V2.1.0 (2017-05). Annex A2 + BTP Data Request class. As specified in + ETSI EN 302 636-5-1 V2.2.1 (2019-05). Annex A.2 Attributes ---------- btp_type : CommonNH - BTP Type. + BTP Type (BTP-A for interactive, BTP-B for non-interactive transport). source_port : int - (16 bit integer) Source Port. + (16 bit integer) Source Port. Optional; only used for BTP-A. destination_port : int (16 bit integer) Destination Port. destination_port_info : int - (16 bit integer) Destination Port Info. + (16 bit integer) Destination Port Info. Optional; only used for BTP-B. gn_packet_transport_type : PacketTransportType - Packet Transport Type. + GN Packet Transport Type (GeoUnicast, SHB, TSB, GeoBroadcast, GeoAnycast). gn_destination_address : GNAddress - Destination Address. + GN Destination Address. Used for GeoUnicast or geographical area for + GeoBroadcast/GeoAnycast. + gn_area : Area + GN Area for GeoBroadcast/GeoAnycast transport. + gn_max_hop_limit : int + GN Maximum Hop Limit. Optional; specifies the number of hops a packet + is allowed to have in the network. + gn_max_packet_lifetime : float or None + GN Maximum Packet Lifetime in seconds. Optional; specifies the maximum + tolerable time a GeoNetworking packet can be buffered until it reaches + its destination. + gn_repetition_interval : int or None + GN Repetition Interval in milliseconds. Optional; specifies the duration + between two consecutive transmissions of the same packet during the + maximum repetition time. + gn_max_repetition_time : int or None + GN Maximum Repetition Time in milliseconds. Optional; specifies the + duration for which the packet will be repeated if the repetition + interval is set. communication_profile : CommunicationProfile - Communication Profile. 
+ GN Communication Profile; determines the LL protocol entity. traffic_class : TrafficClass - Traffic Class. + GN Traffic Class. + security_profile : SecurityProfile + Security profile to apply when the GN router signs the packet. + Defaults to :attr:`SecurityProfile.NO_SECURITY` (no signing). + its_aid : int + ITS-AID (PSID) of the service carried in this packet, used by the + GN router to select the correct signing certificate. Only relevant + when *security_profile* is not ``NO_SECURITY``. + security_permissions : bytes + Sender permissions forwarded to the sign service. Only relevant + when *security_profile* is not ``NO_SECURITY``. length : int Length of the payload. data : bytes @@ -46,13 +76,19 @@ class BTPDataRequest: source_port: int = 0 destination_port: int = 0 destination_port_info: int = 0 - destination_port_info: int = 0 gn_packet_transport_type: PacketTransportType = field( default_factory=PacketTransportType) gn_destination_address: GNAddress = field(default_factory=GNAddress) gn_area: Area = field(default_factory=Area) + gn_max_hop_limit: int = 0 + gn_max_packet_lifetime: Optional[float] = None + gn_repetition_interval: Optional[int] = None + gn_max_repetition_time: Optional[int] = None communication_profile: CommunicationProfile = CommunicationProfile.UNSPECIFIED traffic_class: TrafficClass = field(default_factory=TrafficClass) + security_profile: SecurityProfile = SecurityProfile.NO_SECURITY + its_aid: int = 0 + security_permissions: bytes = b"\x00" length: int = 0 data: bytes = b"" @@ -75,6 +111,9 @@ def to_dict(self) -> dict: self.gn_destination_address.encode() ).decode("utf-8"), "gn_area": self.gn_area.to_dict(), + "gn_max_packet_lifetime": self.gn_max_packet_lifetime, + "gn_repetition_interval": self.gn_repetition_interval, + "gn_max_repetition_time": self.gn_max_repetition_time, "communication_profile": self.communication_profile.value, "traffic_class": b64encode(self.traffic_class.encode_to_bytes()).decode( "utf-8" @@ -126,6 +165,10 @@ def 
from_dict(cls, data: dict) -> "BTPDataRequest": gn_packet_transport_type=packet_transport_type, gn_destination_address=gn_destination_address, gn_area=area, + gn_max_hop_limit=data.get("gn_max_hop_limit", 1), + gn_max_packet_lifetime=data.get("gn_max_packet_lifetime"), + gn_repetition_interval=data.get("gn_repetition_interval"), + gn_max_repetition_time=data.get("gn_max_repetition_time"), communication_profile=communication_profile, traffic_class=traffic_class, length=length, @@ -136,24 +179,35 @@ def from_dict(cls, data: dict) -> "BTPDataRequest": @dataclass(frozen=True) class BTPDataIndication: """ - GN Data Indication class. As specified in ETSI EN 302 636-5-1 V2.1.0 (2017-05). Annex A3 + BTP Data Indication class. As specified in ETSI EN 302 636-5-1 V2.2.1 (2019-05). Annex A.3 Attributes ---------- source_port : int - (16 bit integer) Source Port. + (16 bit integer) Source Port. Optional; only present for BTP-A. destination_port : int (16 bit integer) Destination Port. destination_port_info : int - (16 bit integer) Destination Port Info. + (16 bit integer) Destination Port Info. Optional; only present for BTP-B. gn_packet_transport_type : PacketTransportType - Packet Transport Type. + GN Packet Transport Type. gn_destination_address : GNAddress - Destination Address. + GN Destination Address for GeoUnicast or geographical area for + GeoBroadcast/GeoAnycast, as generated by the source. gn_source_position_vector : LongPositionVector - Source Position Vector. + GN Source Position Vector; geographical position of the source. + gn_security_report : bytes or None + GN Security Report. Optional; result of the security processing of + the received packet. + gn_certificate_id : bytes or None + GN Certificate Id. Optional; identifier of the certificate used by + the sender. + gn_permissions : bytes or None + GN Permissions. Optional; permissions from the sender's certificate. gn_traffic_class : TrafficClass - Traffic Class. + GN Traffic Class. 
+ gn_remaining_packet_lifetime : float or None + GN Remaining Packet Lifetime in seconds. Optional. length : int Length of the payload. data : bytes @@ -163,13 +217,16 @@ class BTPDataIndication: source_port: int = 0 destination_port: int = 0 destination_port_info: int = 0 - destination_port_info: int = 0 gn_packet_transport_type: PacketTransportType = field( default_factory=PacketTransportType) gn_destination_address: GNAddress = field(default_factory=GNAddress) gn_source_position_vector: LongPositionVector = field( default_factory=LongPositionVector) + gn_security_report: Optional[bytes] = None + gn_certificate_id: Optional[bytes] = None + gn_permissions: Optional[bytes] = None gn_traffic_class: TrafficClass = field(default_factory=TrafficClass) + gn_remaining_packet_lifetime: Optional[float] = None length: int = 0 data: bytes = b"" @@ -215,7 +272,11 @@ def set_destination_port_and_info(self, destination_port: int, destination_port_ gn_packet_transport_type=self.gn_packet_transport_type, gn_destination_address=self.gn_destination_address, gn_source_position_vector=self.gn_source_position_vector, + gn_security_report=self.gn_security_report, + gn_certificate_id=self.gn_certificate_id, + gn_permissions=self.gn_permissions, gn_traffic_class=self.gn_traffic_class, + gn_remaining_packet_lifetime=self.gn_remaining_packet_lifetime, length=self.length, data=self.data, ) @@ -240,9 +301,13 @@ def to_dict(self) -> dict: "gn_source_position_vector": b64encode( self.gn_source_position_vector.encode() ).decode("utf-8"), + "gn_security_report": b64encode(self.gn_security_report).decode("utf-8") if self.gn_security_report is not None else None, + "gn_certificate_id": b64encode(self.gn_certificate_id).decode("utf-8") if self.gn_certificate_id is not None else None, + "gn_permissions": b64encode(self.gn_permissions).decode("utf-8") if self.gn_permissions is not None else None, "gn_traffic_class": b64encode( self.gn_traffic_class.encode_to_bytes() ).decode("utf-8"), + 
"gn_remaining_packet_lifetime": self.gn_remaining_packet_lifetime, "length": self.length, "data": b64encode(self.data).decode("utf-8"), } @@ -280,6 +345,13 @@ def from_dict(cls, data: dict) -> "BTPDataIndication": b64decode(traffic_b64)) else: gn_traffic_class = TrafficClass() + security_report_b64 = data.get("gn_security_report") + gn_security_report = b64decode(security_report_b64) if security_report_b64 is not None else None + certificate_id_b64 = data.get("gn_certificate_id") + gn_certificate_id = b64decode(certificate_id_b64) if certificate_id_b64 is not None else None + permissions_b64 = data.get("gn_permissions") + gn_permissions = b64decode(permissions_b64) if permissions_b64 is not None else None + gn_remaining_packet_lifetime = data.get("gn_remaining_packet_lifetime") length = data.get("length", 0) data_b64 = data.get("data") payload = b64decode(data_b64) if data_b64 else b"" @@ -290,7 +362,11 @@ def from_dict(cls, data: dict) -> "BTPDataIndication": gn_packet_transport_type=packet_transport_type, gn_destination_address=gn_destination_address, gn_source_position_vector=source_position_vector, + gn_security_report=gn_security_report, + gn_certificate_id=gn_certificate_id, + gn_permissions=gn_permissions, gn_traffic_class=gn_traffic_class, + gn_remaining_packet_lifetime=gn_remaining_packet_lifetime, length=length, data=payload, ) diff --git a/src/flexstack/facilities/ca_basic_service/ca_basic_service.py b/src/flexstack/facilities/ca_basic_service/ca_basic_service.py index 5c67b78..dfe47d0 100644 --- a/src/flexstack/facilities/ca_basic_service/ca_basic_service.py +++ b/src/flexstack/facilities/ca_basic_service/ca_basic_service.py @@ -1,7 +1,8 @@ """ -Cooperatie Awareness Basic Service +Cooperative Awareness Basic Service -This file contains the class for the Cooperative Awareness Basic Service. +This file contains the class for the Cooperative Awareness Basic Service, +strictly following ETSI TS 103 900 V2.2.1 (2025-02). 
""" from __future__ import annotations import logging @@ -21,7 +22,10 @@ class CooperativeAwarenessBasicService: """ - Cooperative Awareness Basic Service + Cooperative Awareness Basic Service — ETSI TS 103 900 V2.2.1. + + Call :meth:`start` to activate the service and :meth:`stop` to deactivate + it in accordance with §6.1.2. Attributes ---------- @@ -29,8 +33,6 @@ class CooperativeAwarenessBasicService: BTP Router. cam_coder : CAMCoder CAM Coder. - vehicle_data : VehicleData - Vehicle Data. cam_transmission_management : CAMTransmissionManagement CAM Transmission Management. cam_reception_management : CAMReceptionManagement @@ -44,7 +46,7 @@ def __init__( ldm: LDMFacility | None = None, ) -> None: """ - Initialize the Cooperative Awareness Basic Service. + Initialise the Cooperative Awareness Basic Service. Parameters ---------- @@ -52,7 +54,7 @@ def __init__( BTP Router. vehicle_data : VehicleData Vehicle Data. - ldm: LDMFacility + ldm : LDMFacility or None Local Dynamic Map (LDM) Service. """ self.logging = logging.getLogger("ca_basic_service") @@ -73,4 +75,22 @@ def __init__( ca_basic_service_ldm=ca_basic_service_ldm, ) - self.logging.info("CA Basic Service Started!") + self.logging.info("CA Basic Service initialised.") + + def start(self) -> None: + """ + Activate the CA service (§6.1.2). + + Starts the T_CheckCamGen timer in the transmission management. + """ + self.cam_transmission_management.start() + self.logging.info("CA Basic Service started.") + + def stop(self) -> None: + """ + Deactivate the CA service (§6.1.2). + + Stops the T_CheckCamGen timer in the transmission management. 
+ """ + self.cam_transmission_management.stop() + self.logging.info("CA Basic Service stopped.") diff --git a/src/flexstack/facilities/ca_basic_service/cam_asn1.py b/src/flexstack/facilities/ca_basic_service/cam_asn1.py index 79670a3..d2e541f 100644 --- a/src/flexstack/facilities/ca_basic_service/cam_asn1.py +++ b/src/flexstack/facilities/ca_basic_service/cam_asn1.py @@ -1,10 +1,32 @@ # pylint: skip-file +# ------------------------------------------------------------------- +# asn1tools adaptation notes (ETSI TS 103 900 V2.2.1, CAM-PDU-Descriptions) +# ------------------------------------------------------------------- +# 1. "WITH SUCCESSORS" removed from IMPORTS. +# The raw ETSI file imports the CDD module with +# "... major-version-4 (4) minor-version-2 (2)} WITH SUCCESSORS" +# asn1tools raises a ParseError on that keyword. Removing it has no +# functional impact because the CDD module is compiled in the same +# compilation unit (ETSI_ITS_CDD_ASN1_DESCRIPTIONS is prepended). +# +# 2. CLASS / information-object-class constructs are *kept unchanged*. +# asn1tools supports the CLASS keyword and WITH SYNTAX notation. +# The open-type field EXTENSION-CONTAINER-ID-AND-TYPE.&Type(...) +# is treated by asn1tools as raw bytes (opaque UPER octets). +# This matches the UPER spec for open types (X.691 §12): +# open_type_encoding = length_determinant || inner_uper_bytes +# To encode an extension container, encode the inner type (e.g. +# TwoWheelerContainer) to bytes first, then pass those bytes as +# containerData. To decode, take the containerData bytes and decode +# them against the appropriate type. This encoding is fully +# wire-interoperable with implementations using unaltered ASN.1. 
+# ------------------------------------------------------------------- from ...utils.asn1.etsi_its_cdd import ETSI_ITS_CDD_ASN1_DESCRIPTIONS CAM_ASN1_DESCRIPTIONS = ( ETSI_ITS_CDD_ASN1_DESCRIPTIONS + """ -CAM-PDU-Descriptions {itu-t (0) identified-organization (4) etsi (0) itsDomain (5) wg1 (1) camPduRelease2 (103900) major-version-2 (2) minor-version-1 (1)} +CAM-PDU-Descriptions {itu-t (0) identified-organization (4) etsi (0) itsDomain (5) wg1 (1) camPduRelease2 (103900) major-version-2 (2) minor-version-2 (2)} DEFINITIONS AUTOMATIC TAGS ::= @@ -14,8 +36,11 @@ ItsPduHeader, CauseCodeV2, ReferencePosition, AccelerationControl, Curvature, CurvatureCalculationMode, Heading, LanePosition, EmergencyPriority, EmbarkationStatus, Speed, DriveDirection, AccelerationComponent, StationType, ExteriorLights, DangerousGoodsBasic, SpecialTransportType, LightBarSirenInUse, VehicleRole, VehicleLength, VehicleWidth, Path, RoadworksSubCauseCode, ClosedLanes, TrafficRule, SpeedLimit, SteeringWheelAngle, PerformanceClass, YawRate, -PtActivation, ProtectedCommunicationZonesRSU, CenDsrcTollingZone, GenerationDeltaTime, BasicContainer +PtActivation, ProtectedCommunicationZonesRSU, CenDsrcTollingZone, GenerationDeltaTime, BasicContainer, BrakeControl, VehicleHeight2, WiperStatus, +GeneralizedLanePositions, PathPredictedList, CartesianAngle, Wgs84Angle, StabilityChangeIndication, VruSubProfileBicyclist, VruMovementControl, +BasicLaneConfiguration, PolygonalLine, MetaInformation, ConfidenceLevels, VehicleMovementControl +-- Adaptation: asn1tools-incompatible clause removed from import below; see Python header. FROM ETSI-ITS-CDD {itu-t (0) identified-organization (4) etsi (0) itsDomain (5) wg1 (1) 102894 cdd (2) major-version-4 (4) minor-version-2 (2)} ; @@ -61,13 +86,16 @@ * * @field specialVehicleContainer: The special container of the CAM shall be present as defined in clause 6.1.2. 
* The content of the container shall be set according to the value of the vehicleRole component as specified in Table 5. +* +* @field extensionContainers: the list of CAM extension containers, including its container type identifier and the container itself. */ CamParameters ::= SEQUENCE { basicContainer BasicContainer, highFrequencyContainer HighFrequencyContainer, lowFrequencyContainer LowFrequencyContainer OPTIONAL, specialVehicleContainer SpecialVehicleContainer OPTIONAL, - ... + ..., + extensionContainers WrappedExtensionContainers OPTIONAL } /** @@ -268,7 +296,7 @@ * @field lightBarSirenInUse: it indicates whether light-bar or a siren is in use by the vehicle originating the CAM. * * @field closedLanes: an optional component which provides information about the opening/closure status of the lanes ahead. Lanes are counted from -* the outside boarder of the road. If a lane is closed to traffic, the corresponding bit shall be set to 1. +* the inside boarder of the road. If a lane is closed to traffic, the corresponding bit shall be set to 1. */ RoadWorksContainerBasic ::= SEQUENCE { roadworksSubCauseCode RoadworksSubCauseCode OPTIONAL, @@ -332,10 +360,206 @@ * @field protectedCommunicationZonesRSU: an optional Information about position of a CEN DSRC Tolling Station operating in the 5,8 GHz frequency * band. If this information is provided by RSUs a receiving vehicle ITS-S is prepared to adopt mitigation techniques when being in the vicinity of * CEN DSRC tolling stations. - */ RSUContainerHighFrequency ::= SEQUENCE { protectedCommunicationZonesRSU ProtectedCommunicationZonesRSU OPTIONAL, + ... +} + +/** +* This information object class is an abstract template to instantiate containers. +* +* It shall include the following components: +* +* @field &id: the identifier of the container type. +* +* @field &Type: the container content. 
+* +*/ +EXTENSION-CONTAINER-ID-AND-TYPE ::= CLASS { + &id ExtensionContainerId UNIQUE, + &Type +} WITH SYNTAX {&Type IDENTIFIED BY &id} + +/** +* This DE represents the identifier of the container type. +*/ +ExtensionContainerId ::= INTEGER (1..16,...) + +/** +* These value assignments represent specific values of the container type identifier. +*/ +twoWheelerContainer ExtensionContainerId ::= 1 +eHorizonLocationSharingContainer ExtensionContainerId ::= 2 +veryLowFrequencyContainer ExtensionContainerId ::= 3 +pathPredictionContainer ExtensionContainerId ::= 4 +generalizedLanePositionsContainer ExtensionContainerId ::= 5 +vehicleMovementControlContainer ExtensionContainerId ::= 6 + +/** +* This information object set represents the association between the container type and the container content. +*/ +ExtensionContainers EXTENSION-CONTAINER-ID-AND-TYPE ::= { + {TwoWheelerContainer IDENTIFIED BY twoWheelerContainer} | + {EHorizonLocationSharingContainer IDENTIFIED BY eHorizonLocationSharingContainer} | + {VeryLowFrequencyContainer IDENTIFIED BY veryLowFrequencyContainer} | + {PathPredictionContainer IDENTIFIED BY pathPredictionContainer} | + {GeneralizedLanePositionsContainer IDENTIFIED BY generalizedLanePositionsContainer} | + {VehicleMovementControlContainer IDENTIFIED BY vehicleMovementControlContainer}, + ... +} + +/** +* This DF represents a CAM container preceded by its type identifier and a length indicator. +* +* It shall include the following components: +* +* @field containerId: the identifier of the container type. +* +* @field containerData: the container content consistent with the container type. +* +*/ +WrappedExtensionContainer ::= SEQUENCE { + containerId EXTENSION-CONTAINER-ID-AND-TYPE.&id( {ExtensionContainers} ), + containerData EXTENSION-CONTAINER-ID-AND-TYPE.&Type( {ExtensionContainers}{@containerId} ) +} + +/** +* This DF represents a list of CAM containers, each with their type identifier. 
+*/ +WrappedExtensionContainers ::= SEQUENCE SIZE(1..8,...) OF WrappedExtensionContainer + +/** +* This type contains detailed information about two wheelers. It is meant to use for StationType +* cyclist, moped and motorcycle. +* +* It shall include the following components: +* +* @field typeSpecificInformation: this data field contains type specific information about two wheelers. +* +* @field rollAngle: this data field describes the roll angle of the two wheeler. +* +* @field orientation: this data field describes the orientation of the two wheeler. +* +* @field stabilityChangeIndication: this data field describes if the two wheeler is about to lose control. +* +*/ +TwoWheelerContainer ::= SEQUENCE { + typeSpecificInformation TwoWheelerTypeSpecificInformation OPTIONAL, + rollAngle CartesianAngle OPTIONAL, + orientation Wgs84Angle OPTIONAL, + stabilityChangeIndication StabilityChangeIndication OPTIONAL, + ... +} + +/** +* This type contains type specific information about a two wheeler. +* +* It includes one of the following components: +* +* @field cyclist: it contains cyclist-specific information. +* +*/ +TwoWheelerTypeSpecificInformation ::= CHOICE { + cyclist CyclistTypeSpecificInformation, + ... +} + +/** +* This type contains type-specific information about cyclists. +* +* It shall include the following components: +* +* @field vruSubProfileBicyclist: it indicates the detailed type of the cyclist. +* +* @field vruMovementControl: it includes information about the movement control of the bicycle. +* +*/ +CyclistTypeSpecificInformation ::= SEQUENCE { + vruSubProfileBicyclist VruSubProfileBicyclist (unavailable | bicyclist | e-scooter | pedelec | speed-pedelec | roadbike | childrensbike) OPTIONAL, + vruMovementControl VruMovementControl OPTIONAL, + ... +} + +/** +* This type contains contextual, map-based location information. 
+* eHorizon is defined as a tool to convey the part of the road network and its characteristics derived from map data located in front of and behind the vehicle along the road. +* +* It shall include the following components: +* +* @field segmentAhead: the road segment that the vehicle is predicted to reach, starting from the reference position. +* At least one node must be filled in so that the current map position can be calculated on the receiver side. +* +* @field nodeProbabilities: confidence values for each node in segmentAhead, indicating how confident we are that the ITS station will reach that point. +* +* @field segmentBehind: the road segment that the vehicle has passed, based on the collected data, starting from the reference position. +* At least one node must be filled in so that the current map position can be calculated on the receiver side. +* +* @field laneLevelDetails: provides information about the configuration of the road at the position indicated by the component referencePosition of the Basic Container and for a given reference direction. +* +* @field segmentSource: it represents the origin of the map-specific data. +* +*/ +EHorizonLocationSharingContainer ::= SEQUENCE { + segmentAhead PolygonalLine, + nodeProbabilities ConfidenceLevels OPTIONAL, + segmentBehind PolygonalLine, + laneLevelDetails BasicLaneConfiguration OPTIONAL, + segmentSource MetaInformation (WITH COMPONENTS {..., confidenceValue ABSENT}) OPTIONAL, + ... +} + +/** +* This type represents the very low frequency container. +* +* It shall include the following components: +* +* @field vehicleHeight: this component represents the height of the vehicle that originates the CAM. +* +* @field wiperStatus: this component represents the status of the wipers of the vehicle that originates the CAM, at the time indicated by generationDeltaTime. 
+* +* @field brakeControl: this component represents the status of the brake control system of the vehicle that originates the CAM, +* at the time indicated by generationDeltaTime and during the period 10 seconds before that time. +*/ +VeryLowFrequencyContainer ::= SEQUENCE { + vehicleHeight VehicleHeight2 OPTIONAL, + wiperStatus WiperStatus OPTIONAL, + brakeControl BrakeControl OPTIONAL, + ... +} + +/** +* This type represents the path prediction container. +* +* Contains information about the possible future paths of ITS station. +*/ +PathPredictionContainer ::= SEQUENCE { + pathPredictedList PathPredictedList, + ... +} + +/** +* This type represents the generalized lane positions container. +* +* Contains detailed information about the transversal position of the ITS station with respect to the road and potentially about the lane type. +*/ +GeneralizedLanePositionsContainer ::= SEQUENCE { + generalizedLanePositions GeneralizedLanePositions, + ... +} + +/** +* This type represents the vehicle movement control container. +* +* Contains information about the current vehicle movement control status +* of ITS station. +* This contains: +* - brake and acceleration pedal position status information +* - mechanism for lateral, longitudinal movements dimensions +* of the vehicle +*/ +VehicleMovementControlContainer ::= SEQUENCE { + vehicleMovementControl VehicleMovementControl, ... } diff --git a/src/flexstack/facilities/ca_basic_service/cam_coder.py b/src/flexstack/facilities/ca_basic_service/cam_coder.py index 467ee5f..9a241ec 100644 --- a/src/flexstack/facilities/ca_basic_service/cam_coder.py +++ b/src/flexstack/facilities/ca_basic_service/cam_coder.py @@ -1,11 +1,43 @@ +from __future__ import annotations + """ CAM Coder. This file contains the class for the CAM Coder. 
+ +Extension-container (WrappedExtensionContainer) open-type handling +------------------------------------------------------------------ +The CAM ASN.1 uses an information-object CLASS to associate each +ExtensionContainerId value with a concrete container type. asn1tools +treats the open-type field ``containerData`` as raw bytes (the UPER +encoding of the inner container without any additional wrapper). + +To *encode* an extension container, call + ``encode_extension_container(type_name, value)`` +which returns the bytes that must be placed in ``containerData``. + +To *decode* an extension container, call + ``decode_extension_container(container_id, data_bytes)`` +which dispatches on ``container_id`` and returns the decoded dict. + +This pattern is fully wire-interoperable with implementations compiled +from the unaltered ETSI ASN.1 module because the on-wire representation +is identical: UPER length-determinant followed by the inner type's UPER +encoding (X.691 §12, open-type encoding). """ import asn1tools from .cam_asn1 import CAM_ASN1_DESCRIPTIONS +# Maps ExtensionContainerId integer values to ASN.1 type names. +_EXTENSION_CONTAINER_ID_TO_TYPE: dict[int, str] = { + 1: "TwoWheelerContainer", + 2: "EHorizonLocationSharingContainer", + 3: "VeryLowFrequencyContainer", + 4: "PathPredictionContainer", + 5: "GeneralizedLanePositionsContainer", + 6: "VehicleMovementControlContainer", +} + class CAMCoder: """ @@ -54,3 +86,60 @@ def decode(self, cam: bytes) -> dict: CAM message. """ return self.asn_coder.decode("CAM", cam) + + def encode_extension_container(self, container_id: int, value: dict) -> bytes: + """ + Encode an extension container value to the raw bytes that go into + the ``containerData`` open-type field of a WrappedExtensionContainer. + + Parameters + ---------- + container_id : int + The ExtensionContainerId (1=TwoWheeler, 2=eHorizon, 3=VLF, + 4=PathPrediction, 5=GenLanePos, 6=VehicleMovementControl). 
+ value : dict + The container content as a Python dict. + + Returns + ------- + bytes + UPER-encoded inner container bytes suitable for + ``containerData``. + + Raises + ------ + ValueError + If *container_id* is not a known ExtensionContainerId. + """ + type_name = _EXTENSION_CONTAINER_ID_TO_TYPE.get(container_id) + if type_name is None: + raise ValueError(f"Unknown ExtensionContainerId: {container_id}") + return self.asn_coder.encode(type_name, value) + + def decode_extension_container(self, container_id: int, data: bytes) -> dict: + """ + Decode the raw ``containerData`` bytes of a WrappedExtensionContainer + into a Python dict. + + Parameters + ---------- + container_id : int + The ExtensionContainerId value from the same + WrappedExtensionContainer. + data : bytes + The raw ``containerData`` bytes as returned by the CAM decoder. + + Returns + ------- + dict + Decoded container content. + + Raises + ------ + ValueError + If *container_id* is not a known ExtensionContainerId. + """ + type_name = _EXTENSION_CONTAINER_ID_TO_TYPE.get(container_id) + if type_name is None: + raise ValueError(f"Unknown ExtensionContainerId: {container_id}") + return self.asn_coder.decode(type_name, data) diff --git a/src/flexstack/facilities/ca_basic_service/cam_reception_management.py b/src/flexstack/facilities/ca_basic_service/cam_reception_management.py index c563454..c296a44 100644 --- a/src/flexstack/facilities/ca_basic_service/cam_reception_management.py +++ b/src/flexstack/facilities/ca_basic_service/cam_reception_management.py @@ -1,10 +1,18 @@ """ CA Reception Management. -This file contains the class for the CA Reception Management. +This file contains the class for the CA Reception Management, +strictly following ETSI TS 103 900 V2.2.1 (2025-02). + +Key standard-compliance additions: + - Decoding exceptions are caught and logged; the LDM/applications are NOT + updated with corrupt data (Annex B.3.3.1). 
+ - Application callbacks (IF.CAM) can be registered via + :meth:`add_application_callback`. """ from __future__ import annotations import logging +from typing import Callable from .cam_transmission_management import GenerationDeltaTime from .cam_ldm_adaptation import CABasicServiceLDM @@ -16,7 +24,7 @@ class CAMReceptionManagement: """ - This class is responsible for the CAM reception management. + CAM Reception Management — ETSI TS 103 900 V2.2.1 §6.2 / Annex B.3. Attributes ---------- @@ -24,7 +32,7 @@ class CAMReceptionManagement: CAM Coder object. btp_router : BTPRouter BTP Router object. - ca_basic_service_ldm : CABasicServiceLDM + ca_basic_service_ldm : CABasicServiceLDM or None CA Basic Service LDM. """ @@ -34,18 +42,6 @@ def __init__( btp_router: BTPRouter, ca_basic_service_ldm: CABasicServiceLDM | None = None, ) -> None: - """ - Initialize the CAM Reception Management. - - Parameters - ---------- - cam_coder : CAMCoder - CAM Coder object. - btp_router : BTPRouter - BTP Router object. - ldm: LDMFacility - Local Dynamic Map where the data will be stashed. - """ self.logging = logging.getLogger("ca_basic_service") self.cam_coder = cam_coder @@ -54,27 +50,61 @@ def __init__( port=2001, callback=self.reception_callback ) self.ca_basic_service_ldm = ca_basic_service_ldm + self._application_callbacks: list[Callable[[dict], None]] = [] + + def add_application_callback(self, callback: Callable[[dict], None]) -> None: + """ + Register an application callback (IF.CAM — §6.2). + + The callback receives the decoded CAM dict (with an added + ``utc_timestamp`` key) whenever a valid CAM is received. + + Parameters + ---------- + callback : callable + Function accepting a single CAM dict argument. + """ + self._application_callbacks.append(callback) def reception_callback(self, btp_indication: BTPDataIndication) -> None: """ - Callback for the reception of a CAM. + BTP indication callback for received CAMs. 
+ + Decoding exceptions are caught here so that the LDM and application + layers are never updated with malformed data (Annex B.3.3.1). Parameters ---------- btp_indication : BTPDataIndication - BTP Data Indication. + BTP Data Indication carrying the raw CAM payload. """ - cam = self.cam_coder.decode(btp_indication.data) + try: + cam = self.cam_coder.decode(btp_indication.data) + except Exception: + self.logging.exception( + "CAM decoding failed (Annex B.3.3.1) — discarding packet" + ) + return + generation_delta_time = GenerationDeltaTime( msec=cam["cam"]["generationDeltaTime"] ) utc_timestamp = generation_delta_time.as_timestamp_in_certain_point( - int(TimeService.time()*1000)) + int(TimeService.time() * 1000) + ) cam["utc_timestamp"] = utc_timestamp + if self.ca_basic_service_ldm is not None: self.ca_basic_service_ldm.add_provider_data_to_ldm(cam) + + for cb in self._application_callbacks: + try: + cb(cam) + except Exception: + self.logging.exception("Application CAM callback raised an exception") + self.logging.info( - "Received CAM with timestamp: %s, station_id: %s", + "Received CAM: generationDeltaTime=%s, stationId=%s", cam["cam"]["generationDeltaTime"], cam["header"]["stationId"], ) diff --git a/src/flexstack/facilities/ca_basic_service/cam_transmission_management.py b/src/flexstack/facilities/ca_basic_service/cam_transmission_management.py index 8ae94e4..e87ef8c 100644 --- a/src/flexstack/facilities/ca_basic_service/cam_transmission_management.py +++ b/src/flexstack/facilities/ca_basic_service/cam_transmission_management.py @@ -1,12 +1,30 @@ """ CAM Transmission Management -This file implements the CAM Transmission Management required by the CAM Basic Service. +This file implements the CAM Transmission Management required by the CAM Basic Service, +strictly following ETSI TS 103 900 V2.2.1 (2025-02). + +Key behavioural changes versus the pre-standard implementation: + - Timer-based (T_CheckCamGen) instead of GPS-callback-reactive (§6.1.3, Annex B). 
+ - T_GenCam is initialised to T_GenCamMax (not T_GenCamMin) as mandated by §6.1.3. + - Condition 1 (dynamics: heading/position/speed) and Condition 2 (time) are both + evaluated on every T_CheckCamGen tick. + - N_GenCam counter resets T_GenCam to T_GenCamMax after N_GenCam consecutive + condition-1 CAMs. + - Low-Frequency, Special-Vehicle, Very-Low-Frequency and Two-Wheeler extension + containers are included according to §6.1.3. + - CAM construction failures are handled per Annex B.2.5 (skip, continue timer). + - GN max packet lifetime set to 1000 ms per §5.3.4.1. + - CA service start/stop correspond to ITS-S activation/deactivation per §6.1.2. """ from __future__ import annotations -from math import trunc + import logging +import random +import threading +from math import atan2, cos, radians, sin, sqrt, trunc +from typing import Optional from dateutil import parser from dataclasses import dataclass, field from .cam_coder import CAMCoder @@ -18,19 +36,51 @@ CommunicationProfile, TrafficClass, ) -from ...utils.time_service import ITS_EPOCH_MS, ELAPSED_MILLISECONDS +from ...security.security_profiles import SecurityProfile +from ...utils.time_service import ITS_EPOCH_MS, ELAPSED_MILLISECONDS, TimeService from .cam_ldm_adaptation import CABasicServiceLDM -T_GEN_CAM_MIN = 100 # T_GenCamMin [in ms] -T_GEN_CAM_MAX = 1000 # ms -T_CHECK_CAM_GEN = ( - # T_CheckCamGen [in ms] Shall be equal to or less than T_GenCamMin - T_GEN_CAM_MIN -) -T_GEN_CAM_DCC = ( - # T_GenCam_DCC [in ms] T_GenCamMin ≤ T_GenCam_DCC ≤ T_GenCamMax - T_GEN_CAM_MIN -) +# --------------------------------------------------------------------------- +# Timing constants (ETSI TS 103 900 V2.2.1 §6.1.3) +# --------------------------------------------------------------------------- +T_GEN_CAM_MIN = 100 # T_GenCamMin [ms] +T_GEN_CAM_MAX = 1000 # T_GenCamMax [ms] +T_CHECK_CAM_GEN = T_GEN_CAM_MIN # T_CheckCamGen ≤ T_GenCamMin [ms] +T_GEN_CAM_DCC = T_GEN_CAM_MIN # T_GenCam_DCC ∈ [T_GenCamMin, T_GenCamMax] [ms] + 
+# --------------------------------------------------------------------------- +# Optional-container intervals (§6.1.3) +# --------------------------------------------------------------------------- +N_GEN_CAM_DEFAULT = 3 # N_GenCam: max consecutive high-dynamic CAMs +T_GEN_CAM_LF_MS = 500 # Low-frequency container minimum interval [ms] +T_GEN_CAM_SPECIAL_MS = 500 # Special-vehicle container minimum interval [ms] +T_GEN_CAM_VLF_MS = 10_000 # Very-low-frequency container minimum interval [ms] + +# --------------------------------------------------------------------------- +# Station types that must include the Two-Wheeler extension container (§6.1.3) +# cyclist(2), moped(3), motorcycle(4) +# --------------------------------------------------------------------------- +TWO_WHEELER_STATION_TYPES: frozenset = frozenset({2, 3, 4}) + +# --------------------------------------------------------------------------- +# VehicleRole enumeration names (index = integer value, §6.1.3 / CDD) +# --------------------------------------------------------------------------- +_VEHICLE_ROLE_NAMES = [ + "default", "publicTransport", "specialTransport", "dangerousGoods", + "roadWork", "rescue", "emergency", "safetyCar", + "agricultural", "commercial", "military", "roadOperator", + "taxi", "reserved1", "reserved2", "reserved3", +] + + +def _haversine_m(lat1: float, lon1: float, lat2: float, lon2: float) -> float: + """Return the great-circle distance in metres between two WGS-84 points.""" + R = 6_371_000.0 + dlat = radians(lat2 - lat1) + dlon = radians(lon2 - lon1) + a = (sin(dlat / 2) ** 2 + + cos(radians(lat1)) * cos(radians(lat2)) * sin(dlon / 2) ** 2) + return R * 2 * atan2(sqrt(a), sqrt(max(0.0, 1.0 - a))) @dataclass(frozen=True) @@ -50,6 +100,15 @@ class VehicleData: Vehicle Length as specified in ETSI TS 102 894-2 V2.3.1 (2024-08). vehicle_width : int Vehicle Width as specified in ETSI TS 102 894-2 V2.3.1 (2024-08). + vehicle_role : int + VehicleRole (0=default). 
Used in the Low-Frequency container and to + decide whether a Special-Vehicle container is required (§6.1.3). + exterior_lights : bytes + ExteriorLights BIT STRING (SIZE(8)). One byte; bits ordered MSB→LSB + correspond to lowBeam(0)…parkingLights(7). Default = all off. + special_vehicle_data : dict or None + Special vehicle container data (CHOICE value dict), e.g. + ``("emergencyContainer", {...})``. None if not applicable. """ station_id: int = 0 @@ -62,6 +121,9 @@ class VehicleData: } ) vehicle_width: int = 62 + vehicle_role: int = 0 + exterior_lights: bytes = field(default=b"\x00") + special_vehicle_data: Optional[dict] = None def __check_valid_station_id(self) -> None: if self.station_id < 0 or self.station_id > 4294967295: @@ -86,12 +148,22 @@ def __check_valid_vehicle_width(self) -> None: if self.vehicle_width < 0 or self.vehicle_width > 62: raise ValueError("Vehicle width must be between 0 and 62") + def __check_valid_vehicle_role(self) -> None: + if self.vehicle_role < 0 or self.vehicle_role > 15: + raise ValueError("vehicle_role must be between 0 and 15") + + def __check_valid_exterior_lights(self) -> None: + if len(self.exterior_lights) < 1: + raise ValueError("exterior_lights must be at least 1 byte") + def __post_init__(self) -> None: self.__check_valid_station_id() self.__check_valid_station_type() self.__check_valid_drive_direction() self.__check_valid_vehicle_length() self.__check_valid_vehicle_width() + self.__check_valid_vehicle_role() + self.__check_valid_exterior_lights() @dataclass(frozen=True) @@ -545,32 +617,30 @@ def __str__(self) -> str: class CAMTransmissionManagement: """ - CAM Transmission Management class. - This sub-function ahould implement the protocol operation of the originating ITS-S, as specified - in ETSI TS 102 894-2 V2.3.1 (2024-08) clause C.2, including in particular: - - Activation and termination of CAM transmission operation. - - Determination of the CAM generation frequency. - - Trigger the generation of CAM. 
+ CAM Transmission Management — ETSI TS 103 900 V2.2.1 §6.1. - By now, it implements the same algorithms but being reactive to when a new position is received. + Protocol operation is timer-based (T_CheckCamGen) rather than + GPS-callback-reactive. Call :meth:`start` to activate the service and + :meth:`stop` to deactivate it (§6.1.2). + + The :meth:`location_service_callback` only updates the current position + cache; the T_CheckCamGen timer evaluates CAM generation conditions on + every tick (§6.1.3, Annex B.2.4). Attributes ---------- btp_router : BTPRouter BTP Router. vehicle_data : VehicleData - Vehicle Data. + Vehicle Data (static parameters). cam_coder : CAMCoder - CAM Coder. - ca_basic_service_ldm : CABasicServiceLDM - CA Basic Service LDM. + CAM encoder/decoder. + ca_basic_service_ldm : CABasicServiceLDM or None + Local Dynamic Map adapter; may be None. t_gen_cam : int - Time between CAM generations. - last_cam_sent : CooperativeAwarenessMessage - Last CAM sent. - current_cam_to_send : CooperativeAwarenessMessage - Current CAM to send. - + Current T_GenCam upper bound [ms]. Starts at T_GenCamMax per §6.1.3. + last_cam_generation_delta_time : GenerationDeltaTime or None + GenerationDeltaTime of the most recently sent CAM (legacy attribute). """ def __init__( @@ -578,68 +648,351 @@ def __init__( btp_router: BTPRouter, cam_coder: CAMCoder, vehicle_data: VehicleData, - ca_basic_service_ldm: CABasicServiceLDM | None = None, + ca_basic_service_ldm: Optional[CABasicServiceLDM] = None, ) -> None: - """ - Initialize the CAM Transmission Management. - """ self.logging = logging.getLogger("ca_basic_service") self.btp_router: BTPRouter = btp_router self.vehicle_data = vehicle_data self.cam_coder = cam_coder self.ca_basic_service_ldm = ca_basic_service_ldm - # self.T_GenCam_DCC = T_GenCamMin We don't have a DCC yet. 
- self.t_gen_cam = T_GEN_CAM_MIN - self.last_cam_generation_delta_time: GenerationDeltaTime | None = None + + # §6.1.3 — T_GenCam starts at T_GenCamMax (not T_GenCamMin!) + self.t_gen_cam: int = T_GEN_CAM_MAX + self._n_gen_cam_counter: int = 0 # consecutive condition-1 CAMs + + # Dynamics state of the last transmitted CAM + self._last_cam_time_ms: Optional[int] = None + self._last_cam_heading: Optional[float] = None # degrees + self._last_cam_lat: Optional[float] = None + self._last_cam_lon: Optional[float] = None + self._last_cam_speed: Optional[float] = None # m/s + + # Container timing state + self._cam_count: int = 0 # CAMs sent since start() + self._last_lf_time_ms: Optional[int] = None + self._last_vlf_time_ms: Optional[int] = None + self._last_special_time_ms: Optional[int] = None + + # Path history for Low-Frequency container (§6.1.3). + # Stored as list of (lat, lon, time_ms) oldest→newest; max 40 entries. + self._path_history: list = [] + + # Current GPS/position data from the location service + self._current_tpv: Optional[dict] = None + self._tpv_lock = threading.Lock() + + # T_CheckCamGen timer + self._active: bool = False + self._timer: Optional[threading.Timer] = None + + # Legacy compatibility attribute + self.last_cam_generation_delta_time: Optional[GenerationDeltaTime] = None + + # ------------------------------------------------------------------ + # Service lifecycle (§6.1.2) + # ------------------------------------------------------------------ + + def start(self) -> None: + """Activate the CA service. 
Starts the T_CheckCamGen timer (§6.1.2).""" + if self._active: + return + self._active = True + # Reset per-activation state + self._cam_count = 0 + self._last_cam_time_ms = None + self._last_cam_heading = None + self._last_cam_lat = None + self._last_cam_lon = None + self._last_cam_speed = None + self._last_lf_time_ms = None + self._last_vlf_time_ms = None + self._last_special_time_ms = None + self._path_history.clear() + self.t_gen_cam = T_GEN_CAM_MAX + self._n_gen_cam_counter = 0 + # Annex B.2.4 step 1 — non-clock-synchronised start (random initial delay) + initial_delay_s = random.uniform(0.0, T_CHECK_CAM_GEN / 1000.0) + self._schedule_next_check(initial_delay_s) + + def stop(self) -> None: + """Deactivate the CA service. Cancels the T_CheckCamGen timer (§6.1.2).""" + self._active = False + if self._timer is not None: + self._timer.cancel() + self._timer = None + + # ------------------------------------------------------------------ + # Location service integration + # ------------------------------------------------------------------ def location_service_callback(self, tpv: dict) -> None: """ - Callback function for location service. - - The Cooperative Awareness Service gets triggered everytime the location service gets a - new position. - - TODO: Once the DCC is implemented, all conditions should be checked before sending a CAM. 
- 1) The time elapsed since the last CAM generation is equal to or greater than T_GenCam_Dcc, - as applicable, and one of the following ITS-S dynamics related conditions is given: - - the absolute difference between the current heading of the originating ITS-S and the - heading included in the CAM previously transmitted by the originating ITS-S exceeds 4°; - - the distance between the current position of the originating ITS-S and the position - included in the CAM previously transmitted by the originating ITS-S exceeds 4 m; - - the absolute difference between the current speed of the originating ITS-S and the - speed included in the CAM previously transmitted by the originating ITS-S exceeds - 0,5 m/s. - 2) The time elapsed since the last CAM generation is equal to or greater than T_GenCam and, - in the case of ITS-G5, is also equal to or greater than T_GenCam_Dcc. - If one of the above two conditions is satisfied, a CAM shall be generated immediately. + Cache the latest position data (§6.1.3). + + This method no longer triggers CAM generation directly. The + T_CheckCamGen timer evaluates generation conditions at each tick. Parameters ---------- tpv : dict - GPSD TP + GPSD TPV message or compatible position dict. + """ + with self._tpv_lock: + self._current_tpv = tpv + + # ------------------------------------------------------------------ + # Timer callbacks (Annex B.2.4) + # ------------------------------------------------------------------ + + def _schedule_next_check(self, delay_s: Optional[float] = None) -> None: + """Schedule the next T_CheckCamGen evaluation.""" + if not self._active: + return + if delay_s is None: + delay_s = T_CHECK_CAM_GEN / 1000.0 + self._timer = threading.Timer(delay_s, self._check_cam_conditions) + self._timer.daemon = True + self._timer.start() + + def _check_cam_conditions(self) -> None: + """ + T_CheckCamGen expiry callback (Annex B.2.4 steps 2–7). 
+ + Evaluates conditions 1 and 2, generates and sends a CAM if either is + satisfied, then reschedules the timer. + """ + if not self._active: + return + try: + self._evaluate_and_maybe_send() + finally: + # Annex B.2.4 step 8 — always restart T_CheckCamGen + self._schedule_next_check() + + def _evaluate_and_maybe_send(self) -> None: + """Evaluate CAM generation conditions and send if required.""" + with self._tpv_lock: + tpv = self._current_tpv + if tpv is None: + return + + now_ms = int(TimeService.time() * 1000) + + # First CAM after activation — send immediately (no elapsed constraint) + if self._last_cam_time_ms is None: + self._generate_and_send_cam(tpv, now_ms, condition=1) + return + + elapsed_ms = now_ms - self._last_cam_time_ms + + # Condition 1 (§6.1.3): elapsed ≥ T_GenCam_DCC AND dynamics changed + if elapsed_ms >= T_GEN_CAM_DCC and self._check_dynamics(tpv): + self._generate_and_send_cam(tpv, now_ms, condition=1) + return + + # Condition 2 (§6.1.3): elapsed ≥ T_GenCam AND elapsed ≥ T_GenCam_DCC + if elapsed_ms >= self.t_gen_cam and elapsed_ms >= T_GEN_CAM_DCC: + self._generate_and_send_cam(tpv, now_ms, condition=2) + + # ------------------------------------------------------------------ + # Dynamics check — §6.1.3 Condition 1 + # ------------------------------------------------------------------ + + def _check_dynamics(self, tpv: dict) -> bool: + """ + Return True if at least one dynamics threshold is exceeded. 
+ + Thresholds (§6.1.3): + * |Δheading| > 4° + * |Δposition| > 4 m (haversine) + * |Δspeed| > 0,5 m/s + """ + if self._last_cam_heading is None: + return True # No reference — treat as changed + + # Heading + if "track" in tpv: + diff = abs(tpv["track"] - self._last_cam_heading) + if diff > 180.0: + diff = 360.0 - diff + if diff > 4.0: + return True + + # Position + if ("lat" in tpv and "lon" in tpv + and self._last_cam_lat is not None + and self._last_cam_lon is not None): + if _haversine_m(self._last_cam_lat, self._last_cam_lon, + tpv["lat"], tpv["lon"]) > 4.0: + return True + + # Speed + if "speed" in tpv and self._last_cam_speed is not None: + if abs(tpv["speed"] - self._last_cam_speed) > 0.5: + return True + + return False + + # ------------------------------------------------------------------ + # Optional container inclusion rules (§6.1.3) + # ------------------------------------------------------------------ + + def _should_include_lf(self, now_ms: int) -> bool: + """Low-Frequency container: first CAM, then every ≥ 500 ms.""" + if self._cam_count == 0: + return True + if self._last_lf_time_ms is None: + return True + return (now_ms - self._last_lf_time_ms) >= T_GEN_CAM_LF_MS + + def _should_include_special_vehicle(self, now_ms: int) -> bool: + """Special-Vehicle container: first CAM (if role ≠ default), then ≥ 500 ms.""" + if self.vehicle_data.vehicle_role == 0: + return False + if self._cam_count == 0: + return True + if self._last_special_time_ms is None: + return True + return (now_ms - self._last_special_time_ms) >= T_GEN_CAM_SPECIAL_MS + + def _should_include_vlf( + self, now_ms: int, include_lf: bool, include_special: bool + ) -> bool: + """ + Very-Low-Frequency extension container (§6.1.3): + - Second CAM after activation (cam_count == 1). + - After that: ≥ 10 s elapsed AND LF/special containers NOT included. 
""" + if self._cam_count == 1: + return True + if self._last_vlf_time_ms is None: + return False + return ( + (now_ms - self._last_vlf_time_ms) >= T_GEN_CAM_VLF_MS + and not include_lf + and not include_special + ) + + def _should_include_two_wheeler(self) -> bool: + """Two-Wheeler extension container in ALL CAMs for cyclist/moped/motorcycle.""" + return self.vehicle_data.station_type in TWO_WHEELER_STATION_TYPES + + # ------------------------------------------------------------------ + # Low-Frequency container helpers + # ------------------------------------------------------------------ + + def _build_lf_container(self, tpv: dict) -> dict: + """Build the BasicVehicleContainerLowFrequency dict.""" + role_idx = self.vehicle_data.vehicle_role + role_name = ( + _VEHICLE_ROLE_NAMES[role_idx] + if 0 <= role_idx < len(_VEHICLE_ROLE_NAMES) + else "default" + ) + return { + "vehicleRole": role_name, + "exteriorLights": (self.vehicle_data.exterior_lights, 8), + "pathHistory": self._get_path_history(tpv), + } + + def _get_path_history(self, current_tpv: dict) -> list: + """ + Convert the stored path history to a list of PathPoint dicts relative + to the current position. Entries outside the DeltaLatitude/DeltaLongitude + valid range (-131071..131072) are dropped. 
+ """ + current_lat = current_tpv.get("lat") + current_lon = current_tpv.get("lon") + if current_lat is None or current_lon is None or not self._path_history: + return [] + + now_ms = int(TimeService.time() * 1000) + result = [] + for h_lat, h_lon, h_time_ms in reversed(self._path_history): + delta_lat = round((h_lat - current_lat) * 10_000_000) + delta_lon = round((h_lon - current_lon) * 10_000_000) + if not (-131071 <= delta_lat <= 131072): + break + if not (-131071 <= delta_lon <= 131072): + break + delta_time_10ms = max(1, min(65534, round((now_ms - h_time_ms) / 10))) + result.append({ + "pathPosition": { + "deltaLatitude": delta_lat, + "deltaLongitude": delta_lon, + "deltaAltitude": 12800, # unavailable + }, + "pathDeltaTime": delta_time_10ms, + }) + if len(result) >= 23: # ASN.1 WITH COMPONENTS limit in LF container + break + return result + + # ------------------------------------------------------------------ + # CAM generation and transmission (Annex B.2.4/B.2.5) + # ------------------------------------------------------------------ + + def _generate_and_send_cam( + self, tpv: dict, now_ms: int, condition: int + ) -> None: + """ + Build the CAM, encode it and transmit it via BTP (Annex B.2.4 steps 3–6). + + If CAM construction or encoding fails (Annex B.2.5) the transmission is + skipped and the timer continues. 
+ """ + elapsed_ms = ( + (now_ms - self._last_cam_time_ms) + if self._last_cam_time_ms is not None + else 0 + ) + + include_lf = self._should_include_lf(now_ms) + include_special = self._should_include_special_vehicle(now_ms) + include_vlf = self._should_include_vlf(now_ms, include_lf, include_special) + include_two_wheeler = self._should_include_two_wheeler() + + # Build the CAM PDU cam = CooperativeAwarenessMessage() cam.fullfill_with_vehicle_data(self.vehicle_data) cam.fullfill_with_tpv_data(tpv) - if self.last_cam_generation_delta_time is None: + if include_lf: + cam.cam["cam"]["camParameters"]["lowFrequencyContainer"] = ( + "basicVehicleContainerLowFrequency", + self._build_lf_container(tpv), + ) + + if include_special and self.vehicle_data.special_vehicle_data is not None: + cam.cam["cam"]["camParameters"]["specialVehicleContainer"] = ( + self.vehicle_data.special_vehicle_data + ) + + extension_containers = [] + if include_two_wheeler: + tw_bytes = self.cam_coder.encode_extension_container(1, {}) + extension_containers.append({"containerId": 1, "containerData": tw_bytes}) + if include_vlf: + vlf_bytes = self.cam_coder.encode_extension_container(3, {}) + extension_containers.append({"containerId": 3, "containerData": vlf_bytes}) + if extension_containers: + cam.cam["cam"]["camParameters"]["extensionContainers"] = extension_containers + + # Annex B.2.5 — construction exception: skip this transmission + try: self._send_cam(cam) + except Exception: + self.logging.exception( + "CAM construction or encoding failed (Annex B.2.5) — skipping" + ) return - received_generation_delta_time = GenerationDeltaTime.from_timestamp( - parser.parse(tpv["time"]).timestamp() - ) - if ( - received_generation_delta_time - self.last_cam_generation_delta_time - >= self.t_gen_cam - ): - self._send_cam(cam) + + # Update state after successful transmission (Annex B.2.4 step 5) + self._update_send_state(tpv, now_ms, elapsed_ms, condition, + include_lf, include_special, include_vlf) def 
_send_cam(self, cam: CooperativeAwarenessMessage) -> None: - """ - Send the next CAM. - """ - if self.ca_basic_service_ldm is not None: - self.ca_basic_service_ldm.add_provider_data_to_ldm(cam.cam) + """Encode and transmit a CAM PDU via BTP-B/SHB (§5.3.4.1).""" data = self.cam_coder.encode(cam.cam) request = BTPDataRequest( btp_type=CommonNH.BTP_B, @@ -647,17 +1000,78 @@ def _send_cam(self, cam: CooperativeAwarenessMessage) -> None: gn_packet_transport_type=PacketTransportType(), communication_profile=CommunicationProfile.UNSPECIFIED, traffic_class=TrafficClass(), + gn_max_packet_lifetime=1.0, # §5.3.4.1: max 1000 ms + security_profile=SecurityProfile.COOPERATIVE_AWARENESS_MESSAGE, + its_aid=36, data=data, length=len(data), ) - self.btp_router.btp_data_request(request) + if self.ca_basic_service_ldm is not None: + self.ca_basic_service_ldm.add_provider_data_to_ldm(cam.cam) self.logging.info( - "Sent CAM message with timestamp: %d, station_id: %d", + "Sent CAM: generationDeltaTime=%d, stationId=%d", cam.cam["cam"]["generationDeltaTime"], cam.cam["header"]["stationId"], ) + # ------------------------------------------------------------------ + # Post-transmission state update (§6.1.3 T_GenCam management) + # ------------------------------------------------------------------ + + def _update_send_state( + self, + tpv: dict, + now_ms: int, + elapsed_ms: int, + condition: int, + include_lf: bool, + include_special: bool, + include_vlf: bool, + ) -> None: + """Update all state variables after a successful CAM transmission.""" + # T_GenCam management (§6.1.3 Annex B.2.4 step 5) + if condition == 1: + # Set T_GenCam to elapsed time (clamped to [T_GenCamMin, T_GenCamMax]) + self.t_gen_cam = max(T_GEN_CAM_MIN, min(T_GEN_CAM_MAX, elapsed_ms)) + self._n_gen_cam_counter += 1 + if self._n_gen_cam_counter >= N_GEN_CAM_DEFAULT: + self.t_gen_cam = T_GEN_CAM_MAX + self._n_gen_cam_counter = 0 + else: + self._n_gen_cam_counter = 0 + self.t_gen_cam = T_GEN_CAM_MAX + + # Update last-CAM 
dynamics reference + self._last_cam_time_ms = now_ms + if "track" in tpv: + self._last_cam_heading = tpv["track"] + if "lat" in tpv and "lon" in tpv: + self._last_cam_lat = tpv["lat"] + self._last_cam_lon = tpv["lon"] + if "speed" in tpv: + self._last_cam_speed = tpv["speed"] + + # Update path history (add current position) + if "lat" in tpv and "lon" in tpv: + self._path_history.append((tpv["lat"], tpv["lon"], now_ms)) + if len(self._path_history) > 40: + self._path_history.pop(0) + + # Update container timing + if include_lf: + self._last_lf_time_ms = now_ms + if include_special: + self._last_special_time_ms = now_ms + if include_vlf: + self._last_vlf_time_ms = now_ms + + self._cam_count += 1 + + # Legacy compatibility self.last_cam_generation_delta_time = GenerationDeltaTime( - msec=cam.cam["cam"]["generationDeltaTime"] + msec=int( + (TimeService.time() * 1000 - ITS_EPOCH_MS + ELAPSED_MILLISECONDS) + % 65536 + ) ) diff --git a/src/flexstack/facilities/ca_basic_service/readme.md b/src/flexstack/facilities/ca_basic_service/readme.md index 4a9ad39..910f553 100644 --- a/src/flexstack/facilities/ca_basic_service/readme.md +++ b/src/flexstack/facilities/ca_basic_service/readme.md @@ -2,4 +2,4 @@ The present CA Basic Service is implemented following the **ETSI EN 302 637-2 V1.4.1 (2019-04)** standard. -And renewed to the second release following the **ETSI TS 103 900 V2.1.1 (2023-11)**. \ No newline at end of file +And renewed to the second release following the **ETSI TS 103 900 V2.2.1 (2025-02)**. 
\ No newline at end of file diff --git a/src/flexstack/facilities/decentralized_environmental_notification_service/denm_transmission_management.py b/src/flexstack/facilities/decentralized_environmental_notification_service/denm_transmission_management.py index e7b440b..9302717 100644 --- a/src/flexstack/facilities/decentralized_environmental_notification_service/denm_transmission_management.py +++ b/src/flexstack/facilities/decentralized_environmental_notification_service/denm_transmission_management.py @@ -8,6 +8,7 @@ from .denm_coder import DENMCoder from ...btp.router import Router as BTPRouter from ...btp.service_access_point import BTPDataRequest, CommonNH, CommunicationProfile +from ...security.security_profiles import SecurityProfile from ...applications.road_hazard_signalling_service.service_access_point import ( DENRequest, ) @@ -263,6 +264,8 @@ def transmit_denm( ]["longitude"], ), communication_profile=CommunicationProfile.UNSPECIFIED, + security_profile=SecurityProfile.DECENTRALIZED_ENVIRONMENTAL_NOTIFICATION_MESSAGE, + its_aid=37, data=data, length=len(data), ) diff --git a/src/flexstack/facilities/vru_awareness_service/vam_constants.py b/src/flexstack/facilities/vru_awareness_service/vam_constants.py index 3916fb3..8c9cfdb 100644 --- a/src/flexstack/facilities/vru_awareness_service/vam_constants.py +++ b/src/flexstack/facilities/vru_awareness_service/vam_constants.py @@ -1,32 +1,142 @@ """ -Constants extracted from:ETSI TS 103 300-3 V2.2.1 (2023-02) +Constants extracted from: ETSI TS 103 300-3 V2.3.1 (2025-12) + +Table 16: Parameters for VAM generation in case of using direct communications (clause 6.2) Table 17: Parameters for VAM generation triggering (clause 6.4) +Table 14: Parameters for VRU clustering decisions (clause 5.4.2) +Table 15: Cluster membership parameters (clause 5.4.2) -Parametrs: minimumSafeLateralDistance and minimumSafeLongitudinalDistance are - not defined here, as they are not static(they depend on the VRU speed) +Note: 
minimumSafeLateralDistance and minimumSafeLongitudinalDistance are + not static constants — they depend on the VRU speed and are therefore + not defined here (see ETSI TS 103 300-2 clause 6.5.10.5). """ -T_GENVAMMIN = 100 # ms -T_GENVAMMAX = 5000 # ms -T_CHECKVAMGEN = T_GENVAMMIN # ms Shall be equal to or less than T_GenvamMin -T_GENVAM_DCC = T_GENVAMMIN # ms T_GenvamMin ≤ T_Genvam_DCC ≤ T_GenvamMax + +# --------------------------------------------------------------------------- +# Table 16 – VAM generation parameters (clause 6.2) +# --------------------------------------------------------------------------- + +#: Minimum time between consecutive VAM generation events [ms]. +T_GENVAMMIN = 100 + +#: Maximum time between consecutive VAM generation events [ms]. +T_GENVAMMAX = 5000 + +#: Minimum time between consecutive LF-container inclusions [ms]. +#: The LF container is also included in the first VAM and whenever a cluster +#: operation container is present (spec clause 6.2). +T_GENVAM_LFMIN = 2000 + +#: VAM generation-check period; shall be ≤ T_GenVamMin [ms]. +T_CHECKVAMGEN = T_GENVAMMIN + +#: DCC-provided inter-VAM gap; T_GenVamMin ≤ T_GenVam_DCC ≤ T_GenVamMax [ms]. +T_GENVAM_DCC = T_GENVAMMIN + +#: Maximum time allowed for assembling a VAM packet in the facilities layer [ms]. +T_ASSEMBLEVAM = 50 + +# --------------------------------------------------------------------------- +# Table 17 – VAM generation triggering thresholds (clause 6.4) +# --------------------------------------------------------------------------- + +#: Minimum Euclidean position change to trigger a new VAM [m]. MINREFERENCEPOINTPOSITIONCHANGETHRESHOLD = 4 + +#: Minimum ground-speed change to trigger a new VAM [m/s]. MINGROUNDSPEEDCHANGETHRESHOLD = 0.5 + +#: Minimum heading-vector orientation change to trigger a new VAM [degrees]. MINGROUNDVELOCITYORIENTATIONCHANGETHRESHOLD = 4 + +#: Minimum trajectory-interception probability change to trigger a new VAM [%]. 
MINTRAJECTORYINTERCEPTIONPROBCHANGETHRESHOLD = 10 -NUMSKIPVAMSFORREDUNDANCYMITIGATION = 2 # Value can range from 2-10 + +#: Maximum number of consecutive VAMs that may be skipped for redundancy +#: mitigation (range 2-10 per spec Table 17). +NUMSKIPVAMSFORREDUNDANCYMITIGATION = 2 + +#: Minimum cluster bounding-box distance change to trigger a new cluster VAM [m]. MINCLUSTERDISTANCECHANGETHRESHOLD = 2 + +#: Minimum safe vertical distance between ego-VRU and another participant [m]. MINIMUMSAFEVERTICALDISTANCE = 5 -""" -From ETSI TS 103 300-2 V2.1.1 (2020-05), page 47; -The VRU Basic Service shall interact with the VRU profile management entity in - the management layer to learn -whether the ITS-S has the VRU role activated. +# --------------------------------------------------------------------------- +# Table 14 – VRU clustering decision parameters (clause 5.4.2) +# --------------------------------------------------------------------------- -TODO: Create VRU Profile Mangement +#: Minimum number of nearby VRU devices needed before a potential cluster +#: leader will create a cluster (recommended range: 3–5). +NUM_CREATE_CLUSTER = 3 -Since it's not yet created the values will be declared here -""" +#: Maximum distance between a VRU and the cluster edge for joining/creation [m] +#: (recommended range: 3–5 m). +MAX_CLUSTER_DISTANCE = 5 + +#: Maximum relative speed difference within a cluster expressed as a fraction +#: (5 % per spec). +MAX_CLUSTER_VELOCITY_DIFFERENCE = 0.05 + +#: Maximum distance for a *combined* VRU cluster (recommended range: 1–2 m). +MAX_COMBINED_CLUSTER_DISTANCE = 2 + +#: Initial cluster cardinality size set immediately after cluster creation. +MIN_CLUSTER_SIZE = 1 + +#: Maximum cluster cardinality (number of active ITS-S). +MAX_CLUSTER_SIZE = 20 + +#: Number of VAMs with former identifiers to transmit after a cancelled- or +#: failed-join before resuming pseudonymisation. 
+NUM_CLUSTER_VAM_REPEAT = 3 + +# --------------------------------------------------------------------------- +# Table 15 – Cluster membership timing parameters (clause 5.4.2) [seconds] +# --------------------------------------------------------------------------- + +#: Cluster IDs received within this window must not be reused by a new leader. +TIME_CLUSTER_UNIQUENESS_THRESHOLD = 30.0 + +#: Duration for which the breakup indication is broadcast before disbanding. +TIME_CLUSTER_BREAKUP_WARNING = 3.0 + +#: Duration for which the join intention is advertised in individual VAMs. +TIME_CLUSTER_JOIN_NOTIFICATION = 3.0 + +#: Time the joining VRU waits for the leader to acknowledge membership. +TIME_CLUSTER_JOIN_SUCCESS = 0.5 + +#: Duration for which the cluster-ID change intent is advertised. +TIME_CLUSTER_ID_CHANGE_NOTIFICATION = 3.0 + +#: After a cluster-ID change, the old ID is valid in leave indications for +#: this long. +TIME_CLUSTER_ID_PERSIST = 3.0 + +#: If no cluster VAM arrives within this window, the leader is assumed lost. +TIME_CLUSTER_CONTINUITY = 2.0 + +#: Duration for which the leave indication is included in individual VAMs +#: after leaving a cluster. +TIME_CLUSTER_LEAVE_NOTIFICATION = 1.0 + +#: Window during which a combined-VRU cluster opportunity is advertised. +TIME_COMBINED_VRU_CLUSTER_OPPORTUNITY = 15.0 + +# --------------------------------------------------------------------------- +# VRU role (clause 4.2, Table 1) +# --------------------------------------------------------------------------- + +#: The device user is considered a VRU. +VRU_ROLE_ON = "VRU_ROLE_ON" + +#: The device user is NOT considered a VRU (zero-risk area, e.g. inside a bus). +VRU_ROLE_OFF = "VRU_ROLE_OFF" + +# --------------------------------------------------------------------------- +# Default VRU profile (informative placeholder; real value from VRU profile +# management entity per ETSI TS 103 300-2). 
+# --------------------------------------------------------------------------- VRU_PROFILE = { "Type": "Cyclist", @@ -37,23 +147,3 @@ "TrajectoryAmbiguity": "Medium", "ClusterSize": 1, } - -""" -ETSI TS 103 300-3 V2.2.1 (2023-02) - 5.4.1 VRU clustering functional overview -States: The support of the clustering function is optional in the VBS for all -VRU profiles. -however the same document in section C.2.3 Protocol data, states; -The VRU Basic Service (VBS) stores at least the following information for the -VAM originating ITS-S operation: -VAM generation time; -• ITS-S position as included in VAM; -• ITS-S speed as included in VAM; -• ITS-S heading as included in VAM; -• VRU role; -• VRU profile; -• VBS cluster state. -VRU role, VRU profile nad VBS cluster state will be hardcoded (VRU_Profile already is). -""" - -VRU_ROLE = "VRU_ROLE_ON" -VRU_CLUSTER_STATE = "VRU-ACTIVE-STANDALONE" diff --git a/src/flexstack/facilities/vru_awareness_service/vam_reception_management.py b/src/flexstack/facilities/vru_awareness_service/vam_reception_management.py index a51755a..fbc3da9 100644 --- a/src/flexstack/facilities/vru_awareness_service/vam_reception_management.py +++ b/src/flexstack/facilities/vru_awareness_service/vam_reception_management.py @@ -1,10 +1,12 @@ from __future__ import annotations import logging +from typing import Optional from .vam_coder import VAMCoder from ...btp.service_access_point import BTPDataIndication from ...btp.router import Router as BTPRouter from .vam_ldm_adaptation import VRUBasicServiceLDM +from .vru_clustering import VBSClusteringManager from ..ca_basic_service.cam_transmission_management import GenerationDeltaTime from ...utils.time_service import TimeService @@ -25,18 +27,24 @@ def __init__( vam_coder: VAMCoder, btp_router: BTPRouter, vru_basic_service_ldm: VRUBasicServiceLDM | None = None, + clustering_manager: Optional[VBSClusteringManager] = None, ) -> None: """ - Initialize the vam Reception Management. 
+ Initialise the VAM Reception Management. Parameters ---------- - vam_coder : vamCoder - vam Coder object. - btp_router : BTPRouter + vam_coder: + VAM ASN.1 coder. + btp_router: BTP Router. - vru_basic_service_ldm : VRUBasicServiceLDM | None - VRU Basic Service LDM. + vru_basic_service_ldm: + Optional LDM adapter. + clustering_manager: + Optional VBS clustering state machine. When provided, each + received VAM is forwarded to + :meth:`~.vru_clustering.VBSClusteringManager.on_received_vam` + so that nearby-VRU and cluster tables stay up to date. """ self.logging = logging.getLogger("vru_basic_service") self.vam_coder = vam_coder @@ -45,6 +53,7 @@ def __init__( port=2018, callback=self.reception_callback ) self.vru_basic_service_ldm = vru_basic_service_ldm + self.clustering_manager: Optional[VBSClusteringManager] = clustering_manager def reception_callback(self, btp_indication: BTPDataIndication) -> None: """ @@ -62,6 +71,8 @@ def reception_callback(self, btp_indication: BTPDataIndication) -> None: vam["utc_timestamp"] = utc_timestamp if self.vru_basic_service_ldm is not None: self.vru_basic_service_ldm.add_provider_data_to_ldm(vam) + if self.clustering_manager is not None: + self.clustering_manager.on_received_vam(vam) self.logging.debug("Recieved message; %s", vam) self.logging.info( "Recieved VAM message with timestamp: %s, station_id: %s", diff --git a/src/flexstack/facilities/vru_awareness_service/vam_transmission_management.py b/src/flexstack/facilities/vru_awareness_service/vam_transmission_management.py index bcf74b4..0aee51b 100644 --- a/src/flexstack/facilities/vru_awareness_service/vam_transmission_management.py +++ b/src/flexstack/facilities/vru_awareness_service/vam_transmission_management.py @@ -21,7 +21,9 @@ ) from ...utils.time_service import TimeService from ..ca_basic_service.cam_transmission_management import CooperativeAwarenessMessage, GenerationDeltaTime +from ...security.security_profiles import SecurityProfile from . 
import vam_constants +from .vru_clustering import VBSClusteringManager @dataclass(frozen=True) @@ -492,11 +494,11 @@ def fullfill_high_frequency_container_with_tpv_data(self, tpv: dict) -> None: """ if "track" in tpv.keys(): self.vam["vam"]["vamParameters"]["vruHighFrequencyContainer"]["heading"][ - "headingValue" + "value" ] = int(tpv["track"]*10) if "epd" in tpv.keys(): self.vam["vam"]["vamParameters"]["vruHighFrequencyContainer"]["heading"][ - "headingConfidence" + "confidence" ] = self.create_heading_confidence(tpv["epd"]) if "speed" in tpv.keys(): if int(tpv["speed"] * 100) > 16381: @@ -511,27 +513,31 @@ def fullfill_high_frequency_container_with_tpv_data(self, tpv: dict) -> None: class VAMTransmissionManagement: """ - vam Transmission Management class. - This sub-function implements the protocol operation of the originating ITS-S, as specified in - ETSI TS 103 300-3 V2.2.1 clause 6, including in particular: - - Activation and termination of vam transmission operation. - - Determination of the vam generation frequency. - - Trigger the generation of vam. + VAM Transmission Management. + + Implements the originating-ITS-S protocol operation specified in + ETSI TS 103 300-3 V2.3.1 (2025-12), clause 6, including: + + * Activation and termination of VAM transmission. + * VAM generation frequency determination (Table 16). + * Triggering VAM generation according to conditions 1–4 of clause 6.4.1. + * Periodic inclusion of the VRU Low-Frequency Container (clause 6.2). + * Integration with the VBS clustering state machine (clause 5.4). Attributes ---------- - vam_coder : vamCoder - vam Coder object. - T_Genvam_DCC : int - Time to wait between vams according to the DCC. - T_Genvam : int - Time to wait between vams. - N_Genvam : int - Consecutive vams to be generated. - last_vam_sent : dict - Last vam sent. - current_vam_to_send : dict - Current vam to send. + btp_router: + BTP router used to send the encoded VAM. 
+ device_data_provider: + Static device parameters (station ID, station type, etc.). + vru_basic_service_ldm: + Optional LDM adapter for storing own VAMs. + vam_coder: + ASN.1 encode/decode wrapper. + clustering_manager: + Optional VBS clustering state machine. When provided, VAM + transmission is suppressed in VRU_PASSIVE state and cluster + containers are appended to outgoing VAMs. """ def __init__( @@ -540,15 +546,31 @@ def __init__( vam_coder: VAMCoder, device_data_provider: DeviceDataProvider, vru_basic_service_ldm: VRUBasicServiceLDM | None = None, + clustering_manager: VBSClusteringManager | None = None, ) -> None: """ - Initialize the vam Transmission Management. + Initialise VAM Transmission Management. + + Parameters + ---------- + btp_router: + BTP router instance. + vam_coder: + VAM ASN.1 coder. + device_data_provider: + Immutable device parameters. + vru_basic_service_ldm: + Optional LDM adapter. + clustering_manager: + Optional VBS clustering manager. Pass ``None`` to disable + cluster-aware transmission. """ self.logging = logging.getLogger("vru_basic_service") self.btp_router: BTPRouter = btp_router self.device_data_provider = device_data_provider self.vru_basic_service_ldm = vru_basic_service_ldm self.vam_coder = vam_coder + self.clustering_manager: VBSClusteringManager | None = clustering_manager # self.T_Genvam_DCC = T_GenvamMin We don't have a DCC yet. self.t_genvam = vam_constants.T_GENVAMMIN self.n_genvam = 1 @@ -556,6 +578,11 @@ def __init__( self.last_sent_position: tuple[float, float] = (0.0, 0.0) self.last_vam_info_lock = threading.Lock() self.last_vam_speed: float = 0.0 + self.last_vam_heading: float = 0.0 + #: Time (seconds since epoch) when the LF container was last included. + self.last_lf_vam_time: float | None = None + #: True until the first VAM has been sent; used to force LF inclusion. 
+ self.is_first_vam: bool = True def location_service_callback(self, tpv: dict) -> None: """ @@ -624,6 +651,13 @@ def location_service_callback(self, tpv: dict) -> None: vam_to_send.fullfill_with_tpv_data(tpv) self.logging.debug("Fullfilled VAM with TPV data %s", tpv) + # Suppress individual VAMs when passive (clustering state machine). + if ( + self.clustering_manager is not None + and not self.clustering_manager.should_transmit_vam() + ): + return + if self.last_vam_generation_delta_time is None: self.send_next_vam(vam=vam_to_send) return @@ -656,13 +690,82 @@ def location_service_callback(self, tpv: dict) -> None: ): self.send_next_vam(vam=vam_to_send) return + # Condition 4 (clause 6.4.1): heading change exceeds threshold. + if "track" in tpv: + heading_diff = abs(tpv["track"] - self.last_vam_heading) % 360.0 + if heading_diff > 180.0: + heading_diff = 360.0 - heading_diff + if heading_diff > vam_constants.MINGROUNDVELOCITYORIENTATIONCHANGETHRESHOLD: + self.send_next_vam(vam=vam_to_send) + return + + def _attach_lf_container_if_due(self, vam: VAMMessage) -> None: + """Attach ``vruLowFrequencyContainer`` to *vam* when required. + + The Low-Frequency Container shall be included (clause 6.2): + + * On the first transmitted VAM. + * When the time elapsed since the last LF container exceeds + ``T_GenVamLFMin`` (2 000 ms). + * Whenever a ``VruClusterOperationContainer`` is also present, so + that receivers can correlate cluster operations with a full VRU + profile update. - def send_next_vam(self, vam: VAMMessage) -> None: + Parameters + ---------- + vam: + The :class:`VAMMessage` that is about to be transmitted. """ - Send the next vam. 
+ import time as _time_module + now = _time_module.time() + has_cluster_op = ( + "vruClusterOperationContainer" + in vam.vam["vam"]["vamParameters"] + ) + lf_due = ( + self.is_first_vam + or self.last_lf_vam_time is None + or (now - self.last_lf_vam_time) * 1000 + >= vam_constants.T_GENVAM_LFMIN + or has_cluster_op + ) + if lf_due: + # The LF container carries VRU profile and device usage. + # Profile is derived from the station type in the basic container. + vam.vam["vam"]["vamParameters"]["vruLowFrequencyContainer"] = { + "profileAndSubprofile": ("pedestrian", "unavailable") + } + self.last_lf_vam_time = now + + def send_next_vam(self, vam: VAMMessage) -> None: + """Encode and send *vam* via the BTP router. + + Before encoding, this method: - BTP Port Number: 2018 + * Attaches cluster containers from the clustering state machine + (when a :class:`~.vru_clustering.VBSClusteringManager` is + configured). + * Attaches the ``VruLowFrequencyContainer`` when inclusion criteria + are met (clause 6.2). + + BTP destination port: 2018 (as specified in Table B.2 of + ETSI TS 103 300-3 V2.3.1). """ + params = vam.vam["vam"]["vamParameters"] + + # Attach cluster containers when clustering is active. + if self.clustering_manager is not None: + cluster_info = self.clustering_manager.get_cluster_information_container() + if cluster_info is not None: + params["vruClusterInformationContainer"] = cluster_info + cluster_op = self.clustering_manager.get_cluster_operation_container() + if cluster_op is not None: + params["vruClusterOperationContainer"] = cluster_op + + # Attach LF container when due (must be done *after* cluster-op is + # present so has_cluster_op detection inside the helper works). 
+ self._attach_lf_container_if_due(vam) + if self.vru_basic_service_ldm is not None: vam_ldm = vam.vam.copy() vam_ldm["utc_timestamp"] = int(TimeService.time()*1000) @@ -676,6 +779,8 @@ def send_next_vam(self, vam: VAMMessage) -> None: gn_packet_transport_type=PacketTransportType(), communication_profile=CommunicationProfile.UNSPECIFIED, traffic_class=TrafficClass(), + security_profile=SecurityProfile.VRU_AWARENESS_MESSAGE, + its_aid=638, data=data, length=len(data), ) @@ -699,3 +804,9 @@ def send_next_vam(self, vam: VAMMessage) -> None: self.last_vam_speed = vam.vam["vam"]["vamParameters"][ "vruHighFrequencyContainer" ]["speed"]["speedValue"] / 100 + self.last_vam_heading = ( + vam.vam["vam"]["vamParameters"][ + "vruHighFrequencyContainer" + ]["heading"]["value"] / 10.0 + ) + self.is_first_vam = False diff --git a/src/flexstack/facilities/vru_awareness_service/vru_awareness_service.py b/src/flexstack/facilities/vru_awareness_service/vru_awareness_service.py index fc436f1..68fb695 100644 --- a/src/flexstack/facilities/vru_awareness_service/vru_awareness_service.py +++ b/src/flexstack/facilities/vru_awareness_service/vru_awareness_service.py @@ -7,25 +7,32 @@ from ...btp.router import Router as BTPRouter from .vam_coder import VAMCoder from .vam_reception_management import VAMReceptionManagement +from .vru_clustering import VBSClusteringManager from ..local_dynamic_map.ldm_facility import LDMFacility class VRUAwarenessService: - """ - VRU Basis Service + """VRU Awareness Basic Service (VBS). + + Top-level service object that wires together encoding, transmission, + reception, LDM storage, and the optional VBS clustering state machine + defined in ETSI TS 103 300-3 V2.3.1 (2025-12), clause 5.4. Attributes ---------- - btp_router : BTPRouter - BTP Router. - vam_coder : VAMCoder - vam Coder. - device_data_provider : DeviceDataProvider - Vehicle Data. - vam_transmission_management : vamTransmissionManagement - vam Transmission Management. 
- vam_reception_management : vamReceptionManagement - vam Reception Management. + btp_router: + BTP Router used to send and receive VAMs. + vam_coder: + ASN.1 encoder/decoder for VAMs. + device_data_provider: + Static device parameters (station ID, station type, etc.). + vam_transmission_management: + VAM generation and transmission engine. + vam_reception_management: + VAM reception and LDM injection engine. + clustering_manager: + VBS clustering state machine (``None`` when + ``cluster_support=False``). """ def __init__( @@ -33,18 +40,28 @@ def __init__( btp_router: BTPRouter, device_data_provider: DeviceDataProvider, ldm: LDMFacility | None = None, + cluster_support: bool = True, + own_vru_profile: str = "pedestrian", ) -> None: - """ - Initialize the Cooperative Awareness Basic Service. + """Initialise the VRU Awareness Basic Service. Parameters ---------- - btp_router : BTPRouter + btp_router: BTP Router. - device_data_provider : DeviceDataProvider - Vehicle Data. - ldm: LDM Facility - Local Dynamic Map Facility that will be used to provide data to the LDM. + device_data_provider: + Static device parameters. + ldm: + Local Dynamic Map Facility. When provided, transmitted and + received VAMs are stored for other facilities to query. + cluster_support: + When ``True`` (default), the VBS clustering state machine is + instantiated and integrated with transmission and reception + management per clause 5.4 of ETSI TS 103 300-3 V2.3.1. + own_vru_profile: + ASN.1 VRU profile string, e.g. ``"pedestrian"``. Used by the + clustering manager to populate ``clusterProfiles`` in transmitted + cluster VAMs. 
""" self.logging = logging.getLogger("vru_basic_service") @@ -55,16 +72,25 @@ def __init__( if ldm is not None: vru_basic_service_ldm = VRUBasicServiceLDM(ldm, (AccessPermission.VAM,), 5) + self.clustering_manager: VBSClusteringManager | None = None + if cluster_support: + self.clustering_manager = VBSClusteringManager( + own_station_id=device_data_provider.station_id, + own_vru_profile=own_vru_profile, + ) + self.vam_transmission_management = VAMTransmissionManagement( btp_router=btp_router, vam_coder=self.vam_coder, device_data_provider=self.device_data_provider, vru_basic_service_ldm=vru_basic_service_ldm, + clustering_manager=self.clustering_manager, ) self.vam_reception_management = VAMReceptionManagement( vam_coder=self.vam_coder, btp_router=self.btp_router, vru_basic_service_ldm=vru_basic_service_ldm, + clustering_manager=self.clustering_manager, ) self.logging.info("VRU Basic Service Started!") diff --git a/src/flexstack/facilities/vru_awareness_service/vru_clustering.py b/src/flexstack/facilities/vru_awareness_service/vru_clustering.py new file mode 100644 index 0000000..1aeedf9 --- /dev/null +++ b/src/flexstack/facilities/vru_awareness_service/vru_clustering.py @@ -0,0 +1,1215 @@ +""" +VRU Basic Service (VBS) Clustering State Machine. + +Implements the VRU cluster management function specified in +ETSI TS 103 300-3 V2.3.1 (2025-12), clause 5.4. + +The clustering function is optional for all VRU profiles (clause 5.4.1) +and is recommended for VRU Profile 1 (pedestrian) when conditions are met. + +Architecture overview +--------------------- +``VBSClusteringManager`` is the single entry-point. It is created by +``VRUAwarenessService``, injected into ``VAMTransmissionManagement`` and +``VAMReceptionManagement``, and orchestrates: + +* **State machine** – four VBS states (Table 5): + ``VRU_IDLE``, ``VRU_ACTIVE_STANDALONE``, + ``VRU_ACTIVE_CLUSTER_LEADER``, ``VRU_PASSIVE``. 
+ +* **Cluster creation** – triggered when enough nearby VRUs are visible and + no joinable cluster exists (clause 5.4.2.4). + +* **Cluster joining** – three-phase handshake: + notification → passive → confirmation (or failure) (clause 5.4.2.2). + +* **Cluster leaving** – with leave-reason notification period + (clause 5.4.2.2). + +* **Cluster breakup** – by the cluster leader, with a warning period and + reason code (clause 5.4.2.2). + +* **Leader-lost detection** – timeout on ``timeClusterContinuity`` + (clause 5.4.2.2). + +* **Container generation** – ``VruClusterInformationContainer`` (leader) + and ``VruClusterOperationContainer`` (joining/leaving/breaking-up). + +Thread safety +------------- +All public methods acquire ``_lock`` so the class is safe to call from +a GPS-callback thread and a BTP-reception thread simultaneously. +""" + +from __future__ import annotations + +import logging +import random +import threading +import time +from dataclasses import dataclass, field +from enum import Enum, unique +from typing import Callable, Dict, Optional + +from . import vam_constants + +__all__ = [ + "VBSState", + "ClusterLeaveReason", + "ClusterBreakupReason", + "VBSClusteringManager", +] + +logger = logging.getLogger("vru_basic_service") + + +# --------------------------------------------------------------------------- +# Public enums +# --------------------------------------------------------------------------- + + +@unique +class VBSState(Enum): + """VBS clustering states as defined in ETSI TS 103 300-3 V2.3.1, Table 5. + + VRU_IDLE + The device user is not considered a VRU (role is ``VRU_ROLE_OFF``). + The VBS remains operational to monitor role changes. + + VRU_ACTIVE_STANDALONE + VAMs are sent with information about this individual VRU only. + The VRU may indicate cluster-join or cluster-leave intentions in + the ``VruClusterOperationContainer``. 
+ + VRU_ACTIVE_CLUSTER_LEADER + The VRU leads a cluster and transmits *cluster* VAMs that describe + the entire cluster. Only VRU Profile 1 and Profile 2 may be in + this state. + + VRU_PASSIVE + The VRU is a cluster member. It does **not** transmit VAMs except + when it is in the process of leaving a cluster (leave-notification + period) or when it is located in a low-risk geographical area. + """ + + VRU_IDLE = "VRU-IDLE" + VRU_ACTIVE_STANDALONE = "VRU-ACTIVE-STANDALONE" + VRU_ACTIVE_CLUSTER_LEADER = "VRU-ACTIVE-CLUSTER-LEADER" + VRU_PASSIVE = "VRU-PASSIVE" + + +@unique +class ClusterLeaveReason(Enum): + """Reasons for leaving a cluster. + + Values match the ASN.1 ``ClusterLeaveReason`` enumeration in the VAM + module (ETSI TS 103 300-3 V2.3.1, clause 7.3.5 and Table 12). + """ + + NOT_PROVIDED = "notProvided" + CLUSTER_LEADER_LOST = "clusterLeaderLost" + CLUSTER_DISBANDED_BY_LEADER = "clusterDisbandedByLeader" + OUT_OF_CLUSTER_BOUNDING_BOX = "outOfClusterBoundingBox" + OUT_OF_CLUSTER_SPEED_RANGE = "outOfClusterSpeedRange" + JOINING_ANOTHER_CLUSTER = "joiningAnotherCluster" + CANCELLED_JOIN = "cancelledJoin" + FAILED_JOIN = "failedJoin" + SAFETY_CONDITION = "safetyCondition" + + +@unique +class ClusterBreakupReason(Enum): + """Reasons for breaking up a cluster. + + Values match the ASN.1 ``ClusterBreakupReason`` enumeration in the VAM + module (ETSI TS 103 300-3 V2.3.1, clause 7.3.5 and Table 13). 
+ """ + + NOT_PROVIDED = "notProvided" + CLUSTERING_PURPOSE_COMPLETED = "clusteringPurposeCompleted" + LEADER_MOVED_OUT_OF_BOUNDING_BOX = "leaderMovedOutOfClusterBoundingBox" + JOINING_ANOTHER_CLUSTER = "joiningAnotherCluster" + ENTERING_LOW_RISK_AREA = "enteringLowRiskAreaBasedOnMaps" + RECEPTION_OF_CPM_CONTAINING_CLUSTER = "receptionOfCpmContainingCluster" + + +# --------------------------------------------------------------------------- +# Internal join/leave sub-states +# --------------------------------------------------------------------------- + + +@unique +class _JoinSubstate(Enum): + """Internal substates of the cluster-joining procedure.""" + + NONE = "none" + """Not joining any cluster.""" + + NOTIFY = "notify" + """Sending join-intent notification in individual VAMs for + ``timeClusterJoinNotification`` seconds (clause 5.4.2.2).""" + + WAITING = "waiting" + """Individual VAMs have stopped; waiting up to ``timeClusterJoinSuccess`` + for the cluster leader to acknowledge membership.""" + + JOINED = "joined" + """In VRU_PASSIVE state; fully admitted cluster member.""" + + CANCELLED = "cancelled" + """Join cancelled; sending leave notification for + ``timeClusterLeaveNotification`` seconds (clause 5.4.2.2).""" + + FAILED = "failed" + """Join failed; sending leave notification and returning to STANDALONE.""" + + +@unique +class _LeaveSubstate(Enum): + """Internal substates of the cluster-leaving procedure.""" + + NONE = "none" + """Not leaving any cluster.""" + + NOTIFY = "notify" + """Sending leave indication for ``timeClusterLeaveNotification`` seconds + (clause 5.4.2.2).""" + + +# --------------------------------------------------------------------------- +# Internal data structures +# --------------------------------------------------------------------------- + + +@dataclass +class _NearbyVRU: + """Position and kinematic snapshot of a recently observed VRU ITS-S.""" + + station_id: int + lat: float + lon: float + speed: float + heading: float + 
last_seen: float + + +@dataclass +class _NearbyCluster: + """Information about a recently observed VRU cluster.""" + + cluster_id: int + leader_station_id: int + cardinality: int + lat: float + lon: float + speed: float + heading: float + bounding_box_radius: Optional[float] + last_seen: float + + +@dataclass +class _ClusterState: + """Own cluster state when this device is acting as cluster leader.""" + + cluster_id: int + cardinality: int = vam_constants.MIN_CLUSTER_SIZE + #: Set of VRU profile strings currently observed in the cluster, + #: e.g. {"pedestrian"}. + profiles: "set[str]" = field(default_factory=set) + #: Bounding-box radius in metres; grows as members join. + radius: float = float(vam_constants.MAX_CLUSTER_DISTANCE) + #: Timestamp when the breakup warning was started (None = not breaking up). + breakup_started: Optional[float] = None + #: Reason for the pending breakup. + breakup_reason: Optional[ClusterBreakupReason] = None + #: Station IDs of devices currently executing the join notification phase. + pending_members: "set[int]" = field(default_factory=set) + + +# --------------------------------------------------------------------------- +# Helper +# --------------------------------------------------------------------------- + + +def _heading_diff(h1: float, h2: float) -> float: + """Return the absolute angular difference in degrees, handling wrap-around. + + Parameters + ---------- + h1, h2: + Heading values in degrees in the range [0, 360). + + Returns + ------- + float + Absolute angular difference in [0, 180]. + """ + diff = abs(h1 - h2) % 360.0 + return diff if diff <= 180.0 else 360.0 - diff + + +def _haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float: + """Approximate great-circle distance between two WGS-84 points [m]. + + Uses the Haversine formula. Accuracy is sufficient for the short + inter-VRU distances (< 100 m) encountered in clustering decisions. 
+ + Parameters + ---------- + lat1, lon1, lat2, lon2: + Coordinates in decimal degrees. + + Returns + ------- + float + Distance in metres. + """ + import math + + r = 6_371_000.0 # Earth radius in metres + phi1, phi2 = math.radians(lat1), math.radians(lat2) + dphi = math.radians(lat2 - lat1) + dlam = math.radians(lon2 - lon1) + a = math.sin(dphi / 2) ** 2 + math.cos(phi1) * math.cos(phi2) * math.sin(dlam / 2) ** 2 + return r * 2.0 * math.atan2(math.sqrt(a), math.sqrt(1.0 - a)) + + +# --------------------------------------------------------------------------- +# Main class +# --------------------------------------------------------------------------- + + +class VBSClusteringManager: + """VRU Basic Service clustering state machine. + + Implements the cluster management function described in + ETSI TS 103 300-3 V2.3.1, clause 5.4. + + Parameters + ---------- + own_station_id: + The station ID of the local ITS-S (used to generate cluster IDs and + build cluster containers). + own_vru_profile: + The VRU profile string as it appears in the ASN.1 enumeration, e.g. + ``"pedestrian"``, ``"bicyclistAndLightVruVehicle"``, + ``"motorcyclist"``, ``"animal"``. Used to populate + ``clusterProfiles`` in the cluster information container. + time_fn: + Callable returning the current POSIX timestamp in seconds. + Defaults to :func:`time.time`. Override in unit tests to inject a + fake clock. 
+ """ + + def __init__( + self, + own_station_id: int, + own_vru_profile: str = "pedestrian", + time_fn: Callable[[], float] = time.time, + ) -> None: + self._own_station_id = own_station_id + self._own_vru_profile = own_vru_profile + self._time_fn = time_fn + + # --- VBS state --- + self._state: VBSState = VBSState.VRU_ACTIVE_STANDALONE + + # --- Own cluster data (valid in VRU_ACTIVE_CLUSTER_LEADER) --- + self._cluster: Optional[_ClusterState] = None + + # --- Joined cluster data (valid in VRU_PASSIVE) --- + self._joined_cluster_id: Optional[int] = None + self._leader_station_id: Optional[int] = None + self._last_leader_vam_time: Optional[float] = None + + # --- Join and leave sub-state machine (VRU_ACTIVE_STANDALONE) --- + self._join_substate: _JoinSubstate = _JoinSubstate.NONE + self._join_target_cluster_id: Optional[int] = None + self._join_started: Optional[float] = None + self._join_leave_reason: Optional[ClusterLeaveReason] = None + self._join_leave_started: Optional[float] = None + + # --- Leave notification state (VRU_PASSIVE leaving) --- + self._leave_substate: _LeaveSubstate = _LeaveSubstate.NONE + self._leave_reason: Optional[ClusterLeaveReason] = None + self._leave_cluster_id: Optional[int] = None + self._leave_started: Optional[float] = None + + # --- Nearby VRU and cluster tables --- + self._nearby_vrus: Dict[int, _NearbyVRU] = {} + self._nearby_clusters: Dict[int, _NearbyCluster] = {} + + # --- Recently seen cluster IDs (for uniqueness threshold) --- + self._seen_cluster_ids: Dict[int, float] = {} # id → first-seen time + + self._lock = threading.RLock() # Reentrant: public methods may call other public methods + logger.debug("VBSClusteringManager initialised (station_id=%d)", own_station_id) + + # ------------------------------------------------------------------ + # Public properties + # ------------------------------------------------------------------ + + @property + def state(self) -> VBSState: + """Current VBS clustering state 
(thread-safe).""" + with self._lock: + return self._state + + # ------------------------------------------------------------------ + # VRU role management (clause 4.2, Table 1) + # ------------------------------------------------------------------ + + def set_vru_role_on(self) -> None: + """Transition from VRU_IDLE to VRU_ACTIVE_STANDALONE. + + Called when the VRU profile management entity notifies that the + device user is now considered a VRU (VRU_ROLE_ON, clause 5.4.2.2). + """ + with self._lock: + if self._state is VBSState.VRU_IDLE: + self._state = VBSState.VRU_ACTIVE_STANDALONE + logger.info( + "VBS state: VRU_IDLE → VRU_ACTIVE_STANDALONE (VRU_ROLE_ON)" + ) + + def set_vru_role_off(self) -> None: + """Transition to VRU_IDLE (VRU_ROLE_OFF). + + Called when the VRU profile management entity notifies that the + device user is no longer considered a VRU (clause 5.4.2.2). + Any active cluster relationships are abandoned without notification. + """ + with self._lock: + self._state = VBSState.VRU_IDLE + self._cluster = None + self._joined_cluster_id = None + self._leader_station_id = None + self._last_leader_vam_time = None + self._join_substate = _JoinSubstate.NONE + self._leave_substate = _LeaveSubstate.NONE + logger.info("VBS state → VRU_IDLE (VRU_ROLE_OFF)") + + # ------------------------------------------------------------------ + # Clustering creation (clause 5.4.2.2) + # ------------------------------------------------------------------ + + def try_create_cluster( + self, + own_lat: float, + own_lon: float, + ) -> bool: + """Attempt to create a new VRU cluster. + + A cluster is created when all conditions from clause 5.4.2.4 are met: + + * The device is in VRU_ACTIVE_STANDALONE. + * Enough nearby devices (``NUM_CREATE_CLUSTER``) are visible within + ``MAX_CLUSTER_DISTANCE`` metres. + * No joinable cluster has been found (caller responsibility). + + On success the state transitions to VRU_ACTIVE_CLUSTER_LEADER. 
+ + Parameters + ---------- + own_lat, own_lon: + Current estimated position of this ITS-S in decimal degrees. + + Returns + ------- + bool + ``True`` if a cluster was successfully created. + """ + with self._lock: + if self._state is not VBSState.VRU_ACTIVE_STANDALONE: + return False + + # Count nearby VRUs within MAX_CLUSTER_DISTANCE + now = self._time_fn() + nearby_count = sum( + 1 + for vru in self._nearby_vrus.values() + if ( + now - vru.last_seen < vam_constants.T_GENVAMMAX / 1000.0 + and _haversine_distance(own_lat, own_lon, vru.lat, vru.lon) + <= vam_constants.MAX_CLUSTER_DISTANCE + ) + ) + if nearby_count < vam_constants.NUM_CREATE_CLUSTER: + return False + + # Generate a locally unique non-zero cluster ID + cluster_id = self._generate_unique_cluster_id(now) + if cluster_id is None: + logger.warning( + "Could not generate a unique cluster ID; cluster creation aborted." + ) + return False + + self._cluster = _ClusterState( + cluster_id=cluster_id, + cardinality=vam_constants.MIN_CLUSTER_SIZE, + profiles={self._own_vru_profile}, + radius=float(vam_constants.MAX_CLUSTER_DISTANCE), + ) + self._state = VBSState.VRU_ACTIVE_CLUSTER_LEADER + logger.info( + "VBS state: VRU_ACTIVE_STANDALONE → VRU_ACTIVE_CLUSTER_LEADER " + "(cluster_id=%d, cardinality=%d)", + cluster_id, + self._cluster.cardinality, + ) + return True + + # ------------------------------------------------------------------ + # Cluster joining (clause 5.4.2.2) + # ------------------------------------------------------------------ + + def initiate_join(self, cluster_id: int) -> bool: + """Begin the cluster-join notification phase. + + The VRU will include ``clusterJoinInfo`` in its individual VAMs for + ``timeClusterJoinNotification`` seconds, then enter VRU_PASSIVE if + the join is successful (clause 5.4.2.2). + + Parameters + ---------- + cluster_id: + Identifier of the cluster to join. Use ``0`` if the cluster was + indicated by a non-VAM message and has no identifier. 
+ + Returns + ------- + bool + ``True`` if the join initiation was accepted (device is in + VRU_ACTIVE_STANDALONE and not already joining). + """ + with self._lock: + if self._state is not VBSState.VRU_ACTIVE_STANDALONE: + return False + if self._join_substate is not _JoinSubstate.NONE: + return False # already in a join procedure + + self._join_substate = _JoinSubstate.NOTIFY + self._join_target_cluster_id = cluster_id + self._join_started = self._time_fn() + logger.info( + "Cluster join initiated: cluster_id=%d (notification phase starts)", + cluster_id, + ) + return True + + def cancel_join(self) -> None: + """Cancel an in-progress join and start leave-notification phase. + + Called when joining conditions are no longer met while in + ``_JoinSubstate.NOTIFY`` (clause 5.4.2.2 — "Cancelled-join + handling"). + """ + with self._lock: + if self._join_substate not in (_JoinSubstate.NOTIFY, _JoinSubstate.WAITING): + return + self._join_leave_reason = ClusterLeaveReason.CANCELLED_JOIN + self._join_leave_started = self._time_fn() + self._join_substate = _JoinSubstate.CANCELLED + logger.info( + "Cluster join cancelled (cluster_id=%d); leave-notification phase started.", + self._join_target_cluster_id, + ) + + def confirm_join_failed(self) -> None: + """Mark the join as failed and start leave-notification phase. + + Called when ``timeClusterJoinSuccess`` expires without the cluster + leader acknowledging membership (clause 5.4.2.2 — "Failed-join + handling"). 
+ """ + with self._lock: + if self._join_substate is not _JoinSubstate.WAITING: + return + self._join_leave_reason = ClusterLeaveReason.FAILED_JOIN + self._join_leave_started = self._time_fn() + self._join_substate = _JoinSubstate.FAILED + logger.warning( + "Cluster join failed (cluster_id=%d); reverting to STANDALONE.", + self._join_target_cluster_id, + ) + + # ------------------------------------------------------------------ + # Cluster leaving (clause 5.4.2.2) + # ------------------------------------------------------------------ + + def trigger_leave_cluster( + self, reason: ClusterLeaveReason = ClusterLeaveReason.NOT_PROVIDED + ) -> None: + """Begin the cluster-leave notification phase. + + Valid from both VRU_PASSIVE and VRU_ACTIVE_STANDALONE (during the + join notification phase). + + Parameters + ---------- + reason: + The reason for leaving the cluster (Table 12). + """ + with self._lock: + if self._state is VBSState.VRU_PASSIVE: + self._do_leave_to_standalone(reason) + logger.info( + "Cluster leave initiated (reason: %s).", + reason.value, + ) + elif ( + self._state is VBSState.VRU_ACTIVE_STANDALONE + and self._join_substate is _JoinSubstate.NOTIFY + ): + self.cancel_join() + + # ------------------------------------------------------------------ + # Cluster breakup (clause 5.4.2.2) + # ------------------------------------------------------------------ + + def trigger_breakup_cluster( + self, reason: ClusterBreakupReason = ClusterBreakupReason.NOT_PROVIDED + ) -> bool: + """Initiate the cluster breakup warning phase. + + The leader will include ``clusterBreakupInfo`` in cluster VAMs for + ``timeClusterBreakupWarning`` seconds before disbanding and + transitioning to VRU_ACTIVE_STANDALONE (clause 5.4.2.2). + + Parameters + ---------- + reason: + The reason for breaking up the cluster (Table 13). + + Returns + ------- + bool + ``True`` if the breakup was successfully initiated. 
+ """ + with self._lock: + if self._state is not VBSState.VRU_ACTIVE_CLUSTER_LEADER: + return False + if self._cluster is None: + return False + if self._cluster.breakup_started is not None: + return False # already in breakup warning phase + + self._cluster.breakup_started = self._time_fn() + self._cluster.breakup_reason = reason + logger.info( + "Cluster breakup initiated (cluster_id=%d, reason=%s); " + "warning phase started.", + self._cluster.cluster_id, + reason.value, + ) + return True + + # ------------------------------------------------------------------ + # Periodic maintenance – call on every VAM generation event + # ------------------------------------------------------------------ + + def update( + self, + own_lat: float, + own_lon: float, + own_speed: float, + own_heading: float, + ) -> None: + """Advance all time-based state transitions. + + Must be called once per VAM generation check cycle + (i.e. at least every ``T_CheckVamGen`` ms) so that: + + * The join notification phase concludes and the device enters passive. + * Failed-join detection fires after ``timeClusterJoinSuccess``. + * The leave notification phase concludes. + * Breakup warning timeout results in the STANDALONE transition. + * Cluster-leader-lost timeout triggers a leave. + + Parameters + ---------- + own_lat, own_lon: + Current estimated position in decimal degrees. + own_speed: + Current speed in m/s. + own_heading: + Current heading in degrees (0–360). 
+ """ + with self._lock: + now = self._time_fn() + self._expire_nearby_tables(now) + + if self._state is VBSState.VRU_ACTIVE_STANDALONE: + self._update_standalone(now, own_lat, own_lon, own_speed, own_heading) + + elif self._state is VBSState.VRU_ACTIVE_CLUSTER_LEADER: + self._update_leader(now, own_lat, own_lon, own_speed) + + elif self._state is VBSState.VRU_PASSIVE: + self._update_passive(now, own_lat, own_lon, own_speed) + + # ------------------------------------------------------------------ + # VAM reception (called from VAMReceptionManagement) + # ------------------------------------------------------------------ + + def on_received_vam(self, vam: dict) -> None: + """Process a received VAM for cluster management purposes. + + Updates the internal tables of nearby VRUs and clusters, advances + the join-/leave-confirmation sub-states, and handles cluster-leader- + lost recovery. + + Parameters + ---------- + vam: + Decoded VAM as a Python dict (the full VAM structure produced by + :class:`~flexstack.facilities.vru_awareness_service.vam_coder.VAMCoder`). + """ + with self._lock: + try: + self._process_received_vam(vam) + except (KeyError, TypeError) as exc: + logger.debug("on_received_vam: skipping malformed VAM – %s", exc) + + # ------------------------------------------------------------------ + # Container generation (called by VAMTransmissionManagement) + # ------------------------------------------------------------------ + + def get_cluster_information_container(self) -> Optional[dict]: + """Return a ``VruClusterInformationContainer`` dict or ``None``. + + Only non-``None`` when the device is in VRU_ACTIVE_CLUSTER_LEADER + and a valid cluster state exists. The returned dict is suitable for + direct inclusion in the ``vamParameters`` dict passed to the + :class:`~flexstack.facilities.vru_awareness_service.vam_coder.VAMCoder`. 
+ """ + with self._lock: + if ( + self._state is not VBSState.VRU_ACTIVE_CLUSTER_LEADER + or self._cluster is None + ): + return None + + cluster_info: dict = { + "clusterId": self._cluster.cluster_id, + "clusterBoundingBoxShape": { + "circular": { + "radius": max(1, int(self._cluster.radius)) + } + }, + "clusterCardinalitySize": self._cluster.cardinality, + } + if self._cluster.profiles: + cluster_info["clusterProfiles"] = self._encode_cluster_profiles( + self._cluster.profiles + ) + + return {"vruClusterInformation": cluster_info} + + def get_cluster_operation_container(self) -> Optional[dict]: + """Return a ``VruClusterOperationContainer`` dict or ``None``. + + Returns an appropriate container during: + + * *Join-notification* phase – ``clusterJoinInfo``. + * *Leave-notification* phase – ``clusterLeaveInfo``. + * *Cancelled/failed-join* phase – ``clusterLeaveInfo``. + * *Breakup-warning* phase – ``clusterBreakupInfo``. + + Returns ``None`` when no cluster operation is in progress. + """ + with self._lock: + now = self._time_fn() + + if self._state is VBSState.VRU_ACTIVE_STANDALONE: + return self._standalone_operation_container(now) + + if self._state is VBSState.VRU_PASSIVE: + return self._passive_operation_container(now) + + if self._state is VBSState.VRU_ACTIVE_CLUSTER_LEADER: + return self._leader_operation_container(now) + + return None + + def should_transmit_vam(self) -> bool: + """Return ``True`` when the VBS should transmit a VAM. + + A VRU in VRU_PASSIVE must NOT transmit individual VAMs unless it is + in the leave-notification phase (clause 6.3). A device in VRU_IDLE + must NOT transmit VAMs (Table 1). 
+ """ + with self._lock: + if self._state is VBSState.VRU_IDLE: + return False + if self._state is VBSState.VRU_PASSIVE: + # Only allowed while sending leave notifications + return self._leave_substate is _LeaveSubstate.NOTIFY + return True # STANDALONE or CLUSTER_LEADER + + # ------------------------------------------------------------------ + # Member-tracking helpers (called from on_received_vam) + # ------------------------------------------------------------------ + + def _process_received_vam(self, vam: dict) -> None: + """Internal VAM processing (must be called with ``_lock`` held).""" + header = vam["header"] + sender_id: int = header["stationId"] + params = vam["vam"]["vamParameters"] + basic = params["basicContainer"] + + lat = basic["referencePosition"]["latitude"] / 1e7 + lon = basic["referencePosition"]["longitude"] / 1e7 + hf = params.get("vruHighFrequencyContainer", {}) + speed = hf.get("speed", {}).get("speedValue", 0) / 100.0 + heading = hf.get("heading", {}).get("value", 0) / 10.0 + now = self._time_fn() + + # Update nearby VRU table + self._nearby_vrus[sender_id] = _NearbyVRU( + station_id=sender_id, + lat=lat, + lon=lon, + speed=speed, + heading=heading, + last_seen=now, + ) + + # --- Cluster information container --- + cluster_info_ctr = params.get("vruClusterInformationContainer") + if cluster_info_ctr: + vci = cluster_info_ctr["vruClusterInformation"] + c_id: int = vci.get("clusterId", 0) + cardinality: int = vci.get("clusterCardinalitySize", 1) + # Extract radius from circular bounding box if present + bbox = vci.get("clusterBoundingBoxShape") + radius: Optional[float] = None + if bbox and "circular" in bbox: + radius = float(bbox["circular"].get("radius", vam_constants.MAX_CLUSTER_DISTANCE)) + + self._nearby_clusters[c_id] = _NearbyCluster( + cluster_id=c_id, + leader_station_id=sender_id, + cardinality=cardinality, + lat=lat, + lon=lon, + speed=speed, + heading=heading, + bounding_box_radius=radius, + last_seen=now, + ) + # Track cluster 
IDs for uniqueness threshold + if c_id not in self._seen_cluster_ids: + self._seen_cluster_ids[c_id] = now + + # If we are the leader of this cluster, update cardinality/profiles + if ( + self._state is VBSState.VRU_ACTIVE_CLUSTER_LEADER + and self._cluster is not None + and self._cluster.cluster_id == c_id + and sender_id != self._own_station_id + ): + # Another leader claiming the same ID → trigger ID change + logger.warning( + "Duplicate cluster ID %d detected from station %d; " + "cluster ID change required.", + c_id, + sender_id, + ) + + # Join-waiting phase confirmation: a VAM from the leader that still + # advertises the same cluster ID means we are accepted (clause 5.4.2.2). + if ( + self._state is VBSState.VRU_ACTIVE_STANDALONE + and self._join_substate is _JoinSubstate.WAITING + and self._join_target_cluster_id is not None + and c_id == self._join_target_cluster_id + ): + self._complete_join(sender_id) + + # --- Cluster operation container --- + cluster_op_ctr = params.get("vruClusterOperationContainer") + if cluster_op_ctr: + join_info = cluster_op_ctr.get("clusterJoinInfo") + leave_info = cluster_op_ctr.get("clusterLeaveInfo") + breakup_info = cluster_op_ctr.get("clusterBreakupInfo") + + # Leader tracks join/leave notifications to update cardinality + if ( + self._state is VBSState.VRU_ACTIVE_CLUSTER_LEADER + and self._cluster is not None + ): + if join_info and join_info.get("clusterId") == self._cluster.cluster_id: + self._cluster.pending_members.add(sender_id) + self._cluster.cardinality = max( + vam_constants.MIN_CLUSTER_SIZE, + len(self._cluster.pending_members) + 1, + ) + logger.debug( + "Station %d joining cluster %d; cardinality now %d.", + sender_id, + self._cluster.cluster_id, + self._cluster.cardinality, + ) + if leave_info and leave_info.get("clusterId") == self._cluster.cluster_id: + self._cluster.pending_members.discard(sender_id) + self._cluster.cardinality = max( + vam_constants.MIN_CLUSTER_SIZE, + len(self._cluster.pending_members) + 1, 
+ ) + logger.debug( + "Station %d leaving cluster %d; cardinality now %d.", + sender_id, + self._cluster.cluster_id, + self._cluster.cardinality, + ) + + # Breakup from leader + if breakup_info: + if self._state is VBSState.VRU_PASSIVE and ( + self._leader_station_id == sender_id + or self._joined_cluster_id + == self._nearby_clusters.get(0, _NearbyCluster(0, 0, 0, 0, 0, 0, 0, None, 0)).cluster_id + ): + reason_str = breakup_info.get( + "clusterBreakupReason", "clusterDisbandedByLeader" + ) + if reason_str == ClusterBreakupReason.RECEPTION_OF_CPM_CONTAINING_CLUSTER.value: + # May stay PASSIVE per clause 5.4.2.2 + logger.info( + "Cluster %d broken up by leader (CPM reason); " + "remaining in VRU_PASSIVE.", + self._joined_cluster_id, + ) + else: + self._do_leave_to_standalone(ClusterLeaveReason.CLUSTER_DISBANDED_BY_LEADER) + + # Refresh leader heartbeat when we are passive + if ( + self._state is VBSState.VRU_PASSIVE + and self._leader_station_id == sender_id + ): + self._last_leader_vam_time = now + + def _complete_join(self, leader_station_id: int) -> None: + """Finalize a join: transition to VRU_PASSIVE (must be called with lock).""" + self._join_substate = _JoinSubstate.JOINED + self._joined_cluster_id = self._join_target_cluster_id + self._leader_station_id = leader_station_id + self._last_leader_vam_time = self._time_fn() + self._state = VBSState.VRU_PASSIVE + logger.info( + "VBS state: VRU_ACTIVE_STANDALONE → VRU_PASSIVE (joined cluster %d, " + "leader station %d).", + self._joined_cluster_id, + leader_station_id, + ) + + def _do_leave_to_standalone(self, reason: ClusterLeaveReason) -> None: + """Immediately transition back to STANDALONE (must be called with lock).""" + self._leave_substate = _LeaveSubstate.NOTIFY + self._leave_reason = reason + self._leave_cluster_id = self._joined_cluster_id + self._leave_started = self._time_fn() + self._joined_cluster_id = None + self._leader_station_id = None + self._last_leader_vam_time = None + self._join_substate = 
_JoinSubstate.NONE + self._join_target_cluster_id = None + self._state = VBSState.VRU_ACTIVE_STANDALONE + logger.info( + "VBS state: VRU_PASSIVE → VRU_ACTIVE_STANDALONE (reason: %s).", + reason.value, + ) + + # ------------------------------------------------------------------ + # Periodic state update helpers (called from update()) + # ------------------------------------------------------------------ + + def _update_standalone( + self, + now: float, + own_lat: float, + own_lon: float, + own_speed: float, + own_heading: float, + ) -> None: + """Handle STANDALONE state time-based transitions (lock held).""" + # --- Join notification phase --- + if self._join_substate is _JoinSubstate.NOTIFY: + assert self._join_started is not None + if now - self._join_started >= vam_constants.TIME_CLUSTER_JOIN_NOTIFICATION: + # Stop sending individual VAMs; wait for confirmation + self._join_substate = _JoinSubstate.WAITING + self._join_started = now # reuse as "waiting started" + self._state = VBSState.VRU_ACTIVE_STANDALONE # stays standalone while waiting + logger.info( + "Join notification complete (cluster_id=%d); " + "waiting for leader acknowledgement.", + self._join_target_cluster_id, + ) + + # --- Join waiting phase --- + elif self._join_substate is _JoinSubstate.WAITING: + assert self._join_started is not None + if now - self._join_started >= vam_constants.TIME_CLUSTER_JOIN_SUCCESS: + self.confirm_join_failed() + + # --- Cancelled / failed join leave-notification phase --- + elif self._join_substate in (_JoinSubstate.CANCELLED, _JoinSubstate.FAILED): + assert self._join_leave_started is not None + if now - self._join_leave_started >= vam_constants.TIME_CLUSTER_LEAVE_NOTIFICATION: + self._join_substate = _JoinSubstate.NONE + self._join_target_cluster_id = None + self._join_leave_reason = None + self._join_leave_started = None + logger.debug("Leave-notification after failed/cancelled join complete.") + + # --- Leave-notification phase (after leaving from PASSIVE) --- + if 
self._leave_substate is _LeaveSubstate.NOTIFY: + assert self._leave_started is not None + if now - self._leave_started >= vam_constants.TIME_CLUSTER_LEAVE_NOTIFICATION: + self._leave_substate = _LeaveSubstate.NONE + self._leave_reason = None + self._leave_cluster_id = None + self._leave_started = None + logger.debug("Leave-notification period complete.") + + def _update_leader( + self, + now: float, + own_lat: float, + own_lon: float, + own_speed: float, + ) -> None: + """Handle CLUSTER_LEADER state time-based transitions (lock held).""" + if self._cluster is None: + return + + # --- Breakup warning timeout → transition to STANDALONE --- + if ( + self._cluster.breakup_started is not None + and now - self._cluster.breakup_started >= vam_constants.TIME_CLUSTER_BREAKUP_WARNING + ): + logger.info( + "Breakup warning period complete (cluster_id=%d); " + "transitioning to VRU_ACTIVE_STANDALONE.", + self._cluster.cluster_id, + ) + self._cluster = None + self._state = VBSState.VRU_ACTIVE_STANDALONE + + def _update_passive( + self, + now: float, + own_lat: float, + own_lon: float, + own_speed: float, + ) -> None: + """Handle PASSIVE state time-based transitions (lock held).""" + # --- Leader-lost detection --- + if ( + self._last_leader_vam_time is not None + and now - self._last_leader_vam_time >= vam_constants.TIME_CLUSTER_CONTINUITY + ): + logger.warning( + "Cluster leader (station %d) lost after %.1f s silence; " + "leaving cluster.", + self._leader_station_id, + vam_constants.TIME_CLUSTER_CONTINUITY, + ) + self._do_leave_to_standalone(ClusterLeaveReason.CLUSTER_LEADER_LOST) + return + + # --- Leave notification timeout --- + if self._leave_substate is _LeaveSubstate.NOTIFY: + assert self._leave_started is not None + if now - self._leave_started >= vam_constants.TIME_CLUSTER_LEAVE_NOTIFICATION: + self._leave_substate = _LeaveSubstate.NONE + self._leave_reason = None + self._leave_cluster_id = None + self._leave_started = None + logger.debug("Leave-notification period 
complete (passive).") + + # ------------------------------------------------------------------ + # Container-building helpers + # ------------------------------------------------------------------ + + def _standalone_operation_container(self, now: float) -> Optional[dict]: + """Build operation container for STANDALONE state (lock held).""" + if self._join_substate is _JoinSubstate.NOTIFY: + # Include clusterJoinInfo + elapsed = now - (self._join_started or now) + remaining_s = max( + 0.0, + vam_constants.TIME_CLUSTER_JOIN_NOTIFICATION - elapsed, + ) + # joinTime is DeltaTimeQuarterSecond (0..127, units 0.25 s) + join_time = min(127, int(remaining_s / 0.25)) + return { + "clusterJoinInfo": { + "clusterId": self._join_target_cluster_id or 0, + "joinTime": join_time, + } + } + + if self._join_substate in (_JoinSubstate.CANCELLED, _JoinSubstate.FAILED): + return { + "clusterLeaveInfo": { + "clusterId": self._join_target_cluster_id or 0, + "clusterLeaveReason": ( + self._join_leave_reason or ClusterLeaveReason.NOT_PROVIDED + ).value, + } + } + + # Leave notification from prior cluster membership + if self._leave_substate is _LeaveSubstate.NOTIFY: + return { + "clusterLeaveInfo": { + "clusterId": self._leave_cluster_id or 0, + "clusterLeaveReason": ( + self._leave_reason or ClusterLeaveReason.NOT_PROVIDED + ).value, + } + } + + return None + + def _passive_operation_container(self, now: float) -> Optional[dict]: + """Build operation container for PASSIVE state (lock held).""" + if self._leave_substate is _LeaveSubstate.NOTIFY: + return { + "clusterLeaveInfo": { + "clusterId": self._leave_cluster_id or 0, + "clusterLeaveReason": ( + self._leave_reason or ClusterLeaveReason.NOT_PROVIDED + ).value, + } + } + return None + + def _leader_operation_container(self, now: float) -> Optional[dict]: + """Build operation container for CLUSTER_LEADER state (lock held).""" + if self._cluster is None: + return None + if self._cluster.breakup_started is not None: + elapsed = now - 
self._cluster.breakup_started + remaining_s = max( + 0.0, + vam_constants.TIME_CLUSTER_BREAKUP_WARNING - elapsed, + ) + breakup_time = min(127, int(remaining_s / 0.25)) + return { + "clusterBreakupInfo": { + "clusterBreakupReason": ( + self._cluster.breakup_reason or ClusterBreakupReason.NOT_PROVIDED + ).value, + "breakupTime": breakup_time, + } + } + return None + + # ------------------------------------------------------------------ + # Utility helpers + # ------------------------------------------------------------------ + + def _generate_unique_cluster_id(self, now: float) -> Optional[int]: + """Return a non-zero cluster ID that is locally unique. + + Per clause 5.4.2.2 the ID must differ from any cluster ID received + within ``timeClusterUniquenessThreshold`` seconds. + + Returns ``None`` if no unique ID can be found within 100 attempts. + """ + # Prune stale entries + self._seen_cluster_ids = { + k: v + for k, v in self._seen_cluster_ids.items() + if now - v < vam_constants.TIME_CLUSTER_UNIQUENESS_THRESHOLD + } + recent_ids = set(self._seen_cluster_ids.keys()) + + for _ in range(100): + candidate = random.randint(1, 255) + if candidate not in recent_ids: + return candidate + return None # pragma: no cover – extremely unlikely + + @staticmethod + def _encode_cluster_profiles(profiles: "set[str]") -> bytes: + """Encode a set of VRU profile names as a 4-bit BIT STRING byte. + + The ASN.1 definition of ``VruClusterProfiles`` (SIZE(4)) is:: + + VruClusterProfiles ::= BIT STRING { + pedestrian (0), + bicyclistAndLightVRUvehicle (1), + motorcyclist (2), + animal (3) + } (SIZE (4)) + + The returned single byte has the four named bits packed into the most- + significant nibble, as expected by the UPER codec. + + Parameters + ---------- + profiles: + Set of ASN.1 profile-name strings, e.g. + ``{"pedestrian", "bicyclistAndLightVruVehicle"}``. + + Returns + ------- + bytes + A single byte carrying the 4-bit BIT STRING. 
+ """ + bit = 0 + profile_bits = { + "pedestrian": 0x80, + "bicyclistAndLightVruVehicle": 0x40, + "motorcyclist": 0x20, + "animal": 0x10, + } + for name, mask in profile_bits.items(): + if name in profiles: + bit |= mask + return bytes([bit]) + + def _expire_nearby_tables(self, now: float) -> None: + """Remove VRU / cluster entries older than T_GenVamMax (lock held).""" + max_age = vam_constants.T_GENVAMMAX / 1000.0 + self._nearby_vrus = { + k: v for k, v in self._nearby_vrus.items() if now - v.last_seen < max_age + } + self._nearby_clusters = { + k: v for k, v in self._nearby_clusters.items() if now - v.last_seen < max_age + } + + # ------------------------------------------------------------------ + # Convenience / introspection + # ------------------------------------------------------------------ + + def get_nearby_vru_count(self) -> int: + """Return the number of recently observed nearby VRUs.""" + with self._lock: + return len(self._nearby_vrus) + + def get_nearby_cluster_count(self) -> int: + """Return the number of recently observed nearby clusters.""" + with self._lock: + return len(self._nearby_clusters) + + def get_cluster_id(self) -> Optional[int]: + """Return the current cluster ID. + + Returns the *led* cluster ID when acting as leader, + the *joined* cluster ID when passive, or ``None`` otherwise. 
+ """ + with self._lock: + if self._state is VBSState.VRU_ACTIVE_CLUSTER_LEADER and self._cluster: + return self._cluster.cluster_id + if self._state is VBSState.VRU_PASSIVE: + return self._joined_cluster_id + return None diff --git a/src/flexstack/geonet/basic_header.py b/src/flexstack/geonet/basic_header.py index ece4530..8ddfbe7 100644 --- a/src/flexstack/geonet/basic_header.py +++ b/src/flexstack/geonet/basic_header.py @@ -1,5 +1,6 @@ from enum import Enum from dataclasses import dataclass, field +from typing import Optional from .exceptions import DecodeError from .mib import MIB @@ -205,6 +206,39 @@ def initialize_with_mib_and_rhl(cls, mib: MIB, rhl: int) -> "BasicHeader": rhl=rhl, ) + @classmethod + def initialize_with_mib_request_and_rhl( + cls, mib: MIB, max_packet_lifetime: Optional[float], rhl: int + ) -> "BasicHeader": + """ + Initialize the Basic Header from MIB, optional max packet lifetime, and RHL. + + Uses max_packet_lifetime when provided, otherwise falls back to + itsGnDefaultPacketLifetime. As specified in ETSI EN 302 636-4-1 + V1.4.1 (2020-01). Section 10.3.2 (LT field). + + Parameters + ---------- + mib : MIB + MIB. + max_packet_lifetime : float or None + Maximum packet lifetime in seconds from the GN-DATA.request, or None + to use the MIB default (itsGnDefaultPacketLifetime). + rhl : int + Remaining hop limit. + """ + if max_packet_lifetime is not None: + lt = LT().set_value_in_millis(int(max_packet_lifetime * 1000)) + else: + lt = LT().set_value_in_seconds(mib.itsGnDefaultPacketLifetime) + return cls( + version=1, + nh=BasicNH.COMMON_HEADER, + reserved=0, + lt=lt, + rhl=rhl, + ) + def set_version(self, version: int) -> "BasicHeader": """ Set the version. 
diff --git a/src/flexstack/geonet/common_header.py b/src/flexstack/geonet/common_header.py index 6deb777..1cac7dc 100644 --- a/src/flexstack/geonet/common_header.py +++ b/src/flexstack/geonet/common_header.py @@ -12,6 +12,7 @@ LocationServiceHST, ) from .exceptions import DecodeError +from .mib import MIB @dataclass(frozen=True) @@ -32,8 +33,8 @@ class CommonHeader: tc : TrafficClass (8 bit unsigned integer) Traffic class that represents Facility-layer requirements on packet transport. flags : int - (8 bit unsigned integer) Flags. Bit 0 Indicates whether the ITS-S is mobile or stationary - (GN protocol constant itsGnIsMobile) Bit 1 to 7 Reserved. + (8 bit unsigned integer) Flags. Bit 0 (MSB) Indicates whether the ITS-S is mobile or stationary + (GN protocol constant itsGnIsMobile) Bits 1 to 7 Reserved. pl : int (16 bit unsigned integer) Payload Length. Indicates the length of the payload in octets. mhl : int @@ -53,7 +54,7 @@ class CommonHeader: mhl: int = 0 @classmethod - def initialize_with_request(cls, request: GNDataRequest) -> "CommonHeader": + def initialize_with_request(cls, request: GNDataRequest, mib: MIB) -> "CommonHeader": """ Initializes the Common Header with a GNDataRequest. @@ -61,17 +62,21 @@ def initialize_with_request(cls, request: GNDataRequest) -> "CommonHeader": ---------- request : GNDataRequest GNDataRequest to use. + mib : MIB + Management Information Base. The flags byte is derived from + mib.itsGnIsMobile (§10.3.4). 
""" nh = request.upper_protocol_entity ht = request.packet_transport_type.header_type hst = request.packet_transport_type.header_subtype tc = request.traffic_class + flags = mib.itsGnIsMobile.value << 7 pl = request.length if ht == HeaderType.TSB and hst == TopoBroadcastHST.SINGLE_HOP: mhl = 1 else: mhl = request.max_hop_limit - return cls(nh=nh, reserved=0, ht=ht, hst=hst, tc=tc, flags=0, pl=pl, mhl=mhl) # type: ignore + return cls(nh=nh, reserved=0, ht=ht, hst=hst, tc=tc, flags=flags, pl=pl, mhl=mhl) # type: ignore def encode_to_int(self) -> int: """ @@ -150,10 +155,16 @@ def decode_from_bytes(cls, header: bytes) -> "CommonHeader": return cls.decode_from_int(int.from_bytes(header[0:8], "big")) @classmethod - def initialize_beacon(cls) -> CommonHeader: + def initialize_beacon(cls, mib: MIB) -> CommonHeader: """ Initializes a Common Header for a beacon message. + Parameters + ---------- + mib : MIB + Management Information Base. The flags byte is derived from + mib.itsGnIsMobile (§10.3.4). + Returns ------- CommonHeader : @@ -163,4 +174,4 @@ def initialize_beacon(cls) -> CommonHeader: ht = HeaderType.BEACON hst = HeaderSubType.UNSPECIFIED tc = TrafficClass() - return cls(nh=nh, reserved=0, ht=ht, hst=hst, tc=tc, flags=0, pl=0, mhl=1) + return cls(nh=nh, reserved=0, ht=ht, hst=hst, tc=tc, flags=mib.itsGnIsMobile.value, pl=0, mhl=1) diff --git a/src/flexstack/geonet/guc_extended_header.py b/src/flexstack/geonet/guc_extended_header.py new file mode 100644 index 0000000..8edddef --- /dev/null +++ b/src/flexstack/geonet/guc_extended_header.py @@ -0,0 +1,107 @@ +from dataclasses import dataclass, field +from .exceptions import DecodeError +from .position_vector import LongPositionVector, ShortPositionVector + + +@dataclass(frozen=True) +class GUCExtendedHeader: + """ + GUC Extended Header class. As specified in ETSI EN 302 636-4-1 V1.4.1 (2020-01). Section 9.8.2 (Table 11). 
+ + Layout (48 bytes after Basic + Common headers): + SN 2 octets (octets 12-13 of full GUC packet) + Reserved 2 octets (octets 14-15) + SO PV 24 octets (octets 16-39) Long Position Vector + DE PV 20 octets (octets 40-59) Short Position Vector + + Attributes + ---------- + sn : int + Sequence number (16-bit unsigned). + reserved : int + Reserved. Set to 0. + so_pv : LongPositionVector + Source Long Position Vector (ego GeoAdhoc router). + de_pv : ShortPositionVector + Destination Short Position Vector (position of the destination GeoAdhoc router). + """ + + sn: int = 0 + reserved: int = 0 + so_pv: LongPositionVector = field(default_factory=LongPositionVector) + de_pv: ShortPositionVector = field(default_factory=ShortPositionVector) + + @classmethod + def initialize_with_request_sequence_number_ego_pv_de_pv( + cls, + sequence_number: int, + ego_pv: LongPositionVector, + de_pv: ShortPositionVector, + ) -> "GUCExtendedHeader": + """ + Initialize the GUC Extended Header for a new outgoing GUC packet. + + Parameters + ---------- + sequence_number : int + The current local sequence number (clause 8.3). + ego_pv : LongPositionVector + The ego position vector (SO PV field, clause 8.2). + de_pv : ShortPositionVector + The destination Short Position Vector (DE PV field, clause 8.5 / LocT). + """ + return cls(sn=sequence_number, so_pv=ego_pv, de_pv=de_pv) + + def with_de_pv(self, de_pv: ShortPositionVector) -> "GUCExtendedHeader": + """Return a copy of this header with an updated DE PV (used by forwarder step 8).""" + return GUCExtendedHeader( + sn=self.sn, + reserved=self.reserved, + so_pv=self.so_pv, + de_pv=de_pv, + ) + + def encode(self) -> bytes: + """ + Encode the GUC Extended Header to bytes (48 bytes). + + Returns + ------- + bytes + Encoded bytes. 
+ """ + return ( + self.sn.to_bytes(2, "big") + + self.reserved.to_bytes(2, "big") + + self.so_pv.encode() + + self.de_pv.encode() + ) + + @classmethod + def decode(cls, header: bytes) -> "GUCExtendedHeader": + """ + Decode the GUC Extended Header from bytes. + + Parameters + ---------- + header : bytes + 48 bytes of the GUC Extended Header. + + Returns + ------- + GUCExtendedHeader + Decoded GUC Extended Header. + + Raises + ------ + DecodeError + If the header is too short. + """ + if len(header) < 48: + raise DecodeError( + f"GUC Extended Header too short: expected 48 bytes, got {len(header)}") + sn = int.from_bytes(header[0:2], "big") + reserved = int.from_bytes(header[2:4], "big") + so_pv = LongPositionVector.decode(header[4:28]) + de_pv = ShortPositionVector.decode(header[28:48]) + return cls(sn=sn, reserved=reserved, so_pv=so_pv, de_pv=de_pv) diff --git a/src/flexstack/geonet/location_table.py b/src/flexstack/geonet/location_table.py index d34eb98..1d8d332 100644 --- a/src/flexstack/geonet/location_table.py +++ b/src/flexstack/geonet/location_table.py @@ -1,16 +1,15 @@ from __future__ import annotations -import hashlib from collections import deque from threading import Lock, RLock from ..utils.time_service import TimeService from .gbc_extended_header import GBCExtendedHeader +from .tsb_extended_header import TSBExtendedHeader +from .guc_extended_header import GUCExtendedHeader +from .ls_extended_header import LSRequestExtendedHeader, LSReplyExtendedHeader from .gn_address import GNAddress from .mib import MIB from .position_vector import LongPositionVector, TST -from .exceptions import DuplicatedPacketException, IncongruentTimestampException - -DIGEST_SIZE = 16 -MAX_DPL = 50 +from .exceptions import DuplicatedPacketException class LocationTableEntry: @@ -29,8 +28,9 @@ class LocationTableEntry: is_neighbour : bool Flag indicating that the GeoAdhoc router is in direct communication range, i.e. is a neighbour. 
- dpl : List[bytes] - Duplicate packet list for source GN_ADDR. (List of hashes of the packets) + dpl : deque[int] + Duplicate packet list for source GN_ADDR. Stores the itsGnDPLLength most recently + seen sequence numbers (annex A.2). tst : TST Timestamp TST(GN_ADDR): The timestamp of the last packet from the source GN_ADDR that was identified as 'not duplicated' @@ -51,8 +51,8 @@ def __init__(self, mib: MIB): self.pdr_lock = Lock() self.pdr: float = 0.0 self.dpl_lock = Lock() - self.dpl_set: set[bytes] = set() - self.dpl_deque: deque[bytes] = deque(maxlen=MAX_DPL) + self.dpl_set: set[int] = set() # O(1) SN lookup + self.dpl_deque: deque[int] = deque(maxlen=mib.itsGnDPLLength) # ring buffer per A.2 def get_gn_address(self) -> GNAddress: """ @@ -68,27 +68,27 @@ def get_gn_address(self) -> GNAddress: def update_position_vector(self, position_vector: LongPositionVector) -> None: """ Updates the position vector. - Annex C2 of ETSI EN 302 636-4-1 V1.4.1 (2020-01) - The algorithm is implemented partially on the TST + Annex C.2 of ETSI EN 302 636-4-1 V1.4.1 (2020-01): + PV is updated only when the received TST is strictly newer than the stored TST + (wrap-around handled by TST.__gt__). If not newer, the call returns silently + and packet processing continues normally (no exception raised). Parameters ---------- position_vector : LongPositionVector Position vector to update. - - Raises - ------ - IncongruentTimestampException - If there has been another packet with posterior timestamp received before. """ with self.position_vector_lock: if self.position_vector.tst.msec == 0: + # §C.2: initial entry – accept first PV unconditionally. + # TST.__gt__ comparison against TST(0) is unreliable for current + # real-world timestamps (mod 2^32 > 2^31) due to wrap-around logic. 
self.position_vector = position_vector - elif position_vector.tst >= self.position_vector.tst: + elif position_vector.tst > self.position_vector.tst: + # §C.2: received PV is strictly newer → update self.position_vector = position_vector - else: - raise IncongruentTimestampException( - "Position vector not updated") + # §C.2 ELSE: received PV is not newer → do nothing; + # packet processing continues normally (no exception) def update_pdr(self, position_vector: LongPositionVector, packet_size: int) -> None: """ @@ -135,9 +135,7 @@ def update_with_shb_packet( DuplicatedPacketException If the packet is duplicated. """ - # Own added check (not in the standard) - self.check_duplicate_packet(packet) - # End own added check + # NOTE: SHB has no SN field; annex A.2 DPD does NOT apply to SHB (§A.1) # step 4 self.update_position_vector(position_vector) # step 5 @@ -145,6 +143,42 @@ def update_with_shb_packet( # step 6 self.is_neighbour = True + def update_with_tsb_packet( + self, packet: bytes, tsb_extended_header: TSBExtendedHeader, is_new_entry: bool + ) -> None: + """ + Updates the entry with a TSB packet. + + Follows steps 3-6 of §10.3.9.3 of ETSI EN 302 636-4-1 V1.4.1 (2020-01). + + Parameters + ---------- + packet : bytes + TSB payload (without headers). + tsb_extended_header : TSBExtendedHeader + TSB extended header. + is_new_entry : bool + True when this LocTE was just created; used to decide whether to set + IS_NEIGHBOUR to FALSE (§10.3.9.3 step 5b) or leave it unchanged (NOTE 1). + + Raises + ------ + IncongruentTimestampException + If a packet with a later timestamp was received before. + DuplicatedPacketException + If the packet is duplicated. 
+ """ + position_vector = tsb_extended_header.so_pv + # Step 3 (DPD) – SN-based duplicate check per annex A.2 + self.check_duplicate_sn(tsb_extended_header.sn) + # Step 5a / 6a – update PV + self.update_position_vector(position_vector) + # Step 5c / 6b – update PDR + self.update_pdr(position_vector, len(packet) + 8 + 4) + # Step 5b – set IS_NEIGHBOUR = FALSE only for new entries (NOTE 1: unchanged otherwise) + if is_new_entry: + self.is_neighbour = False + def update_with_gbc_packet( self, packet: bytes, gbc_extended_header: GBCExtendedHeader ) -> None: @@ -168,9 +202,8 @@ def update_with_gbc_packet( If the packet is duplicated. """ position_vector = gbc_extended_header.so_pv - # Own added check (not in the standard) - self.check_duplicate_packet(packet) - # End own added check + # Step 3 (DPD) – SN-based duplicate check per annex A.2 + self.check_duplicate_sn(gbc_extended_header.sn) # step 4 self.update_position_vector(position_vector) # step 5 @@ -178,33 +211,42 @@ def update_with_gbc_packet( # step 6 self.is_neighbour = False - def check_duplicate_packet(self, packet: bytes) -> None: + def check_duplicate_sn(self, sn: int) -> None: """ - Checks if the packet is duplicated. + Checks if a packet with the given sequence number is a duplicate. + + Implements the SN-based DPD algorithm of annex A.2 of + ETSI EN 302 636-4-1 V1.4.1 (2020-01). - ETSI EN 302 636-4-1 V1.4.1 (2020-01) + The DPL is a ring buffer of length itsGnDPLLength that stores the + sequence numbers of the most recently received (non-duplicate) packets + from this source. When SN is already present in the DPL the packet is + a duplicate; otherwise SN is added at the head (overwriting the oldest + entry when the buffer is full). - Temporary implementation. The DPL is not implemented yet. + Only applicable to multi-hop packets (GUC, TSB, GBC, GAC, LS Request, + LS Reply). BEACON and SHB do not carry an SN field and must NOT call + this method. Parameters ---------- - packet : bytes - Packet to check. 
+ sn : int + Sequence number field from the received GeoNetworking packet. Raises ------ DuplicatedPacketException - If the packet is duplicated. + If *sn* is already present in the DPL. """ - packet_hash = hashlib.blake2b(packet, digest_size=DIGEST_SIZE).digest() with self.dpl_lock: - if packet_hash in self.dpl_set: + if sn in self.dpl_set: raise DuplicatedPacketException("Packet is duplicated") + # Evict oldest SN when the ring buffer is full if len(self.dpl_deque) == self.dpl_deque.maxlen: oldest = self.dpl_deque.popleft() - self.dpl_set.remove(oldest) - self.dpl_deque.append(packet_hash) - self.dpl_set.add(packet_hash) + self.dpl_set.discard(oldest) + self.dpl_deque.append(sn) + self.dpl_set.add(sn) class LocationTable: @@ -250,6 +292,29 @@ def get_entry(self, gn_address: GNAddress) -> LocationTableEntry | None: return self.loc_t.get(gn_address, None) return None + def ensure_entry(self, gn_address: GNAddress) -> LocationTableEntry: + """ + Gets or creates a LocTE for gn_address without modifying its fields. + + Used by the Location Service to create a placeholder entry before setting ls_pending. + + Parameters + ---------- + gn_address : GNAddress + GN address. + + Returns + ------- + LocationTableEntry + Existing or newly created location table entry. + """ + with self.loc_t_lock: + entry = self.loc_t.get(gn_address, None) + if entry is None: + entry = LocationTableEntry(self.mib) + self.loc_t[gn_address] = entry + return entry + def refresh_table(self) -> None: """ Removes the entries that have expired. 
@@ -261,7 +326,7 @@ def refresh_table(self) -> None: with self.loc_t_lock: self.loc_t = { gn: entry for gn, entry in self.loc_t.items() - if (current_time - entry.position_vector.tst) <= self.mib.itsGnLifetimeLocTE + if (current_time - entry.position_vector.tst) <= self.mib.itsGnLifetimeLocTE * 1000 } def new_shb_packet( @@ -293,6 +358,206 @@ def new_shb_packet( entry.update_with_shb_packet(position_vector, packet) self.refresh_table() + def new_guc_packet( + self, guc_extended_header: GUCExtendedHeader, packet: bytes + ) -> None: + """ + Updates the location table with a new GUC packet (SO LocTE). + + Follows steps 5-6 of §10.3.8.3 / §10.3.8.4 of ETSI EN 302 636-4-1 V1.4.1 (2020-01). + IS_NEIGHBOUR is set to FALSE for new entries only; unchanged for existing ones (NOTE 2). + + Parameters + ---------- + guc_extended_header : GUCExtendedHeader + GUC extended header. + packet : bytes + GUC payload (without headers). + + Raises + ------ + IncongruentTimestampException + If a packet with a later timestamp was received before. + DuplicatedPacketException + If the packet is duplicated. + """ + so_pv = guc_extended_header.so_pv + with self.loc_t_lock: + entry: LocationTableEntry | None = self.get_entry(so_pv.gn_addr) + is_new_entry = entry is None + if is_new_entry: + entry = LocationTableEntry(self.mib) + self.loc_t[so_pv.gn_addr] = entry + assert entry is not None + # DPD – SN-based per annex A.2 + entry.check_duplicate_sn(guc_extended_header.sn) + # Update PV + entry.update_position_vector(so_pv) + # Update PDR + entry.update_pdr(so_pv, len(packet) + 8 + 4) + # IS_NEIGHBOUR = FALSE only for new entry (NOTE 2: unchanged otherwise) + if is_new_entry: + entry.is_neighbour = False + self.refresh_table() + + def new_tsb_packet( + self, tsb_extended_header: TSBExtendedHeader, packet: bytes + ) -> None: + """ + Updates the location table with a new TSB packet. + + Parameters + ---------- + tsb_extended_header : TSBExtendedHeader + TSB extended header. 
+ packet : bytes + TSB payload (without headers). + + Raises + ------ + IncongruentTimestampException + If a packet with a later timestamp was received before. + DuplicatedPacketException + If the packet is duplicated. + """ + with self.loc_t_lock: + entry: LocationTableEntry | None = self.get_entry( + tsb_extended_header.so_pv.gn_addr) + is_new_entry = entry is None + if is_new_entry: + entry = LocationTableEntry(self.mib) + self.loc_t[tsb_extended_header.so_pv.gn_addr] = entry + assert entry is not None + entry.update_with_tsb_packet(packet, tsb_extended_header, is_new_entry) + self.refresh_table() + + def new_gac_packet( + self, gbc_extended_header: GBCExtendedHeader, packet: bytes + ) -> None: + """ + Updates the location table with a new GAC packet (SO LocTE). + + Follows steps 5-6 of §10.3.12.3 of ETSI EN 302 636-4-1 V1.4.1 (2020-01). + GAC and GBC share the same extended header format (§9.8.5), so GBCExtendedHeader + is reused for GAC. + IS_NEIGHBOUR is set to FALSE for new entries only; unchanged for existing ones (NOTE 1). + + Parameters + ---------- + gbc_extended_header : GBCExtendedHeader + GAC extended header (same wire format as GBC). + packet : bytes + GAC payload (without headers). + + Raises + ------ + IncongruentTimestampException + If a packet with a later timestamp was received before. + DuplicatedPacketException + If the packet is duplicated. 
+ """ + so_pv = gbc_extended_header.so_pv + with self.loc_t_lock: + entry: LocationTableEntry | None = self.get_entry(so_pv.gn_addr) + is_new_entry = entry is None + if is_new_entry: + entry = LocationTableEntry(self.mib) + self.loc_t[so_pv.gn_addr] = entry + assert entry is not None + # DPD – SN-based per annex A.2 + entry.check_duplicate_sn(gbc_extended_header.sn) + # Update PV (step 5a / 6a) + entry.update_position_vector(so_pv) + # Update PDR (step 5c / 6b) + entry.update_pdr(so_pv, len(packet) + 8 + 4) + # IS_NEIGHBOUR = FALSE only for new entry (NOTE 1: unchanged otherwise) + if is_new_entry: + entry.is_neighbour = False + self.refresh_table() + + def new_ls_request_packet( + self, ls_request_header: LSRequestExtendedHeader, packet: bytes + ) -> None: + """ + Updates the location table with a new LS Request packet (SO LocTE). + + Follows steps 5-6 of §10.3.7.3 of ETSI EN 302 636-4-1 V1.4.1 (2020-01). + IS_NEIGHBOUR is set to FALSE for new entries only; unchanged for existing ones (NOTE). + + Parameters + ---------- + ls_request_header : LSRequestExtendedHeader + LS Request extended header. + packet : bytes + LS Request payload/data (used for DPD). + + Raises + ------ + IncongruentTimestampException + If a packet with a later timestamp was received before. + DuplicatedPacketException + If the packet is duplicated. 
+ """ + so_pv = ls_request_header.so_pv + with self.loc_t_lock: + entry: LocationTableEntry | None = self.get_entry(so_pv.gn_addr) + is_new_entry = entry is None + if is_new_entry: + entry = LocationTableEntry(self.mib) + self.loc_t[so_pv.gn_addr] = entry + assert entry is not None + # DPD – SN-based per annex A.2 + entry.check_duplicate_sn(ls_request_header.sn) + # Step 5a / 6a: update PV(SO) + entry.update_position_vector(so_pv) + # Step 5c / 6b: update PDR(SO) + entry.update_pdr(so_pv, len(packet) + 8 + 4) + # Step 5b: IS_NEIGHBOUR = FALSE only for new entry (NOTE: unchanged otherwise) + if is_new_entry: + entry.is_neighbour = False + self.refresh_table() + + def new_ls_reply_packet( + self, ls_reply_header: LSReplyExtendedHeader, packet: bytes + ) -> None: + """ + Updates the location table with a new LS Reply packet (SO LocTE). + + Follows steps 4-5 of §10.3.7.1.4 of ETSI EN 302 636-4-1 V1.4.1 (2020-01). + The LS Reply SO PV is the replier's position; IS_NEIGHBOUR is set to FALSE for new entries. + + Parameters + ---------- + ls_reply_header : LSReplyExtendedHeader + LS Reply extended header. + packet : bytes + LS Reply payload (used for DPD). + + Raises + ------ + IncongruentTimestampException + If a packet with a later timestamp was received before. + DuplicatedPacketException + If the packet is duplicated. 
+ """ + so_pv = ls_reply_header.so_pv + with self.loc_t_lock: + entry: LocationTableEntry | None = self.get_entry(so_pv.gn_addr) + is_new_entry = entry is None + if is_new_entry: + entry = LocationTableEntry(self.mib) + self.loc_t[so_pv.gn_addr] = entry + assert entry is not None + # DPD – SN-based per annex A.2 + entry.check_duplicate_sn(ls_reply_header.sn) + # Step 4: update PV(SO) + entry.update_position_vector(so_pv) + # Step 5: update PDR(SO) + entry.update_pdr(so_pv, len(packet) + 8 + 4) + if is_new_entry: + entry.is_neighbour = False + self.refresh_table() + def new_gbc_packet( self, gbc_extended_header: GBCExtendedHeader, packet: bytes ) -> None: diff --git a/src/flexstack/geonet/ls_extended_header.py b/src/flexstack/geonet/ls_extended_header.py new file mode 100644 index 0000000..31170a2 --- /dev/null +++ b/src/flexstack/geonet/ls_extended_header.py @@ -0,0 +1,174 @@ +from dataclasses import dataclass, field +from .exceptions import DecodeError +from .position_vector import LongPositionVector, ShortPositionVector +from .gn_address import GNAddress + + +@dataclass(frozen=True) +class LSRequestExtendedHeader: + """ + LS Request Extended Header. §9.8.7 Table 16 of ETSI EN 302 636-4-1 V1.4.1 (2020-01). + + Layout (36 bytes, after Basic + Common headers): + SN 2 octets (octets 12-13 of full LS Request packet) + Reserved 2 octets (octets 14-15) + SO PV 24 octets (octets 16-39) Long Position Vector + Request GN_ADDR 8 octets (octets 40-47) GN address being sought + + Attributes + ---------- + sn : int + Sequence number (16-bit unsigned). + reserved : int + Reserved. Set to 0. + so_pv : LongPositionVector + Source Long Position Vector (ego GeoAdhoc router). + request_gn_addr : GNAddress + GN_ADDR of the GeoAdhoc router whose location is being requested. 
+ """ + + sn: int = 0 + reserved: int = 0 + so_pv: LongPositionVector = field(default_factory=LongPositionVector) + request_gn_addr: GNAddress = field(default_factory=GNAddress) + + @classmethod + def initialize( + cls, + sequence_number: int, + ego_pv: LongPositionVector, + request_gn_addr: GNAddress, + ) -> "LSRequestExtendedHeader": + """ + Initialise an LS Request Extended Header for a new outgoing LS Request. + + Parameters + ---------- + sequence_number : int + Local sequence number (clause 8.3). + ego_pv : LongPositionVector + Ego position vector (SO PV field, clause 8.2). + request_gn_addr : GNAddress + GN_ADDR of the sought GeoAdhoc router (table 23). + """ + return cls(sn=sequence_number, so_pv=ego_pv, request_gn_addr=request_gn_addr) + + def encode(self) -> bytes: + """Encode the LS Request Extended Header to bytes (36 bytes).""" + return ( + self.sn.to_bytes(2, "big") + + self.reserved.to_bytes(2, "big") + + self.so_pv.encode() + + self.request_gn_addr.encode_to_int().to_bytes(8, "big") + ) + + @classmethod + def decode(cls, header: bytes) -> "LSRequestExtendedHeader": + """ + Decode the LS Request Extended Header from bytes. + + Parameters + ---------- + header : bytes + At least 36 bytes. + + Raises + ------ + DecodeError + If the header is shorter than 36 bytes. + """ + if len(header) < 36: + raise DecodeError( + f"LS Request Extended Header too short: expected 36 bytes, got {len(header)}" + ) + sn = int.from_bytes(header[0:2], "big") + reserved = int.from_bytes(header[2:4], "big") + so_pv = LongPositionVector.decode(header[4:28]) + request_gn_addr = GNAddress.decode(header[28:36]) + return cls(sn=sn, reserved=reserved, so_pv=so_pv, request_gn_addr=request_gn_addr) + + +@dataclass(frozen=True) +class LSReplyExtendedHeader: + """ + LS Reply Extended Header. §9.8.8 Table 17 of ETSI EN 302 636-4-1 V1.4.1 (2020-01). 
+ + Layout (48 bytes, after Basic + Common headers): + SN 2 octets (octets 12-13) Sequence number + Reserved 2 octets (octets 14-15) + SO PV 24 octets (octets 16-39) Long Position Vector (source = replier) + DE PV 20 octets (octets 40-59) Short Position Vector (destination = requester) + + NOTE: This layout is identical to the GUC Extended Header (§9.8.2). + + Attributes + ---------- + sn : int + Sequence number (16-bit unsigned). + reserved : int + Reserved. Set to 0. + so_pv : LongPositionVector + Source Long Position Vector (the replier's own position). + de_pv : ShortPositionVector + Destination Short Position Vector (the requester's position from LocT). + """ + + sn: int = 0 + reserved: int = 0 + so_pv: LongPositionVector = field(default_factory=LongPositionVector) + de_pv: ShortPositionVector = field(default_factory=ShortPositionVector) + + @classmethod + def initialize( + cls, + sequence_number: int, + ego_pv: LongPositionVector, + de_pv: ShortPositionVector, + ) -> "LSReplyExtendedHeader": + """ + Initialise an LS Reply Extended Header for a new outgoing LS Reply. + + Parameters + ---------- + sequence_number : int + Local sequence number (clause 8.3). + ego_pv : LongPositionVector + Ego position vector of the replier (SO PV field, table 25). + de_pv : ShortPositionVector + Short Position Vector of the requester, from LocT (DE PV field, table 25). + """ + return cls(sn=sequence_number, so_pv=ego_pv, de_pv=de_pv) + + def encode(self) -> bytes: + """Encode the LS Reply Extended Header to bytes (48 bytes).""" + return ( + self.sn.to_bytes(2, "big") + + self.reserved.to_bytes(2, "big") + + self.so_pv.encode() + + self.de_pv.encode() + ) + + @classmethod + def decode(cls, header: bytes) -> "LSReplyExtendedHeader": + """ + Decode the LS Reply Extended Header from bytes. + + Parameters + ---------- + header : bytes + At least 48 bytes. + + Raises + ------ + DecodeError + If the header is shorter than 48 bytes. 
+ """ + if len(header) < 48: + raise DecodeError( + f"LS Reply Extended Header too short: expected 48 bytes, got {len(header)}" + ) + sn = int.from_bytes(header[0:2], "big") + reserved = int.from_bytes(header[2:4], "big") + so_pv = LongPositionVector.decode(header[4:28]) + de_pv = ShortPositionVector.decode(header[28:48]) + return cls(sn=sn, reserved=reserved, so_pv=so_pv, de_pv=de_pv) diff --git a/src/flexstack/geonet/router.py b/src/flexstack/geonet/router.py index 5e26d3a..933c84c 100644 --- a/src/flexstack/geonet/router.py +++ b/src/flexstack/geonet/router.py @@ -1,24 +1,32 @@ from __future__ import annotations from collections.abc import Callable +from dataclasses import replace as dataclass_replace from enum import Enum -from time import sleep -from threading import Thread, Lock +from threading import Thread, Lock, Event, Timer +from typing import Union, cast import math +import random from ..linklayer.exceptions import ( SendingException, PacketTooLongException, ) from .mib import ( MIB, + GnSecurity, LocalGnAddrConfMethod, NonAreaForwardingAlgorithm, AreaForwardingAlgorithm, ) from .gn_address import GNAddress from .service_access_point import ( + CommonNH, HeaderType, + HeaderSubType, TopoBroadcastHST, GeoBroadcastHST, + GeoAnycastHST, + LocationServiceHST, + TrafficClass, GNDataRequest, ResultCode, GNDataConfirm, @@ -29,12 +37,16 @@ from .basic_header import BasicNH, BasicHeader from .common_header import CommonHeader from .gbc_extended_header import GBCExtendedHeader -from .position_vector import LongPositionVector +from .tsb_extended_header import TSBExtendedHeader +from .guc_extended_header import GUCExtendedHeader +from .ls_extended_header import LSRequestExtendedHeader, LSReplyExtendedHeader +from .position_vector import LongPositionVector, ShortPositionVector from .location_table import LocationTable from ..linklayer.link_layer import LinkLayer from ..security.sign_service import SignService +from ..security.verify_service import VerifyService from 
..security.security_profiles import SecurityProfile -from ..security.sn_sap import SNSIGNConfirm, SNSIGNRequest +from ..security.sn_sap import SNSIGNConfirm, SNSIGNRequest, SNVERIFYRequest, ReportVerify from .exceptions import ( DADException, DecapError, @@ -73,7 +85,7 @@ class Router: """ - def __init__(self, mib: MIB, sign_service: SignService | None = None) -> None: + def __init__(self, mib: MIB, sign_service: SignService | None = None, verify_service: VerifyService | None = None) -> None: """ Initialize the router. @@ -81,6 +93,12 @@ def __init__(self, mib: MIB, sign_service: SignService | None = None) -> None: ---------- mib : MIB MIB to use. + sign_service : SignService | None + Sign service used to sign outgoing secured packets. Defaults to None + (no signing). + verify_service : VerifyService | None + Verify service used to verify incoming secured packets. Defaults to None + (secured packets are discarded with a warning). """ self.mib = mib self.ego_position_vector_lock = Lock() @@ -89,9 +107,21 @@ def __init__(self, mib: MIB, sign_service: SignService | None = None) -> None: self.link_layer: LinkLayer | None = None self.location_table = LocationTable(mib) self.sign_service: SignService | None = sign_service + self.verify_service: VerifyService | None = verify_service self.indication_callback = None self.sequence_number_lock = Lock() self.sequence_number = 0 + self._beacon_reset_event: Event | None = None + # Location Service state (§10.2.4) + self._ls_lock: Lock = Lock() + self._ls_timers: dict = {} # GNAddress → threading.Timer + self._ls_retransmit_counters: dict = {} # GNAddress → int + # GNAddress → list[GNDataRequest] + self._ls_packet_buffers: dict = {} + # CBF packet buffer (§F.3): keyed by (so_gn_addr, sn) + self._cbf_lock: Lock = Lock() + # (GNAddress, int) → threading.Timer + self._cbf_buffer: dict = {} if self.mib.itsGnBeaconServiceRetransmitTimer > 0: self.configure_beacon_service() @@ -103,18 +133,34 @@ def configure_beacon_service(self) -> 
None: ---------- None """ - + self._beacon_reset_event = Event() thread = Thread(target=self.beacon_service_thread, daemon=True) thread.start() def beacon_service_thread(self) -> None: """ Thread function to handle beacon service retransmissions. - """ + Sends a beacon then waits for the retransmit interval plus a random + jitter in [0, itsGnBeaconServiceMaxJitter] ms as required by + §10.3.6.2 step 5 of ETSI EN 302 636-4-1 V1.4.1 (2020-01). + If an SHB transmission resets the beacon timer (§10.3.10.2 step 7) + during the wait the interval is restarted so that no unnecessary + beacon is sent. + """ + assert self._beacon_reset_event is not None while True: self.gn_data_request_beacon() - sleep(self.mib.itsGnBeaconServiceRetransmitTimer / 1000) + # §10.3.6.2 step 5: TBeacon = itsGnBeaconServiceRetransmitTimer + # + RAND[0, itsGnBeaconServiceMaxJitter] + assert self.mib.itsGnBeaconServiceMaxJitter is not None + jitter_ms = random.uniform(0, self.mib.itsGnBeaconServiceMaxJitter) + timeout = (self.mib.itsGnBeaconServiceRetransmitTimer + jitter_ms) / 1000 + # If SHB fires during the wait it sets the event, causing wait() to + # return True. Clear the event and restart the full interval so no + # redundant beacon is emitted immediately after an SHB. + while self._beacon_reset_event.wait(timeout): + self._beacon_reset_event.clear() def gn_data_request_beacon(self) -> None: """ @@ -125,7 +171,8 @@ def gn_data_request_beacon(self) -> None: None """ basic_header = BasicHeader.initialize_with_mib_and_rhl(self.mib, 1) - common_header = CommonHeader.initialize_beacon() + # §10.3.4: Flags Bit 0 shall be set to itsGnIsMobile + common_header = CommonHeader.initialize_beacon(self.mib) long_position_vector = self.ego_position_vector packet = ( basic_header.encode_to_bytes() @@ -198,32 +245,65 @@ def gn_data_request_shb(self, request: GNDataRequest) -> GNDataConfirm: request : GNDataRequest GNDataRequest to handle. 
""" - basic_header = BasicHeader.initialize_with_mib_and_rhl(self.mib, 1) - common_header = CommonHeader.initialize_with_request(request) + # Step 1a: Basic Header – LT from request when provided, else MIB default; + # RHL = 1 for SHB (§10.3.2) + # Step 1b: Common Header – flags bit 0 = itsGnIsMobile (§10.3.4) + basic_header = BasicHeader.initialize_with_mib_request_and_rhl( + self.mib, request.max_packet_lifetime, 1) + common_header = CommonHeader.initialize_with_request( + request, self.mib) long_position_vector = self.ego_position_vector media_dependant_data = b"\x00\x00\x00\x00" packet = b"" - if request.security_profile == SecurityProfile.COOPERATIVE_AWARENESS_MESSAGE: + if self.mib.itsGnSecurity == GnSecurity.ENABLED: if self.sign_service is None: - raise NotImplementedError("Security profile not implemented") - media_dependant_data = b"\x00\x00\x00\x00" - tbs_packet = ( - common_header.encode_to_bytes() - + long_position_vector.encode() - + media_dependant_data - + request.data - ) - sign_request = SNSIGNRequest( - tbs_message_length=len(tbs_packet), - tbs_message=tbs_packet, - its_aid=request.its_aid, - permissions=request.security_permissions, - permissions_length=len(request.security_permissions), - ) - sign_confirm: SNSIGNConfirm = self.sign_service.sign_cam( - sign_request) - basic_header = basic_header.set_nh(BasicNH.SECURED_PACKET) - packet = basic_header.encode_to_bytes() + sign_confirm.sec_message + raise ValueError( + "MIB requires security but no SignService provided to Router") + if request.security_profile in ( + SecurityProfile.COOPERATIVE_AWARENESS_MESSAGE, + SecurityProfile.VRU_AWARENESS_MESSAGE, + ): + # ETSI EN 302 636-4-1 V1.4.1 §9.5 / ETSI TS 103 097 §7.1.1: + # the signed payload is Common Header + Extended Header + GN-SDU. + # The Basic Header is NOT part of the signed content. 
+ # Both CAM (PSID 36) and VAM (PSID 638) follow the §7.1.1 + # profile: signer alternates certificate/digest on a 1-second + # timer so Wireshark and receivers always obtain the full cert. + tbs_packet = ( + common_header.encode_to_bytes() + + long_position_vector.encode() + + media_dependant_data + + request.data + ) + sign_request = SNSIGNRequest( + tbs_message_length=len(tbs_packet), + tbs_message=tbs_packet, + its_aid=request.its_aid, + permissions=request.security_permissions, + permissions_length=len(request.security_permissions), + ) + sign_confirm: SNSIGNConfirm = self.sign_service.sign_cam( + sign_request) + basic_header = basic_header.set_nh(BasicNH.SECURED_PACKET) + packet = basic_header.encode_to_bytes() + sign_confirm.sec_message + + else: + tbs_packet = ( + common_header.encode_to_bytes() + + long_position_vector.encode() + + media_dependant_data + + request.data + ) + sign_request = SNSIGNRequest( + tbs_message_length=len(tbs_packet), + tbs_message=tbs_packet, + its_aid=request.its_aid, + permissions=request.security_permissions, + permissions_length=len(request.security_permissions), + ) + sign_confirm = self.sign_service.sign_request(sign_request) + basic_header = basic_header.set_nh(BasicNH.SECURED_PACKET) + packet = basic_header.encode_to_bytes() + sign_confirm.sec_message else: packet = ( @@ -234,6 +314,12 @@ def gn_data_request_shb(self, request: GNDataRequest) -> GNDataConfirm: + request.data ) + # Step 3: §10.3.10.2 – if no suitable neighbour exists and SCF is set, + # buffer in the BC forwarding packet buffer (not yet implemented). 
+ if len(self.location_table.get_neighbours()) == 0 and request.traffic_class.scf: + print( + "SHB: no neighbours and SCF set; BC forwarding buffer not yet implemented") + # Steps 5-6: media-dependent procedures and pass GN-PDU to LL try: if self.link_layer: self.link_layer.send(packet) @@ -242,6 +328,10 @@ def gn_data_request_shb(self, request: GNDataRequest) -> GNDataConfirm: except SendingException: return GNDataConfirm(result_code=ResultCode.UNSPECIFIED) + # Step 7: reset beacon timer to prevent an unnecessary beacon (§10.3.10.2) + if self._beacon_reset_event is not None: + self._beacon_reset_event.set() + return GNDataConfirm(result_code=ResultCode.ACCEPTED) @staticmethod @@ -318,35 +408,217 @@ def gn_geometric_function_f( coord1 = (area.latitude / 10000000, area.longitude / 10000000) coord2 = (lat / 10000000, lon / 10000000) x_distance, y_distance = Router.calculate_distance(coord1, coord2) - if area_type == GeoBroadcastHST.GEOBROADCAST_CIRCLE: + if area_type in (GeoBroadcastHST.GEOBROADCAST_CIRCLE, GeoAnycastHST.GEOANYCAST_CIRCLE): return 1 - (x_distance / area.a) ** 2 - (y_distance / area.a) ** 2 - if area_type == GeoBroadcastHST.GEOBROADCAST_ELIP: + if area_type in (GeoBroadcastHST.GEOBROADCAST_ELIP, GeoAnycastHST.GEOANYCAST_ELIP): return 1 - (x_distance / area.a) ** 2 - (y_distance / area.b) ** 2 - if area_type == GeoBroadcastHST.GEOBROADCAST_RECT: - return min(1 - (x_distance / area.a) ** 2, (y_distance / area.b) ** 2) + if area_type in (GeoBroadcastHST.GEOBROADCAST_RECT, GeoAnycastHST.GEOANYCAST_RECT): + return min(1 - (x_distance / area.a) ** 2, 1 - (y_distance / area.b) ** 2) raise ValueError("Invalid area type") + @staticmethod + def _compute_area_size_m2( + area_type: GeoBroadcastHST | GeoAnycastHST, area: Area + ) -> float: + """ + Compute the geographical area size in m² for §B.3 area size control. + + Parameters + ---------- + area_type : GeoBroadcastHST | GeoAnycastHST + Shape type obtained from the Common Header HST field. 
+ area : Area + Area parameters (a, b in metres). + """ + if area_type in (GeoBroadcastHST.GEOBROADCAST_CIRCLE, GeoAnycastHST.GEOANYCAST_CIRCLE): + return math.pi * area.a ** 2 + if area_type in (GeoBroadcastHST.GEOBROADCAST_ELIP, GeoAnycastHST.GEOANYCAST_ELIP): + return math.pi * area.a * area.b + # RECT: a and b are distances from centre to edge → full area = (2a) × (2b) + return 4.0 * area.a * area.b + + @staticmethod + def _distance_m(lat1: int, lon1: int, lat2: int, lon2: int) -> float: + """Euclidean distance in metres between two points (coordinates in 1/10 µdeg).""" + coord1 = (lat1 / 10_000_000, lon1 / 10_000_000) + coord2 = (lat2 / 10_000_000, lon2 / 10_000_000) + dx, dy = Router.calculate_distance(coord1, coord2) + return math.sqrt(dx * dx + dy * dy) + + def gn_greedy_forwarding( + self, dest_lat: int, dest_lon: int, traffic_class: TrafficClass + ) -> bool: + """ + §E.2 Greedy Forwarding algorithm (ETSI EN 302 636-4-1 V1.4.1 Annex E). + + Selects the neighbour with the smallest distance to the destination + (Most Forward within Radius policy). Returns True if the packet should + be transmitted (greedy next hop or BCAST fallback at local optimum), + False if it should be buffered (local optimum and SCF is enabled). 
+ """ + mfr = Router._distance_m( + dest_lat, dest_lon, + self.ego_position_vector.latitude, + self.ego_position_vector.longitude, + ) + progress_found = False + for entry in self.location_table.get_neighbours(): + pv = entry.position_vector + d = Router._distance_m( + dest_lat, dest_lon, pv.latitude, pv.longitude) + if d < mfr: + mfr = d + progress_found = True + if progress_found: + return True # §E.2: send to greedy NH (NH_LL_ADDR = NH.LL_ADDR) + # §E.2: local optimum – no neighbour with positive progress towards destination + if traffic_class.scf: + return False # §E.2: buffer (NH_LL_ADDR = 0); stub: omit send + return True # §E.2: BCAST fallback (NH_LL_ADDR = BCAST) + + def _cbf_compute_timeout_ms(self, dist_m: float) -> float: + """ + §F.3 equation (F.1): compute CBF timeout in milliseconds. + + TO_CBF = TO_CBF_MAX + (TO_CBF_MIN - TO_CBF_MAX) / DIST_MAX * DIST + Clamped to TO_CBF_MIN when DIST >= DIST_MAX. + """ + dist_max = float(self.mib.itsGnDefaultMaxCommunicationRange) + to_min = float(self.mib.itsGnCbfMinTime) + to_max = float(self.mib.itsGnCbfMaxTime) + if dist_m >= dist_max: + return to_min + return to_max + (to_min - to_max) / dist_max * dist_m + + def _cbf_timeout( + self, + cbf_key: tuple, + full_packet: bytes, + ) -> None: + """ + §F.3 timer expiry callback: re-broadcast the buffered GBC/GAC packet. + + Called by the per-packet Timer when TO_CBF expires. The packet is + removed from the CBF buffer and sent to the LL as BCAST. + """ + with self._cbf_lock: + if cbf_key not in self._cbf_buffer: + return # duplicate already arrived and discarded us + del self._cbf_buffer[cbf_key] + try: + if self.link_layer: + self.link_layer.send(full_packet) + except (PacketTooLongException, SendingException): + pass + + def gn_area_cbf_forwarding( + self, + basic_header: BasicHeader, + common_header: CommonHeader, + gbc_extended_header: GBCExtendedHeader, + packet: bytes, + ) -> bool: + """ + §F.3 Area CBF forwarding algorithm for GBC/GAC forwarder operations. 
+ + Returns True if the packet was buffered (timer started) so that the + caller should NOT send immediately. Returns False when a duplicate + arrives and the buffered copy is discarded (packet fully suppressed). + + The CBF key is ``(so_gn_addr, sn)`` uniquely identifying the GN-PDU. + If the key is already in the CBF buffer (duplicate reception): + - stop the timer and remove the entry → return False (discard). + If the key is new: + - compute timeout from sender-to-ego distance (§F.3 eq. F.1), + - store the full re-encoded packet in the CBF buffer, + - start a threading.Timer that calls ``_cbf_timeout`` on expiry, + - return True (packet buffered, do not send immediately). + """ + cbf_key = (gbc_extended_header.so_pv.gn_addr, gbc_extended_header.sn) + with self._cbf_lock: + if cbf_key in self._cbf_buffer: + # §F.3: duplicate arrived while buffering → stop timer, discard + old_timer = self._cbf_buffer.pop(cbf_key) + old_timer.cancel() + return False # §F.3: return -1 (discard) + # New packet – compute timeout + se_entry = self.location_table.get_entry( + gbc_extended_header.so_pv.gn_addr) + se_pos_valid = ( + se_entry is not None and se_entry.position_vector.pai + and self.ego_position_vector.pai + ) + if se_pos_valid and se_entry is not None: + dist = Router._distance_m( + se_entry.position_vector.latitude, + se_entry.position_vector.longitude, + self.ego_position_vector.latitude, + self.ego_position_vector.longitude, + ) + timeout_ms = self._cbf_compute_timeout_ms(dist) + else: + # §F.3: use TO_CBF_MAX when sender position unavailable + timeout_ms = float(self.mib.itsGnCbfMaxTime) + full_packet = ( + basic_header.encode_to_bytes() + + common_header.encode_to_bytes() + + gbc_extended_header.encode() + + packet + ) + timer = Timer( + timeout_ms / 1000.0, + self._cbf_timeout, + args=[cbf_key, full_packet], + ) + timer.daemon = True + self._cbf_buffer[cbf_key] = timer + timer.start() + return True # §F.3: return 0 (packet buffered) + def 
gn_forwarding_algorithm_selection( - self, request: GNDataRequest + self, request: GNDataRequest, sender_gn_addr: GNAddress | None = None ) -> GNForwardingAlgorithmResponse: """ + Annex D of ETSI EN 302 636-4-1 V1.4.1 (2020-01). Parameters ---------- request : GNDataRequest GNDataRequest to handle. + sender_gn_addr : GNAddress | None + GN address of the sender of the packet being forwarded. Used to + look up the sender's LocTE for the §D SE_POS_VALID / F_SE check. + None for source operations (ego is the originator). """ - result = self.gn_geometric_function_f( + f_ego = self.gn_geometric_function_f( request.packet_transport_type.header_subtype, request.area, self.ego_position_vector.latitude, self.ego_position_vector.longitude, ) - if result >= 0: + if f_ego >= 0: + # §D: ego inside or at border of area → area forwarding return GNForwardingAlgorithmResponse.AREA_FORWARDING - # TODO: Parts of the forwarding algorithm selection - return GNForwardingAlgorithmResponse.DISCARTED + + # §D: ego is outside the area – check sender position (Annex D lines 14-22) + se_pv = None + if sender_gn_addr is not None: + se_entry = self.location_table.get_entry(sender_gn_addr) + if se_entry is not None: + se_pv = se_entry.position_vector + # SE_POS_VALID = PV_SE EXISTS AND PAI_SE = TRUE + if se_pv is not None and se_pv.pai: + f_se = self.gn_geometric_function_f( + request.packet_transport_type.header_subtype, + request.area, + se_pv.latitude, + se_pv.longitude, + ) + if f_se >= 0: + # Sender was inside/at border → discard to prevent area→non-area transition + return GNForwardingAlgorithmResponse.DISCARTED + return GNForwardingAlgorithmResponse.NON_AREA_FORWARDING def gn_data_forward_gbc( self, @@ -389,13 +661,20 @@ def gn_data_forward_gbc( ) request = GNDataRequest( area=area, packet_transport_type=packet_transport_type) - algorithm = self.gn_forwarding_algorithm_selection(request) + algorithm = self.gn_forwarding_algorithm_selection( + request, 
sender_gn_addr=gbc_extended_header.so_pv.gn_addr) # 12) if the return value of the forwarding algorithm is 0 (packet is buffered in a forwarding packet # buffer) or -1 (packet is discarded), omit the execution of further steps; if algorithm == GNForwardingAlgorithmResponse.AREA_FORWARDING: # TODO: step 13 - # 14) pass the GN-PDU to the LL protocol entity via the IN interface and set the destination - # address to the LL address of the next hop LL_ADDR_NH. + # 14) pass the GN-PDU to the LL protocol entity; dispatch to §F.2 (SIMPLE/UNSPECIFIED) + # or §F.3 (CBF) based on itsGnAreaForwardingAlgorithm. + if self.mib.itsGnAreaForwardingAlgorithm == AreaForwardingAlgorithm.CBF: + # §F.3: buffer with timer; _cbf_timeout fires the BCAST re-transmission + self.gn_area_cbf_forwarding( + basic_header, common_header, gbc_extended_header, packet) + return GNDataConfirm(result_code=ResultCode.ACCEPTED) + # §F.2 / UNSPECIFIED: simple re-broadcast (BCAST) immediately final_packet: bytes = ( basic_header.encode_to_bytes() + common_header.encode_to_bytes() @@ -410,6 +689,23 @@ def gn_data_forward_gbc( result_code=ResultCode.MAXIMUM_LENGTH_EXCEEDED) except SendingException: return GNDataConfirm(result_code=ResultCode.UNSPECIFIED) + elif algorithm == GNForwardingAlgorithmResponse.NON_AREA_FORWARDING: + # §E.2: Greedy Forwarding towards area centre (ego outside area) + if self.gn_greedy_forwarding(area.latitude, area.longitude, common_header.tc): + naf_packet: bytes = ( + basic_header.encode_to_bytes() + + common_header.encode_to_bytes() + + gbc_extended_header.encode() + + packet + ) + try: + if self.link_layer: + self.link_layer.send(naf_packet) + except PacketTooLongException: + return GNDataConfirm( + result_code=ResultCode.MAXIMUM_LENGTH_EXCEEDED) + except SendingException: + return GNDataConfirm(result_code=ResultCode.UNSPECIFIED) else: final_packet: bytes = ( @@ -428,6 +724,20 @@ def gn_data_forward_gbc( return GNDataConfirm(result_code=ResultCode.UNSPECIFIED) return 
GNDataConfirm(result_code=ResultCode.ACCEPTED) + def gn_data_request_gac(self, request: GNDataRequest) -> GNDataConfirm: + """ + Handle a GeoAnycast (GAC) GNDataRequest. + + Implements §10.3.12.2 source operations of ETSI EN 302 636-4-1 V1.4.1 (2020-01). + The source operations for GAC are identical to those for GBC (§10.3.11.2). + + Parameters + ---------- + request : GNDataRequest + GNDataRequest to handle. + """ + return self.gn_data_request_gbc(request) + def gn_data_request_gbc(self, request: GNDataRequest) -> GNDataConfirm: """ Handle a Geo Broadcast GNDataRequest. @@ -437,50 +747,82 @@ def gn_data_request_gbc(self, request: GNDataRequest) -> GNDataConfirm: request : GNDataRequest GNDataRequest to handle. """ + # §B.3: Geographical area size control – do not send if area exceeds itsGnMaxGeoAreaSize + if Router._compute_area_size_m2( + request.packet_transport_type.header_subtype, request.area # type: ignore + ) > self.mib.itsGnMaxGeoAreaSize * 1_000_000: + return GNDataConfirm(result_code=ResultCode.GEOGRAPHICAL_SCOPE_TOO_LARGE) # 1) create a GN-PDU with the T/GN6-SDU as payload and a GBC packet header (clause 9.8.5): - # a) set the fields of the Basic Header (clause 10.3.2); - basic_header = BasicHeader.initialize_with_mib(self.mib) + # a) set the fields of the Basic Header (clause 10.3.2): + # LT from request when provided, else itsGnDefaultPacketLifetime; + # RHL = itsGnDefaultHopLimit for GBC (§10.3.2). 
+ hop_limit = self.mib.itsGnDefaultHopLimit if request.max_hop_limit <= 1 else request.max_hop_limit + basic_header = BasicHeader.initialize_with_mib_request_and_rhl( + self.mib, request.max_packet_lifetime, hop_limit) # b) set the fields of the Common Header (clause 10.3.4); - common_header = CommonHeader.initialize_with_request(request) + # Flags Bit 0 = itsGnIsMobile (§10.3.4) + _req_with_hl = dataclass_replace(request, max_hop_limit=hop_limit) + common_header = CommonHeader.initialize_with_request( + _req_with_hl, self.mib) # c) set the fields of the GBC Extended Header (table 36); geo_broadcast_extended_header = GBCExtendedHeader.initialize_with_request_sequence_number_ego_pv( request, self.get_sequence_number(), self.ego_position_vector) + # Security encapsulation – §7.1.2 DENM profile + # Sign the inner payload (common_header + ext_header + data) once, before + # forwarding-algorithm packet assembly, so both AREA_FORWARDING and + # NON_AREA_FORWARDING branches share the same signed bytes. 
+ sec_payload: bytes | None = None + if request.security_profile == SecurityProfile.DECENTRALIZED_ENVIRONMENTAL_NOTIFICATION_MESSAGE: + if self.sign_service is None: + raise NotImplementedError( + "DENM security profile requires a SignService" + ) + tbs_payload = ( + common_header.encode_to_bytes() + + geo_broadcast_extended_header.encode() + + request.data + ) + sign_request = SNSIGNRequest( + tbs_message_length=len(tbs_payload), + tbs_message=tbs_payload, + its_aid=request.its_aid, + permissions=request.security_permissions, + permissions_length=len(request.security_permissions), + generation_location={ + "latitude": self.ego_position_vector.latitude, + "longitude": self.ego_position_vector.longitude, + "elevation": 0xF000, # Uint16 unavailable per IEEE 1609.2 + }, + ) + sign_confirm: SNSIGNConfirm = self.sign_service.sign_denm( + sign_request) + basic_header = basic_header.set_nh(BasicNH.SECURED_PACKET) + sec_payload = sign_confirm.sec_message # 2) if no neighbour exists, i.e. the LocT does not contain a LocTE with the IS_NEIGHBOUR flag set to TRUE, # and SCF for the traffic class in the service primitive GN-DATA.request parameter Traffic class is enabled, # then buffer the GBC packet in the BC forwarding packet buffer and omit the execution of further steps; - if ( - len(self.location_table.get_neighbours()) > 0 - or not request.traffic_class.scf - ): - # 3) execute the forwarding algorithm procedures (starting with annex D); - algorithm = self.gn_forwarding_algorithm_selection(request) - # 4) if the return value of the forwarding algorithm is 0 (packet is buffered in the BC forwarding packet - # buffer or in the CBF buffer) or -1 (packet is discarded), omit the execution of further steps; - if algorithm == GNForwardingAlgorithmResponse.AREA_FORWARDING: - # TODO: steps 5-7 - # 8) pass the GN-PDU to the LL protocol entity via the IN interface and set the destination address to - # the LL address of the next hop LL_ADDR_NH. 
- packet: bytes = ( - basic_header.encode_to_bytes() - + common_header.encode_to_bytes() + if len(self.location_table.get_neighbours()) == 0 and request.traffic_class.scf: + print( + "GBC: no neighbours and SCF set; BC forwarding buffer not yet implemented") + return GNDataConfirm(result_code=ResultCode.ACCEPTED) + # 3) execute the forwarding algorithm procedures (starting with annex D); + algorithm = self.gn_forwarding_algorithm_selection(request) + # 4) if the return value of the forwarding algorithm is 0 (packet is buffered in the BC forwarding packet + # buffer or in the CBF buffer) or -1 (packet is discarded), omit the execution of further steps; + if algorithm == GNForwardingAlgorithmResponse.AREA_FORWARDING: + # TODO: steps 6-7 (repetition) + # 8) pass the GN-PDU to the LL protocol entity via the IN interface and set the destination address to + # the LL address of the next hop LL_ADDR_NH. + inner: bytes = ( + sec_payload + if sec_payload is not None + else ( + common_header.encode_to_bytes() + geo_broadcast_extended_header.encode() + request.data ) - try: - if self.link_layer: - self.link_layer.send(packet) - except PacketTooLongException: - return GNDataConfirm(result_code=ResultCode.MAXIMUM_LENGTH_EXCEEDED) - except SendingException: - return GNDataConfirm(result_code=ResultCode.UNSPECIFIED) - - else: - packet: bytes = ( - basic_header.encode_to_bytes() - + common_header.encode_to_bytes() - + geo_broadcast_extended_header.encode() - + request.data ) + packet: bytes = basic_header.encode_to_bytes() + inner try: if self.link_layer: self.link_layer.send(packet) @@ -488,6 +830,28 @@ def gn_data_request_gbc(self, request: GNDataRequest) -> GNDataConfirm: return GNDataConfirm(result_code=ResultCode.MAXIMUM_LENGTH_EXCEEDED) except SendingException: return GNDataConfirm(result_code=ResultCode.UNSPECIFIED) + elif algorithm == GNForwardingAlgorithmResponse.NON_AREA_FORWARDING: + # §E.2: Greedy Forwarding towards area centre (source outside target area) + if 
self.gn_greedy_forwarding( + request.area.latitude, request.area.longitude, request.traffic_class + ): + naf_inner: bytes = ( + sec_payload + if sec_payload is not None + else ( + common_header.encode_to_bytes() + + geo_broadcast_extended_header.encode() + + request.data + ) + ) + naf_packet: bytes = basic_header.encode_to_bytes() + naf_inner + try: + if self.link_layer: + self.link_layer.send(naf_packet) + except PacketTooLongException: + return GNDataConfirm(result_code=ResultCode.MAXIMUM_LENGTH_EXCEEDED) + except SendingException: + return GNDataConfirm(result_code=ResultCode.UNSPECIFIED) return GNDataConfirm(result_code=ResultCode.ACCEPTED) @@ -515,33 +879,50 @@ def gn_data_request(self, request: GNDataRequest) -> GNDataConfirm: return self.gn_data_request_shb(request) if request.packet_transport_type.header_type == HeaderType.GEOBROADCAST: return self.gn_data_request_gbc(request) + if request.packet_transport_type.header_type == HeaderType.GEOANYCAST: + return self.gn_data_request_gac(request) + if request.packet_transport_type.header_type == HeaderType.GEOUNICAST: + return self.gn_data_request_guc(request) raise NotImplementedError("PacketTransportType not implemented") def gn_data_indicate_shb( - self, packet: bytes, common_header: CommonHeader - ) -> GNDataIndication: + self, packet: bytes, common_header: CommonHeader, basic_header: BasicHeader + ) -> GNDataIndication | None: """ Handle a Single Hop Broadcast GeoNetworking packet. + Implements §10.3.10.3 receiver operations. + Parameters ---------- packet : bytes - GeoNetworking packet to handle. + GeoNetworking packet to handle (without Basic and Common headers). common_header : CommonHeader CommonHeader of the packet. + basic_header : BasicHeader + BasicHeader of the packet; used for remaining LT and RHL (Table 35). """ - # ETSI EN 302 636-4-1 V1.4.1 (2020-01). 
Section try: - long_position_vector = LongPositionVector() - long_position_vector.decode(packet[0:24]) + long_position_vector = LongPositionVector.decode(packet[0:24]) packet = packet[24:] # Ignore Media Dependant Data packet = packet[4:] + # Step 3: execute DAD (§10.2.1.5) + self.duplicate_address_detection(long_position_vector.gn_addr) + # Steps 4, 5, 6: update SO LocTE (PV, PDR, IS_NEIGHBOUR) self.location_table.new_shb_packet(long_position_vector, packet) + # Step 7: pass payload to upper entity via GN-DATA.indication (Table 35) return GNDataIndication( upper_protocol_entity=common_header.nh, + packet_transport_type=PacketTransportType( + header_type=HeaderType.TSB, + header_subtype=TopoBroadcastHST.SINGLE_HOP, + ), source_position_vector=long_position_vector, traffic_class=common_header.tc, + remaining_packet_lifetime=float( + basic_header.lt.get_value_in_seconds()), + remaining_hop_limit=basic_header.rhl, length=len(packet), data=packet ) @@ -553,20 +934,657 @@ def gn_data_indicate_shb( print("Packet is duplicated") except DecodeError as e: print(str(e)) - return GNDataIndication() + return None + + def gn_data_request_guc(self, request: GNDataRequest) -> GNDataConfirm: + """ + Handle a GeoUnicast (GUC) GNDataRequest. + + Implements §10.3.8.2 source operations of ETSI EN 302 636-4-1 V1.4.1 (2020-01). + + Parameters + ---------- + request : GNDataRequest + GNDataRequest to handle. Must have ``destination`` set to the target GNAddress. 
+ """ + # Step 1a: Basic Header – LT from request, RHL = itsGnDefaultHopLimit + hop_limit = self.mib.itsGnDefaultHopLimit if request.max_hop_limit <= 1 else request.max_hop_limit + basic_header = BasicHeader.initialize_with_mib_request_and_rhl( + self.mib, request.max_packet_lifetime, hop_limit) + # Step 1b: Common Header + _req_with_hl = dataclass_replace(request, max_hop_limit=hop_limit) + common_header = CommonHeader.initialize_with_request( + _req_with_hl, self.mib) + # Step 2: look up DE PV from LocT + de_entry = self.location_table.get_entry( + request.destination) if request.destination else None + if de_entry is None: + # No LocTE for destination → invoke Location Service (§10.3.7.1.2) + assert request.destination is not None + self.gn_ls_request(request.destination, request) + return GNDataConfirm(result_code=ResultCode.ACCEPTED) + de_lpv = de_entry.position_vector + de_pv = ShortPositionVector( + gn_addr=de_lpv.gn_addr, + tst=de_lpv.tst, + latitude=de_lpv.latitude, + longitude=de_lpv.longitude, + ) + # Step 1c: GUC Extended Header + guc_extended_header = GUCExtendedHeader.initialize_with_request_sequence_number_ego_pv_de_pv( + self.get_sequence_number(), self.ego_position_vector, de_pv) + # Step 3: if no neighbours and SCF → buffer (stub) + if len(self.location_table.get_neighbours()) == 0 and request.traffic_class.scf: + print( + "GUC: no neighbours and SCF set; UC forwarding buffer not yet implemented") + return GNDataConfirm(result_code=ResultCode.ACCEPTED) + # Step 4: forwarding algorithm (§10.3.8.2 step 4, Annex E.2 – Greedy Forwarding) + if not self.gn_greedy_forwarding(de_pv.latitude, de_pv.longitude, request.traffic_class): + # §E.2: local optimum + SCF → buffer in UC forwarding packet buffer (stub) + return GNDataConfirm(result_code=ResultCode.ACCEPTED) + # Step 9: pass the GN-PDU to the LL entity + packet: bytes = ( + basic_header.encode_to_bytes() + + common_header.encode_to_bytes() + + guc_extended_header.encode() + + request.data + ) + try: + 
if self.link_layer: + self.link_layer.send(packet) + except PacketTooLongException: + return GNDataConfirm(result_code=ResultCode.MAXIMUM_LENGTH_EXCEEDED) + except SendingException: + return GNDataConfirm(result_code=ResultCode.UNSPECIFIED) + return GNDataConfirm(result_code=ResultCode.ACCEPTED) + + def gn_data_indicate_guc( + self, packet: bytes, common_header: CommonHeader, basic_header: BasicHeader + ) -> GNDataIndication | None: + """ + Handle a GeoUnicast (GUC) GeoNetworking packet. + + Implements §10.3.8.3 forwarder and §10.3.8.4 destination operations of + ETSI EN 302 636-4-1 V1.4.1 (2020-01). + + Parameters + ---------- + packet : bytes + GeoNetworking packet to handle (without Basic and Common headers). + common_header : CommonHeader + CommonHeader of the packet. + basic_header : BasicHeader + BasicHeader of the packet. + """ + guc_extended_header = GUCExtendedHeader.decode(packet[0:48]) + packet = packet[48:] + try: + # Step 3: execute DAD (§10.2.1.5) + self.duplicate_address_detection(guc_extended_header.so_pv.gn_addr) + # Steps 5-6: create/update SO LocTE (PV, PDR, IS_NEIGHBOUR per NOTE 2) + self.location_table.new_guc_packet(guc_extended_header, packet) + # TODO step 7/8 (forwarder): flush SO LS packet buffer and UC forwarding packet buffer + is_destination = ( + guc_extended_header.de_pv.gn_addr == self.mib.itsGnLocalGnAddr + ) + if is_destination: + # §10.3.8.4: pass payload to upper entity via GN-DATA.indication (Table 29) + return GNDataIndication( + upper_protocol_entity=common_header.nh, + packet_transport_type=PacketTransportType( + header_type=HeaderType.GEOUNICAST, + ), + source_position_vector=guc_extended_header.so_pv, + traffic_class=common_header.tc, + remaining_packet_lifetime=float( + basic_header.lt.get_value_in_seconds()), + remaining_hop_limit=basic_header.rhl, + length=len(packet), + data=packet, + ) + # §10.3.8.3 forwarder operations + # §B.2: PDR enforcement – do not forward if SO PDR exceeds itsGnMaxPacketDataRate + so_entry = 
self.location_table.get_entry( + guc_extended_header.so_pv.gn_addr) + if so_entry is not None and so_entry.pdr > self.mib.itsGnMaxPacketDataRate * 1000: + return None + # Step 7: update DE LocTE from packet if not a neighbour, or + # Step 8: update DE PV in packet from LocT if DE is a neighbour + de_entry = self.location_table.get_entry( + guc_extended_header.de_pv.gn_addr) + if de_entry is not None and de_entry.is_neighbour: + # §C.3: only update DE PV in forwarded packet if LocT PV is strictly newer + if de_entry.position_vector.tst > guc_extended_header.de_pv.tst: + # Step 8: refresh DE PV in the forwarded packet from LocT + de_lpv = de_entry.position_vector + updated_de_pv = ShortPositionVector( + gn_addr=de_lpv.gn_addr, + tst=de_lpv.tst, + latitude=de_lpv.latitude, + longitude=de_lpv.longitude, + ) + guc_extended_header = guc_extended_header.with_de_pv( + updated_de_pv) + # Step 9: decrement RHL; if RHL == 0 discard + new_rhl = basic_header.rhl - 1 + if new_rhl > 0: + updated_basic_header = basic_header.set_rhl(new_rhl) + # Step 10: if no neighbour AND SCF: buffer in UC forwarding packet buffer + if len(self.location_table.get_neighbours()) == 0 and common_header.tc.scf: + print( + "GUC: no neighbours and SCF set; UC forwarding buffer not yet implemented") + else: + # Step 12: forwarding algorithm (§10.3.8.3, Annex E.2 – Greedy Forwarding) + if self.gn_greedy_forwarding( + guc_extended_header.de_pv.latitude, + guc_extended_header.de_pv.longitude, + common_header.tc, + ): + # Steps 14-15: media-dependent procedures + pass to LL + forward_packet = ( + updated_basic_header.encode_to_bytes() + + common_header.encode_to_bytes() + + guc_extended_header.encode() + + packet + ) + try: + if self.link_layer: + self.link_layer.send(forward_packet) + except PacketTooLongException: + pass + except SendingException: + pass + except DADException: + print("Duplicate Address Detected!") + except IncongruentTimestampException: + print("Incongruent Timestamp Detected!") + except 
DuplicatedPacketException: + print("Packet is duplicated") + except DecodeError as e: + print(str(e)) + return None + + def gn_data_indicate_gac( + self, packet: bytes, common_header: CommonHeader, basic_header: BasicHeader + ) -> GNDataIndication | None: + """ + Handle a GeoAnycast (GAC) GeoNetworking packet. + + Implements §10.3.12.3 forwarder and receiver operations of + ETSI EN 302 636-4-1 V1.4.1 (2020-01). + + GAC and GBC share the same extended header format (§9.8.5). + Key distinction from GBC: + - Inside/at border of area (F ≥ 0): deliver to upper entity and STOP – do NOT forward. + - Outside area (F < 0): forward only – do NOT deliver to upper layer. + + Parameters + ---------- + packet : bytes + GeoNetworking packet to handle (without Basic and Common headers). + common_header : CommonHeader + CommonHeader of the packet. + basic_header : BasicHeader + BasicHeader of the packet. + """ + # Header is same wire format as GBC (§9.8.5) + gbc_extended_header = GBCExtendedHeader.decode(packet[0:44]) + packet = packet[44:] + area = Area( + a=gbc_extended_header.a, + b=gbc_extended_header.b, + latitude=gbc_extended_header.latitude, + longitude=gbc_extended_header.longitude, + angle=gbc_extended_header.angle, + ) + # Step 7: determine function F(x,y) per ETSI EN 302 931 §5 + area_f = self.gn_geometric_function_f( + common_header.hst, # type: ignore + area, + self.ego_position_vector.latitude, + self.ego_position_vector.longitude, + ) + try: + # Step 3: DPD – duplicate packet detection (via location table) + # Step 4: execute DAD (§10.2.1.5) + self.duplicate_address_detection(gbc_extended_header.so_pv.gn_addr) + # Steps 5-6: create/update SO LocTE (PV, PDR, IS_NEIGHBOUR per NOTE 1) + self.location_table.new_gac_packet(gbc_extended_header, packet) + # TODO Step 8: flush SO LS packet buffer and UC forwarding packet buffer + # Step 9: inside or at border (F ≥ 0) → deliver to upper entity and STOP + if area_f >= 0: + return GNDataIndication( + 
upper_protocol_entity=common_header.nh, + packet_transport_type=PacketTransportType( + header_type=HeaderType.GEOANYCAST, + header_subtype=common_header.hst, + ), + destination_area=area, + source_position_vector=gbc_extended_header.so_pv, + traffic_class=common_header.tc, + remaining_packet_lifetime=float( + basic_header.lt.get_value_in_seconds()), + remaining_hop_limit=basic_header.rhl, + length=len(packet), + data=packet, + ) + # Step 10: outside area (F < 0) → forward only, no delivery to upper layer + # §B.3: Geographical area size control – do not forward if area exceeds itsGnMaxGeoAreaSize + if Router._compute_area_size_m2(cast(Union[GeoBroadcastHST, GeoAnycastHST], common_header.hst), area) > self.mib.itsGnMaxGeoAreaSize * 1_000_000: + return None + # §B.2: PDR enforcement – do not forward if SO PDR exceeds itsGnMaxPacketDataRate + so_entry = self.location_table.get_entry( + gbc_extended_header.so_pv.gn_addr) + if so_entry is not None and so_entry.pdr > self.mib.itsGnMaxPacketDataRate * 1000: + return None + # §D (Annex D): discard if sender is inside/at border of area (SE_POS_VALID AND F_SE ≥ 0) + if so_entry is not None and so_entry.position_vector.pai: + f_se = self.gn_geometric_function_f( + common_header.hst, area, # type: ignore + so_entry.position_vector.latitude, + so_entry.position_vector.longitude, + ) + if f_se >= 0: + return None + # Step 10a: decrement RHL + new_rhl = basic_header.rhl - 1 + if new_rhl == 0: + # Step 10a(i): RHL reached 0 → discard + return None + updated_basic_header = basic_header.set_rhl(new_rhl) + # Step 10b: no neighbours AND SCF → buffer in BC forwarding packet buffer (stub) + if len(self.location_table.get_neighbours()) == 0 and common_header.tc.scf: + print( + "GAC: no neighbours and SCF set; BC forwarding buffer not yet implemented") + return None + # Steps 11-13: §E.2 Greedy Forwarding (NON_AREA) → media-dependent → LL + if self.gn_greedy_forwarding(area.latitude, area.longitude, common_header.tc): + forward_packet = ( 
+ updated_basic_header.encode_to_bytes() + + common_header.encode_to_bytes() + + gbc_extended_header.encode() + + packet + ) + try: + if self.link_layer: + self.link_layer.send(forward_packet) + except PacketTooLongException: + pass + except SendingException: + pass + except DADException: + print("Duplicate Address Detected!") + except IncongruentTimestampException: + print("Incongruent Timestamp Detected!") + except DuplicatedPacketException: + print("Packet is duplicated") + except DecodeError as e: + print(str(e)) + return None + + # ------------------------------------------------------------------------- + # Location Service (LS) – §10.3.7 + # ------------------------------------------------------------------------- + + def _send_ls_request_packet(self, sought_gn_addr: GNAddress) -> None: + """ + Build and broadcast an LS Request packet for *sought_gn_addr*. + + §10.3.7.1.2 step 2 of ETSI EN 302 636-4-1 V1.4.1 (2020-01). + The packet uses HT=LS / HST=LS_REQUEST and travels on the broadcast LL + address (same distribution as TSB multi-hop). 
+ """ + with self.ego_position_vector_lock: + ego_pv = self.ego_position_vector + basic_header = BasicHeader.initialize_with_mib_request_and_rhl( + self.mib, None, self.mib.itsGnDefaultHopLimit) + common_header = CommonHeader( + nh=CommonNH.ANY, + ht=HeaderType.LS, + hst=cast(HeaderSubType, LocationServiceHST.LS_REQUEST), + tc=TrafficClass(), + flags=self.mib.itsGnIsMobile.value, + pl=0, + mhl=self.mib.itsGnDefaultHopLimit, + ) + ls_req_header = LSRequestExtendedHeader.initialize( + self.get_sequence_number(), ego_pv, sought_gn_addr) + packet = ( + basic_header.encode_to_bytes() + + common_header.encode_to_bytes() + + ls_req_header.encode() + ) + try: + if self.link_layer: + self.link_layer.send(packet) + except (PacketTooLongException, SendingException): + pass + + def gn_ls_request( + self, sought_gn_addr: GNAddress, buffered_request: GNDataRequest | None = None + ) -> None: + """ + Initiate or update a Location Service (LS) request for *sought_gn_addr*. + + Implements source operations of §10.3.7.1.2 of + ETSI EN 302 636-4-1 V1.4.1 (2020-01): + + * If an LS is already in progress (ls_pending=TRUE) for the sought + address, the optional *buffered_request* is queued for later delivery + and the method returns immediately. + * Otherwise a new LS Request packet is built and broadcast, a + retransmit timer TLS is started and the LocTE ls_pending flag is set + to TRUE. + + Parameters + ---------- + sought_gn_addr : GNAddress + GN address of the sought GeoAdhoc router. + buffered_request : GNDataRequest | None + Optional GNDataRequest whose processing triggered this LS. It will + be (re-)processed automatically when the LS Reply is received. 
+ """ + with self._ls_lock: + entry = self.location_table.get_entry(sought_gn_addr) + if entry is not None and entry.ls_pending: + # LS already in-progress → just queue the request + if buffered_request is not None: + self._ls_packet_buffers.setdefault( + sought_gn_addr, []).append(buffered_request) + return + # Create or fetch LocTE and set ls_pending + entry = self.location_table.ensure_entry(sought_gn_addr) + entry.ls_pending = True + self._ls_packet_buffers[sought_gn_addr] = ( + [buffered_request] if buffered_request is not None else [] + ) + self._ls_retransmit_counters[sought_gn_addr] = 0 + # Send LS Request and start retransmit timer (outside lock to avoid deadlock) + self._send_ls_request_packet(sought_gn_addr) + timer = Timer( + self.mib.itsGnLocationServiceRetransmitTimer / 1000.0, + self._ls_retransmit, args=[sought_gn_addr] + ) + timer.daemon = True + timer.start() + with self._ls_lock: + old = self._ls_timers.pop(sought_gn_addr, None) + if old: + old.cancel() + self._ls_timers[sought_gn_addr] = timer + + def _ls_retransmit(self, sought_gn_addr: GNAddress) -> None: + """ + Retransmit timer callback for an ongoing LS Request. + + Implements §10.3.7.1.3 of ETSI EN 302 636-4-1 V1.4.1 (2020-01): + + * If the retransmit counter is below *itsGnLocationServiceMaxRetrans*, + the LS Request is resent and the timer is restarted. + * Otherwise the LS is abandoned: buffered requests are discarded and + ls_pending is set back to FALSE. 
+ """ + with self._ls_lock: + count = self._ls_retransmit_counters.get(sought_gn_addr, 0) + if count >= self.mib.itsGnLocationServiceMaxRetrans: + # Give up + self._ls_packet_buffers.pop(sought_gn_addr, None) + self._ls_timers.pop(sought_gn_addr, None) + self._ls_retransmit_counters.pop(sought_gn_addr, None) + entry = self.location_table.get_entry(sought_gn_addr) + if entry is not None: + entry.ls_pending = False + return + self._ls_retransmit_counters[sought_gn_addr] = count + 1 + # Resend and restart timer + self._send_ls_request_packet(sought_gn_addr) + timer = Timer( + self.mib.itsGnLocationServiceRetransmitTimer / 1000.0, + self._ls_retransmit, args=[sought_gn_addr] + ) + timer.daemon = True + timer.start() + with self._ls_lock: + self._ls_timers[sought_gn_addr] = timer + + def gn_data_indicate_ls_request( + self, packet: bytes, common_header: CommonHeader, basic_header: BasicHeader + ) -> None: + """ + Handle an incoming LS Request packet. + + Implements §10.3.7.2 (forwarder) and §10.3.7.3 (destination) of + ETSI EN 302 636-4-1 V1.4.1 (2020-01). + + * **Destination** (Request_GN_ADDR == own address): perform DPD/DAD, + update SO LocTE, then send an LS Reply unicast back to the requester. + * **Forwarder** (all other nodes): perform DPD/DAD, update SO LocTE, + decrement RHL and re-broadcast without delivering to the upper layer. + + Parameters + ---------- + packet : bytes + GeoNetworking packet body after Basic and Common headers. + common_header : CommonHeader + Common header of the received packet. + basic_header : BasicHeader + Basic header of the received packet. 
+ """ + ls_request_header = LSRequestExtendedHeader.decode(packet[0:36]) + payload = packet[36:] + try: + # Step 3-4: DPD + DAD + self.duplicate_address_detection(ls_request_header.so_pv.gn_addr) + # Step 5-6: update SO LocTE + self.location_table.new_ls_request_packet( + ls_request_header, payload) + # Step 7: check if we are the destination + if ls_request_header.request_gn_addr == self.mib.itsGnLocalGnAddr: + # §10.3.7.3: we are the destination – send LS Reply + so_entry = self.location_table.get_entry( + ls_request_header.so_pv.gn_addr) + if so_entry is None: + return + so_lpv = so_entry.position_vector + de_pv = ShortPositionVector( + gn_addr=so_lpv.gn_addr, + tst=so_lpv.tst, + latitude=so_lpv.latitude, + longitude=so_lpv.longitude, + ) + with self.ego_position_vector_lock: + ego_pv = self.ego_position_vector + reply_basic = BasicHeader.initialize_with_mib_request_and_rhl( + self.mib, None, self.mib.itsGnDefaultHopLimit) + reply_common = CommonHeader( + nh=CommonNH.ANY, + ht=HeaderType.LS, + hst=cast(HeaderSubType, LocationServiceHST.LS_REPLY), + tc=TrafficClass(), + flags=self.mib.itsGnIsMobile.value, + pl=0, + mhl=self.mib.itsGnDefaultHopLimit, + ) + reply_header = LSReplyExtendedHeader.initialize( + self.get_sequence_number(), ego_pv, de_pv) + reply_packet = ( + reply_basic.encode_to_bytes() + + reply_common.encode_to_bytes() + + reply_header.encode() + ) + try: + if self.link_layer: + self.link_layer.send(reply_packet) + except (PacketTooLongException, SendingException): + pass + else: + # §10.3.7.2 forwarder: re-broadcast like TSB but no upper-layer delivery + # §B.2: PDR enforcement – do not forward if SO PDR exceeds itsGnMaxPacketDataRate + so_entry = self.location_table.get_entry( + ls_request_header.so_pv.gn_addr) + if so_entry is not None and so_entry.pdr > self.mib.itsGnMaxPacketDataRate * 1000: + return + new_rhl = basic_header.rhl - 1 + if new_rhl > 0: + updated_basic_header = basic_header.set_rhl(new_rhl) + forward_packet = ( + 
updated_basic_header.encode_to_bytes() + + common_header.encode_to_bytes() + + ls_request_header.encode() + + payload + ) + try: + if self.link_layer: + self.link_layer.send(forward_packet) + except (PacketTooLongException, SendingException): + pass + except DADException: + print("Duplicate Address Detected!") + except IncongruentTimestampException: + print("Incongruent Timestamp Detected!") + except DuplicatedPacketException: + print("Packet is duplicated") + except DecodeError as e: + print(str(e)) + + def gn_data_indicate_ls_reply( + self, packet: bytes, common_header: CommonHeader, basic_header: BasicHeader + ) -> None: + """ + Handle an incoming LS Reply packet. + + Implements §10.3.7.1.4 (source receives reply) and §10.3.7.2 (forwarder) + of ETSI EN 302 636-4-1 V1.4.1 (2020-01). + + * **Source** (DE_GN_ADDR == own address): update SO LocTE, stop the + retransmit timer, reset the counter, set ls_pending=FALSE and flush + the LS packet buffer by re-invoking each buffered ``GNDataRequest``. + * **Forwarder**: update SO LocTE, decrement RHL and forward like GUC. + + Parameters + ---------- + packet : bytes + GeoNetworking packet body after Basic and Common headers. + common_header : CommonHeader + Common header of the received packet. + basic_header : BasicHeader + Basic header of the received packet. + """ + ls_reply_header = LSReplyExtendedHeader.decode(packet[0:48]) + payload = packet[48:] + try: + # Steps 2-3: DPD + DAD + self.duplicate_address_detection(ls_reply_header.so_pv.gn_addr) + # Steps 4-5: update SO LocTE + self.location_table.new_ls_reply_packet(ls_reply_header, payload) + # Determine role: source vs. 
forwarder + sought_gn_addr = ls_reply_header.so_pv.gn_addr + if ls_reply_header.de_pv.gn_addr == self.mib.itsGnLocalGnAddr: + # §10.3.7.1.4: we are the original requester + buffered: list[GNDataRequest] = [] + with self._ls_lock: + timer = self._ls_timers.pop(sought_gn_addr, None) + self._ls_retransmit_counters.pop(sought_gn_addr, None) + buffered = self._ls_packet_buffers.pop(sought_gn_addr, []) + entry = self.location_table.get_entry(sought_gn_addr) + if entry is not None: + entry.ls_pending = False + if timer is not None: + timer.cancel() + # Flush LS packet buffer: re-process each buffered request now + # that the LocTE is available + for req in buffered: + self.gn_data_request_guc(req) + else: + # §10.3.7.2 forwarder: forward like GUC forwarder + # §B.2: PDR enforcement – do not forward if SO PDR exceeds itsGnMaxPacketDataRate + so_entry = self.location_table.get_entry( + ls_reply_header.so_pv.gn_addr) + if so_entry is not None and so_entry.pdr > self.mib.itsGnMaxPacketDataRate * 1000: + return + de_entry = self.location_table.get_entry( + ls_reply_header.de_pv.gn_addr) + if de_entry is not None and de_entry.is_neighbour: + # §C.3: only update DE PV in forwarded packet if LocT PV is strictly newer + if de_entry.position_vector.tst > ls_reply_header.de_pv.tst: + de_lpv = de_entry.position_vector + updated_de_pv = ShortPositionVector( + gn_addr=de_lpv.gn_addr, + tst=de_lpv.tst, + latitude=de_lpv.latitude, + longitude=de_lpv.longitude, + ) + ls_reply_header = ls_reply_header.__class__( + sn=ls_reply_header.sn, + reserved=ls_reply_header.reserved, + so_pv=ls_reply_header.so_pv, + de_pv=updated_de_pv, + ) + new_rhl = basic_header.rhl - 1 + if new_rhl > 0: + updated_basic_header = basic_header.set_rhl(new_rhl) + forward_packet = ( + updated_basic_header.encode_to_bytes() + + common_header.encode_to_bytes() + + ls_reply_header.encode() + + payload + ) + try: + if self.link_layer: + self.link_layer.send(forward_packet) + except (PacketTooLongException, 
SendingException): + pass + except DADException: + print("Duplicate Address Detected!") + except IncongruentTimestampException: + print("Incongruent Timestamp Detected!") + except DuplicatedPacketException: + print("Packet is duplicated") + except DecodeError as e: + print(str(e)) + + def gn_data_indicate_ls( + self, packet: bytes, common_header: CommonHeader, basic_header: BasicHeader + ) -> None: + """ + Dispatch an incoming Location Service (LS) packet. + + Calls either :meth:`gn_data_indicate_ls_request` or + :meth:`gn_data_indicate_ls_reply` based on the Common Header HST field. + + Parameters + ---------- + packet : bytes + GeoNetworking packet body after Basic and Common headers. + common_header : CommonHeader + Common header of the received packet. + basic_header : BasicHeader + Basic header of the received packet. + """ + if common_header.hst == LocationServiceHST.LS_REQUEST: + self.gn_data_indicate_ls_request( + packet, common_header, basic_header) + elif common_header.hst == LocationServiceHST.LS_REPLY: + self.gn_data_indicate_ls_reply(packet, common_header, basic_header) + else: + raise NotImplementedError(f"Unknown LS HST: {common_header.hst}") def gn_data_indicate_gbc( - self, packet: bytes, common_header: CommonHeader - ) -> GNDataIndication: + self, packet: bytes, common_header: CommonHeader, basic_header: BasicHeader + ) -> GNDataIndication | None: """ - Handle a GeobroadcastBroadcast GeoNetworking packet. + Handle a GeoBroadcast GeoNetworking packet. + + Implements §10.3.11.3 forwarder and receiver operations. Parameters ---------- packet : bytes - GeoNetworking packet to handle (without the basic header and common header) + GeoNetworking packet to handle (without Basic and Common headers). common_header : CommonHeader CommonHeader of the packet. + basic_header : BasicHeader + BasicHeader of the packet; used for remaining LT and RHL (Table 38). 
""" gbc_extended_header = GBCExtendedHeader.decode(packet[0:44]) packet = packet[44:] @@ -583,6 +1601,8 @@ def gn_data_indicate_gbc( self.ego_position_vector.latitude, self.ego_position_vector.longitude, ) + # Step 3: DPD (Duplicate Packet Detection) – run before DAD for GREEDY/SIMPLE/UNSPECIFIED + # forwarding algorithms (§10.3.11.3 step 3a/3b) if area_f < 0 and ( self.mib.itsGnNonAreaForwardingAlgorithm in ( @@ -597,28 +1617,133 @@ def gn_data_indicate_gbc( ): pass try: + # Step 4: execute DAD (§10.2.1.5) self.duplicate_address_detection(gbc_extended_header.so_pv.gn_addr) + # Steps 5-6: update SO LocTE self.location_table.new_gbc_packet(gbc_extended_header, packet) + indication: GNDataIndication | None = None + # Step 7: if inside/at border of area, pass payload to upper entity via GN-DATA.indication (Table 38) if area_f >= 0: - # TODO: Extend the indication information - return GNDataIndication( + indication = GNDataIndication( upper_protocol_entity=common_header.nh, packet_transport_type=PacketTransportType( header_type=HeaderType.GEOBROADCAST, header_subtype=common_header.hst ), + destination_area=area, source_position_vector=gbc_extended_header.so_pv, traffic_class=common_header.tc, + remaining_packet_lifetime=float( + basic_header.lt.get_value_in_seconds()), + remaining_hop_limit=basic_header.rhl, length=len(packet), data=packet ) + # TODO: Step 8: flush LS packet buffer and UC forwarding packet buffer for SO + # §B.3: Geographical area size control – do not forward if area exceeds itsGnMaxGeoAreaSize + if Router._compute_area_size_m2(cast(Union[GeoBroadcastHST, GeoAnycastHST], common_header.hst), area) > self.mib.itsGnMaxGeoAreaSize * 1_000_000: + return indication + # §B.2: PDR enforcement – do not forward if SO PDR exceeds itsGnMaxPacketDataRate + so_entry = self.location_table.get_entry( + gbc_extended_header.so_pv.gn_addr) + if so_entry is not None and so_entry.pdr > self.mib.itsGnMaxPacketDataRate * 1000: + return indication + # Step 9: decrement RHL; 
if RHL == 0 discard + new_rhl = basic_header.rhl - 1 + if new_rhl > 0: + # Steps 10-14: forward according to §10.3.11.3 + self.gn_data_forward_gbc( + basic_header, common_header, gbc_extended_header, packet) + return indication + except DADException: + print("Duplicate Address Detected!") + except IncongruentTimestampException: + print("Incongruent Timestamp Detected!") + except DuplicatedPacketException: + print("Packet is duplicated") + except DecodeError as e: + print(str(e)) + return None + + def gn_data_indicate_tsb( + self, packet: bytes, common_header: CommonHeader, basic_header: BasicHeader + ) -> GNDataIndication | None: + """ + Handle a Topologically-Scoped Broadcast (multi-hop) GeoNetworking packet. + + Implements §10.3.9.3 forwarder and receiver operations. + + Parameters + ---------- + packet : bytes + GeoNetworking packet to handle (without Basic and Common headers). + common_header : CommonHeader + CommonHeader of the packet. + basic_header : BasicHeader + BasicHeader of the packet; used for remaining LT and RHL (Table 32). 
+ """ + tsb_extended_header = TSBExtendedHeader.decode(packet[0:28]) + packet = packet[28:] + try: + # Step 3: DPD – duplicate packet detection (via location table) + # Step 4: execute DAD (§10.2.1.5) + self.duplicate_address_detection(tsb_extended_header.so_pv.gn_addr) + # Steps 5-6: create/update SO LocTE (PV, PDR, IS_NEIGHBOUR per NOTE 1) + self.location_table.new_tsb_packet(tsb_extended_header, packet) + # Step 7: pass payload to upper entity via GN-DATA.indication (Table 32) + indication = GNDataIndication( + upper_protocol_entity=common_header.nh, + packet_transport_type=PacketTransportType( + header_type=HeaderType.TSB, + header_subtype=TopoBroadcastHST.MULTI_HOP, + ), + source_position_vector=tsb_extended_header.so_pv, + traffic_class=common_header.tc, + remaining_packet_lifetime=float( + basic_header.lt.get_value_in_seconds()), + remaining_hop_limit=basic_header.rhl, + length=len(packet), + data=packet + ) + # TODO Step 8: flush SO LS packet buffer and UC forwarding packet buffer + # §B.2: PDR enforcement – do not forward if SO PDR exceeds itsGnMaxPacketDataRate + so_entry = self.location_table.get_entry( + tsb_extended_header.so_pv.gn_addr) + if so_entry is not None and so_entry.pdr > self.mib.itsGnMaxPacketDataRate * 1000: + return indication + # Step 9: decrement RHL; if RHL == 0 discard + new_rhl = basic_header.rhl - 1 + if new_rhl > 0: + updated_basic_header = basic_header.set_rhl(new_rhl) + # Step 10: if no neighbour AND SCF: buffer in BC forwarding packet buffer + if len(self.location_table.get_neighbours()) == 0 and common_header.tc.scf: + print( + "TSB: no neighbours and SCF set; BC forwarding buffer not yet implemented") + else: + # Steps 11-12: execute media-dependent procedures and pass to LL + forward_packet = ( + updated_basic_header.encode_to_bytes() + + common_header.encode_to_bytes() + + tsb_extended_header.encode() + + packet + ) + try: + if self.link_layer: + self.link_layer.send(forward_packet) + except PacketTooLongException: + pass + 
except SendingException: + pass + return indication except DADException: print("Duplicate Address Detected!") except IncongruentTimestampException: print("Incongruent Timestamp Detected!") + except DuplicatedPacketException: + print("Packet is duplicated") except DecodeError as e: print(str(e)) - return GNDataIndication() + return None def gn_data_indicate_beacon(self, packet: bytes) -> None: """ @@ -633,13 +1758,14 @@ def gn_data_indicate_beacon(self, packet: bytes) -> None: common_header : CommonHeader CommonHeader of the packet. """ - # ETSI EN 302 636-4-1 V1.4.1 (2020-01). Section 10.3.6 + # ETSI EN 302 636-4-1 V1.4.1 (2020-01). Section 10.3.6.3: + # Receiver operations are identical to SHB (§10.3.10.3) except step 8. try: - long_position_vector = LongPositionVector() - long_position_vector.decode(packet[0:24]) + long_position_vector = LongPositionVector.decode(packet[0:24]) packet = packet[24:] - # Receiver operations of Beacon packets are identical to the - # handling procedures of the SHB packet (clause 10.3.10.3) + # Step 3: execute DAD (§10.2.1.5) + self.duplicate_address_detection(long_position_vector.gn_addr) + # Steps 4-6: update SO LocTE (PV, PDR, IS_NEIGHBOUR) self.location_table.new_shb_packet(long_position_vector, packet) except DADException: print("Duplicate Address Detected!") @@ -650,6 +1776,92 @@ def gn_data_indicate_beacon(self, packet: bytes) -> None: except DecodeError as e: print(str(e)) + def process_basic_header(self, packet: bytes) -> None: + # ETSI EN 302 636-4-1 V1.4.1 (2020-01). Section 10.3.3 + # Decap the basic header + basic_header = BasicHeader.decode_from_bytes(packet[0:4]) + remaining = packet[4:] + if basic_header.version != self.mib.itsGnProtocolVersion: + raise NotImplementedError("Version not implemented") + if basic_header.nh == BasicNH.COMMON_HEADER: + # When itsGnSecurity is ENABLED, unsecured packets must be discarded + # (only secured packets with NH=SECURED_PACKET are accepted). 
+ if self.mib.itsGnSecurity == GnSecurity.ENABLED: + return + self.process_common_header(remaining, basic_header) + elif basic_header.nh == BasicNH.SECURED_PACKET: + self.process_security_header(remaining, basic_header) + else: + raise NotImplementedError("ANY next header not implemented") + + def process_common_header(self, packet: bytes, basic_header: BasicHeader) -> None: + indication = GNDataIndication() + # ETSI EN 302 636-4-1 V1.4.1 (2020-01). Section 10.3.5 + # Decap the common header + common_header = CommonHeader.decode_from_bytes(packet[0:8]) + packet = packet[8:] + if basic_header.rhl > common_header.mhl: + raise DecapError("Hop limit exceeded") + # TODO: Forwarding packet buffer flush + if common_header.ht == HeaderType.ANY: + raise NotImplementedError( + "Any packet (Common Header) not implemented") + elif common_header.ht == HeaderType.BEACON: + self.gn_data_indicate_beacon(packet) + return + elif common_header.ht == HeaderType.GEOUNICAST: + indication = self.gn_data_indicate_guc( + packet, common_header, basic_header) + elif common_header.ht == HeaderType.GEOANYCAST: + indication = self.gn_data_indicate_gac( + packet, common_header, basic_header) + elif common_header.ht == HeaderType.GEOBROADCAST: + indication = self.gn_data_indicate_gbc( + packet, common_header, basic_header) + elif common_header.ht == HeaderType.TSB: + if common_header.hst == TopoBroadcastHST.SINGLE_HOP: + indication = self.gn_data_indicate_shb( + packet, common_header, basic_header) + elif common_header.hst == TopoBroadcastHST.MULTI_HOP: + indication = self.gn_data_indicate_tsb( + packet, common_header, basic_header) + else: + raise NotImplementedError("TopoBroadcast not implemented") + elif common_header.ht == HeaderType.LS: + self.gn_data_indicate_ls(packet, common_header, basic_header) + return # LS handling never delivers to upper entity callback + else: + raise NotImplementedError( + "Any packet (Common Header) not implemented") + if self.indication_callback and indication is 
not None: + self.indication_callback(indication) + + def process_security_header(self, packet: bytes, basic_header: BasicHeader) -> None: + # ETSI EN 302 636-4-1 V1.4.1 (2020-01). Section 10.3.3 - Secured packet processing + # 1) If no verify service is configured, discard the packet silently. + if self.verify_service is None: + print("Secured packet received but no VerifyService configured, discarding") + return + # 2) Verify the secured packet using the SN-VERIFY service (ETSI TS 102 723-8). + verify_confirm = self.verify_service.verify( + SNVERIFYRequest( + sec_header=b"", + sec_header_length=0, + message=packet, + message_length=len(packet), + ) + ) + if verify_confirm.report != ReportVerify.SUCCESS: + print( + f"Secured packet verification failed: {verify_confirm.report}") + return + # 3) Dispatch directly from the decrypted plain_message without byte + # reconstruction or recursive calls. + # plain_message layout: Common Header (8 bytes) | Extended Header + payload + processed_packet = verify_confirm.plain_message + self.process_common_header( + processed_packet, basic_header.set_nh(BasicNH.COMMON_HEADER)) + def gn_data_indicate(self, packet: bytes) -> None: # pylint: disable=no-else-raise, too-many-branches """ @@ -666,51 +1878,7 @@ def gn_data_indicate(self, packet: bytes) -> None: ------ NotImplementedError : Version not implemented """ - indication = GNDataIndication() - # ETSI EN 302 636-4-1 V1.4.1 (2020-01). Section 10.3.3 - # Decap the common header - basic_header = BasicHeader.decode_from_bytes(packet[0:4]) - packet = packet[4:] - if basic_header.version != self.mib.itsGnProtocolVersion: - raise NotImplementedError("Version not implemented") - if basic_header.nh == BasicNH.COMMON_HEADER: - # ETSI EN 302 636-4-1 V1.4.1 (2020-01). 
Section 10.3.5 - # Decap the common header - common_header = CommonHeader.decode_from_bytes(packet[0:8]) - packet = packet[8:] - if basic_header.rhl > common_header.mhl: - raise DecapError("Hop limit exceeded") - # TODO: Forwarding packet buffer flush - if common_header.ht == HeaderType.ANY: - raise NotImplementedError( - "Any packet (Common Header) not implemented") - elif common_header.ht == HeaderType.BEACON: - self.gn_data_indicate_beacon(packet) - return - elif common_header.ht == HeaderType.GEOUNICAST: - raise NotImplementedError("Geounicast not implemented") - elif common_header.ht == HeaderType.GEOANYCAST: - raise NotImplementedError("Geoanycast not implemented") - elif common_header.ht == HeaderType.GEOBROADCAST: - indication = self.gn_data_indicate_gbc(packet, common_header) - elif common_header.ht == HeaderType.TSB: - if common_header.hst == TopoBroadcastHST.SINGLE_HOP: - indication = self.gn_data_indicate_shb( - packet, common_header) - else: - raise NotImplementedError("TopoBroadcast not implemented") - elif common_header.ht == HeaderType.LS: - raise NotImplementedError("Location Service not implemented") - else: - raise NotImplementedError( - "Any packet (Common Header) not implemented") - - elif basic_header.nh == BasicNH.SECURED_PACKET: - raise NotImplementedError("Secured packet not implemented") - else: - raise NotImplementedError("ANY next header not implemented") - if self.indication_callback: - self.indication_callback(indication) + self.process_basic_header(packet) def duplicate_address_detection(self, gn_addr: GNAddress) -> None: """ diff --git a/src/flexstack/geonet/service_access_point.py b/src/flexstack/geonet/service_access_point.py index c19e1ba..5325896 100644 --- a/src/flexstack/geonet/service_access_point.py +++ b/src/flexstack/geonet/service_access_point.py @@ -1,8 +1,9 @@ from enum import Enum from base64 import b64encode, b64decode from dataclasses import dataclass, field -from typing import Any +from typing import Any, Optional 
from .position_vector import LongPositionVector +from .gn_address import GNAddress from ..security.security_profiles import SecurityProfile @@ -423,7 +424,9 @@ class GNDataRequest: length: int = 0 data: bytes = b"" area: Area = field(default_factory=Area) - max_hop_limit: int = 10 + max_hop_limit: int = 1 + max_packet_lifetime: Optional[float] = None + destination: Optional[GNAddress] = None def to_dict(self) -> dict: """ @@ -444,6 +447,8 @@ def to_dict(self) -> dict: "length": self.length, "data": b64encode(self.data).decode("utf-8"), "area": self.area.to_dict(), + "max_hop_limit": self.max_hop_limit, + "max_packet_lifetime": self.max_packet_lifetime, } @classmethod @@ -470,6 +475,8 @@ def from_dict(cls, gn_data_request: dict) -> "GNDataRequest": length = gn_data_request["length"] data = b64decode(gn_data_request["data"]) area = Area.from_dict(gn_data_request["area"]) + max_hop_limit = gn_data_request.get("max_hop_limit", 0) + max_packet_lifetime = gn_data_request.get("max_packet_lifetime") return cls( upper_protocol_entity=upper_protocol_entity, packet_transport_type=packet_transport_type, @@ -478,6 +485,8 @@ def from_dict(cls, gn_data_request: dict) -> "GNDataRequest": length=length, data=data, area=area, + max_hop_limit=max_hop_limit, + max_packet_lifetime=max_packet_lifetime, ) @@ -538,10 +547,16 @@ class GNDataIndication: Upper Protocol Entity. packet_transport_type : PacketTransportType Packet Transport Type. + destination_area : Area or None + Destination geographical area (GBC/GAC only). Optional per Annex J4. source_position_vector : LongPositionVector Source Position Vector. traffic_class : TrafficClass Traffic Class. + remaining_packet_lifetime : float or None + Remaining lifetime of the packet in seconds. Optional per Annex J4. + remaining_hop_limit : int or None + Remaining hop limit of the packet. Optional per Annex J4. length : int Length of the payload. 
data : bytes @@ -551,9 +566,12 @@ class GNDataIndication: upper_protocol_entity: CommonNH = CommonNH.ANY packet_transport_type: PacketTransportType = field( default_factory=PacketTransportType) + destination_area: Optional[Any] = None source_position_vector: LongPositionVector = field( default_factory=LongPositionVector) traffic_class: TrafficClass = field(default_factory=TrafficClass) + remaining_packet_lifetime: Optional[float] = None + remaining_hop_limit: Optional[int] = None length: int = 0 data: bytes = b"" @@ -569,12 +587,15 @@ def to_dict(self) -> dict: return { "upper_protocol_entity": self.upper_protocol_entity.value, "packet_transport_type": self.packet_transport_type.to_dict(), + "destination_area": self.destination_area.to_dict() if self.destination_area is not None else None, "source_position_vector": b64encode( self.source_position_vector.encode() ).decode("utf-8"), "traffic_class": b64encode(self.traffic_class.encode_to_bytes()).decode( "utf-8" ), + "remaining_packet_lifetime": self.remaining_packet_lifetime, + "remaining_hop_limit": self.remaining_hop_limit, "length": self.length, "data": b64encode(self.data).decode("utf-8"), } @@ -594,19 +615,26 @@ def from_dict(cls, gn_data_indication: dict) -> "GNDataIndication": packet_transport_type = PacketTransportType.from_dict( gn_data_indication["packet_transport_type"] ) + raw_dest = gn_data_indication.get("destination_area") + destination_area = Area.from_dict(raw_dest) if raw_dest is not None else None source_position_vector = LongPositionVector.decode( b64decode(gn_data_indication["source_position_vector"]) ) traffic_class = TrafficClass.decode_from_bytes( b64decode(gn_data_indication["traffic_class"]) ) + remaining_packet_lifetime = gn_data_indication.get("remaining_packet_lifetime") + remaining_hop_limit = gn_data_indication.get("remaining_hop_limit") length = gn_data_indication["length"] data = b64decode(gn_data_indication["data"]) return cls( upper_protocol_entity=upper_protocol_entity, 
@dataclass(frozen=True)
class TSBExtendedHeader:
    """
    Topologically-Scoped Broadcast (TSB) Extended Header.

    As specified in ETSI EN 302 636-4-1 V1.4.1 (2020-01), clause 9.8.3
    (Table 12). Wire layout, 28 octets total:

    * SN        2 octets (octets 12-13 of the full TSB packet)
    * Reserved  2 octets (octets 14-15)
    * SO PV    24 octets (octets 16-39)

    Attributes
    ----------
    sn : int
        Sequence number (16-bit unsigned).
    reserved : int
        Reserved field, transmitted as 0.
    so_pv : LongPositionVector
        Source Long Position Vector.
    """

    sn: int = 0
    reserved: int = 0
    so_pv: LongPositionVector = field(default_factory=LongPositionVector)

    @classmethod
    def initialize_with_request_sequence_number_ego_pv(
        cls,
        request: GNDataRequest,  # noqa: ARG003 (kept for API symmetry with GBCExtendedHeader)
        sequence_number: int,
        ego_pv: LongPositionVector,
    ) -> "TSBExtendedHeader":
        """
        Build the TSB Extended Header for a new outgoing TSB packet.

        Parameters
        ----------
        request : GNDataRequest
            The GN Data Request. Not consumed for TSB; the parameter exists
            only so the constructor mirrors GBCExtendedHeader's API.
        sequence_number : int
            The current local sequence number (clause 8.3).
        ego_pv : LongPositionVector
            The ego position vector.
        """
        return cls(sn=sequence_number, so_pv=ego_pv)

    def encode(self) -> bytes:
        """
        Serialise the header to its 28-byte wire representation.

        Returns
        -------
        bytes
            Encoded bytes.
        """
        fields_on_wire = (
            self.sn.to_bytes(2, "big"),
            self.reserved.to_bytes(2, "big"),
            self.so_pv.encode(),
        )
        return b"".join(fields_on_wire)

    @classmethod
    def decode(cls, header: bytes) -> "TSBExtendedHeader":
        """
        Parse a TSB Extended Header from bytes.

        Parameters
        ----------
        header : bytes
            At least 28 bytes; only the first 28 are consumed.

        Returns
        -------
        TSBExtendedHeader
            Decoded TSB Extended Header.

        Raises
        ------
        DecodeError
            If fewer than 28 bytes are supplied.
        """
        if len(header) < 28:
            raise DecodeError(
                f"TSB Extended Header too short: expected 28 bytes, got {len(header)}")
        return cls(
            sn=int.from_bytes(header[0:2], "big"),
            reserved=int.from_bytes(header[2:4], "big"),
            so_pv=LongPositionVector.decode(header[4:28]),
        )
@dataclass
class DccAdaptiveParameters:
    """
    Tunable parameters of the adaptive DCC algorithm.

    Default values are taken from Table 3 of ETSI TS 102 687 V1.2.1
    (2018-04) clause 5.4.

    Attributes
    ----------
    alpha : float
        Exponential averaging coefficient used in step 3 (equation 4).
    beta : float
        Proportional gain applied to the CBR error in step 2
        (equations 2 and 3).
    cbr_target : float
        Target Channel Busy Ratio toward which ``delta`` is steered.
    delta_max : float
        Hard upper bound on the duty-cycle fraction ``delta`` (step 4,
        equation 5). Corresponds to the maximum duty cycle permitted by
        ETSI EN 302 571.
    delta_min : float
        Hard lower bound on ``delta`` (step 5, equation 6). Prevents
        complete starvation under extreme congestion.
    delta_up_max : float
        Upper clamp on the per-step offset when CBR is below the target
        (equation 2).
    delta_down_max : float
        Lower clamp on the per-step offset when CBR is at or above the
        target (equation 3). Must be negative.
    """

    alpha: float = 0.016
    beta: float = 0.0012
    cbr_target: float = 0.68
    delta_max: float = 0.03
    delta_min: float = 0.0006
    delta_up_max: float = 0.0005
    delta_down_max: float = -0.00025


@dataclass
class DccAdaptive:
    """
    Adaptive DCC algorithm as specified in ETSI TS 102 687 V1.2.1 (2018-04)
    clause 5.4 (LIMERIC).

    The algorithm shall be evaluated at every UTC-modulo-200 ms boundary
    (clause 5.2). Each evaluation executes five ordered steps that update
    the duty-cycle fraction ``delta``:

    * **Step 1** – Compute a smoothed CBR estimate (``cbr_its_s``) from the
      two most recent local (or global, if available) CBR measurements.
    * **Step 2** – Compute a signed per-step correction (``delta_offset``)
      proportional to the distance between ``cbr_target`` and
      ``cbr_its_s``, clamped to ``[delta_down_max, delta_up_max]``.
    * **Step 3** – Apply an exponential filter to blend the new offset into
      the current ``delta``.
    * **Steps 4–5** – Clamp ``delta`` to ``[delta_min, delta_max]``.

    Parameters
    ----------
    parameters : DccAdaptiveParameters, optional
        Algorithm tuning parameters. If not supplied the standard default
        values from Table 3 are used.

    Attributes
    ----------
    cbr_its_s : float
        Current smoothed CBR estimate (initialised to 0.0).
    delta : float
        Current duty-cycle fraction (initialised to
        ``parameters.delta_min``).

    Examples
    --------
    >>> alg = DccAdaptive()
    >>> delta = alg.update(cbr_local=0.5, cbr_local_previous=0.5)
    >>> 0.0006 <= delta <= 0.03
    True
    """

    parameters: DccAdaptiveParameters = field(
        default_factory=DccAdaptiveParameters
    )
    cbr_its_s: float = field(default=0.0, init=False)
    delta: float = field(default=0.0, init=False)

    def __post_init__(self) -> None:
        # Start from the minimum permitted duty cycle; the control loop
        # ramps delta up while the channel stays below the target CBR.
        self.delta = self.parameters.delta_min

    def update(
        self,
        cbr_local: float,
        cbr_local_previous: float,
        cbr_global: float | None = None,
        cbr_global_previous: float | None = None,
    ) -> float:
        """
        Execute one full adaptive DCC evaluation (steps 1–5 of clause 5.4).

        Parameters
        ----------
        cbr_local : float
            Most recent local CBR measurement (``CBR_L_0_Hop``).
        cbr_local_previous : float
            Second most recent local CBR measurement
            (``CBR_L_0_Hop_Previous``).
        cbr_global : float or None, optional
            Most recent global CBR (``CBR_G``), received from a
            neighbouring ITS-S via GeoNetworking header as described in
            the NOTE of clause 5.4. When provided together with
            *cbr_global_previous*, replaces the local values in step 1.
        cbr_global_previous : float or None, optional
            Second most recent global CBR (``CBR_G_Previous``).

        Returns
        -------
        float
            Updated ``delta`` value after clamping.

        Raises
        ------
        ValueError
            If any supplied CBR argument is outside ``[0.0, 1.0]``.
        """
        # Fix: validate *every* supplied CBR measurement. Previously only
        # the two local values were checked although the documented
        # contract ("any CBR argument") also covers the optional global
        # measurements, which were used unvalidated in step 1.
        for name, val in (
            ("cbr_local", cbr_local),
            ("cbr_local_previous", cbr_local_previous),
            ("cbr_global", cbr_global),
            ("cbr_global_previous", cbr_global_previous),
        ):
            if val is not None and not 0.0 <= val <= 1.0:
                raise ValueError(
                    f"{name} must be in [0.0, 1.0], got {val!r}"
                )
        p = self.parameters

        # Step 1 (equation 1) – CBR averaging. Use the global CBR when both
        # global measurements are available (NOTE 1 of clause 5.4).
        if cbr_global is not None and cbr_global_previous is not None:
            cbr_avg = (cbr_global + cbr_global_previous) / 2.0
        else:
            cbr_avg = (cbr_local + cbr_local_previous) / 2.0
        self.cbr_its_s = 0.5 * self.cbr_its_s + 0.5 * cbr_avg

        # Step 2 (equations 2–3) – proportional, clamped offset.
        diff = p.cbr_target - self.cbr_its_s
        if diff > 0.0:
            # CBR below target → increase delta (more transmission allowed)
            delta_offset = min(p.beta * diff, p.delta_up_max)
        else:
            # CBR at or above target → decrease delta (fewer transmissions)
            delta_offset = max(p.beta * diff, p.delta_down_max)

        # Step 3 (equation 4) – exponential filter.
        self.delta = (1.0 - p.alpha) * self.delta + delta_offset

        # Steps 4–5 (equations 5–6) – clamp to the permitted range.
        self.delta = min(max(self.delta, p.delta_min), p.delta_max)

        return self.delta
class GateKeeper:
    """
    Packet admission gate keeper as described in ETSI TS 102 687 V1.2.1
    (2018-04) Annex B.

    The gate keeper controls which packets may pass from the Network &
    Transport layer to the Access layer queue. The gate is **open** when
    the Access layer will accept a new packet and **closed** otherwise.

    Lifecycle
    ---------
    1. A packet arrives at the gate. If the gate is open the packet is
       *admitted*: the gate closes and a gate-opening time ``t_go`` is
       scheduled using equation B.1.
    2. From time ``t_go`` onward the gate is open again.
    3. Whenever ``delta`` is updated by the adaptive algorithm, ``t_go`` is
       recalculated per equation B.2 to preserve relative ordering of gate
       openings without introducing synchronisation artefacts.

    The minimum inter-admission interval is 25 ms and the maximum is 1 s
    (both from the constraints in ETSI EN 302 571 referenced in Annex B).

    Parameters
    ----------
    delta : float
        Initial duty-cycle fraction value obtained from
        :class:`DccAdaptive`. Must be strictly positive.

    Attributes
    ----------
    GATE_OPEN_MIN_INTERVAL_S : float
        Minimum allowed gate-open interval in seconds (25 ms).
    GATE_OPEN_MAX_INTERVAL_S : float
        Maximum allowed gate-open interval in seconds (1 s).

    Examples
    --------
    >>> gk = GateKeeper(delta=0.01)
    >>> gk.is_open(t=0.0)
    True
    >>> gk.admit_packet(t=0.0, t_on=0.001)
    True
    >>> gk.is_open(t=0.0)
    False
    >>> gk.is_open(t=0.1)
    True
    """

    GATE_OPEN_MIN_INTERVAL_S: float = 0.025
    GATE_OPEN_MAX_INTERVAL_S: float = 1.0
    _T_EPSILON: float = 1e-9  # 1 ns tolerance for floating-point rounding

    def __init__(self, delta: float) -> None:
        """
        Initialise the gate keeper with an initial ``delta`` value.

        Parameters
        ----------
        delta : float
            Duty-cycle fraction from the adaptive DCC algorithm. Must be
            strictly positive (it divides the packet duration in B.1).

        Raises
        ------
        ValueError
            If *delta* is not positive.
        """
        # Fix: reject non-positive delta up front. admit_packet() divides
        # by delta (equation B.1) and update_delta() already enforces this
        # invariant, so a zero/negative initial value previously surfaced
        # later as an obscure ZeroDivisionError or a negative interval.
        if delta <= 0.0:
            raise ValueError(f"delta must be positive, got {delta!r}")
        self._delta: float = delta
        self._t_pg: float | None = None  # time when gate last closed
        self._t_go: float | None = None  # scheduled gate-opening time

    # ------------------------------------------------------------------
    # Public API
    # ------------------------------------------------------------------

    def is_open(self, t: float) -> bool:
        """
        Return ``True`` if the gate is currently open at time *t*.

        The gate is open on first use (before any packet has been
        admitted) and from ``t_go`` onward after each admission.

        Parameters
        ----------
        t : float
            Current time in seconds.

        Returns
        -------
        bool
            ``True`` if a packet may be admitted, ``False`` otherwise.
        """
        if self._t_go is None:
            return True
        return t >= self._t_go - self._T_EPSILON

    def admit_packet(self, t: float, t_on: float) -> bool:
        """
        Attempt to admit one packet at time *t*.

        If the gate is open the packet is accepted, the gate closes, and
        the next gate-opening time is scheduled per equation B.1:

        .. math::

            t_{go} = t_{pg} + \\min\\!\\left(\\max\\!\\left(
                \\frac{T_{on\\_pp}}{\\delta}, 0.025
            \\right), 1 \\right)

        Parameters
        ----------
        t : float
            Current time in seconds.
        t_on : float
            Transmission duration of this packet in seconds
            (``T_on_pp`` in Annex B).

        Returns
        -------
        bool
            ``True`` if the packet was admitted; ``False`` if the gate was
            closed and the packet is rejected.

        Raises
        ------
        ValueError
            If *t_on* is not positive.
        """
        if t_on <= 0.0:
            raise ValueError(f"t_on must be positive, got {t_on!r}")
        if not self.is_open(t):
            return False

        self._t_pg = t
        interval = min(
            max(t_on / self._delta, self.GATE_OPEN_MIN_INTERVAL_S),
            self.GATE_OPEN_MAX_INTERVAL_S,
        )
        self._t_go = t + interval
        return True

    def update_delta(self, t: float, delta_new: float) -> None:
        """
        Update the duty-cycle fraction and reschedule the gate-opening
        time.

        When ``delta`` changes, the gate-opening time is recalculated per
        equation B.2 to avoid synchronising gate openings across stations:

        .. math::

            t_{go} = t_{pg} + \\min\\!\\left(\\max\\!\\left(
                \\frac{\\delta_{old}}{\\delta_{new}} \\cdot (t_{go} - t_{pg}),
                0.025
            \\right), 1 \\right)

        If the gate is currently open (no packet has been admitted yet, or
        ``t_go`` has already passed) only ``delta`` is updated.

        Parameters
        ----------
        t : float
            Current time in seconds (used to determine whether the gate is
            still closed).
        delta_new : float
            Updated duty-cycle fraction from the adaptive DCC algorithm.

        Raises
        ------
        ValueError
            If *delta_new* is not positive.
        """
        if delta_new <= 0.0:
            raise ValueError(f"delta_new must be positive, got {delta_new!r}")

        delta_old = self._delta
        self._delta = delta_new

        # Gate is already open → no rescheduling needed
        if self._t_pg is None or self._t_go is None or self.is_open(t):
            return

        # B.2 – rescale remaining closed interval by delta ratio
        old_interval = self._t_go - self._t_pg
        new_interval = (delta_old / delta_new) * old_interval
        self._t_go = self._t_pg + min(
            max(new_interval, self.GATE_OPEN_MIN_INTERVAL_S),
            self.GATE_OPEN_MAX_INTERVAL_S,
        )
class DccState(Enum):
    """
    Ordered states of the reactive DCC algorithm.

    As specified in ETSI TS 102 687 V1.2.1 (2018-04) clause 5.3. The
    numeric values encode each state's position in the linear
    Relaxed → Active 1 → Active 2 → Active 3 → Restrictive sequence; a
    state may only be entered from one of its direct neighbours.
    """

    RELAXED = 0
    ACTIVE_1 = 1
    ACTIVE_2 = 2
    ACTIVE_3 = 3
    RESTRICTIVE = 4


@dataclass(frozen=True)
class DccStateConfig:
    """
    CBR band and output parameters of a single reactive DCC state.

    As specified in ETSI TS 102 687 V1.2.1 (2018-04) Annex A, Tables A.1
    and A.2.

    Attributes
    ----------
    cbr_min : float
        Inclusive lower CBR bound of the band (0.0 for Relaxed).
    cbr_max : float
        Exclusive upper CBR bound of the band (set slightly above 1.0 for
        Restrictive so that CBR == 1.0 falls inside it).
    packet_rate_hz : float
        Maximum allowed packet transmission rate in packets per second.
    t_off_ms : float
        Minimum required inter-packet gap in milliseconds.
    """

    cbr_min: float
    cbr_max: float
    packet_rate_hz: float
    t_off_ms: float


# ---------------------------------------------------------------------------
# Standard parameter tables (Annex A)
# ---------------------------------------------------------------------------

#: Table A.1 – T_on at most 1 ms. States ordered RELAXED → RESTRICTIVE.
_TABLE_A1: dict[DccState, DccStateConfig] = {
    DccState.RELAXED: DccStateConfig(0.00, 0.30, 10.0, 100.0),
    DccState.ACTIVE_1: DccStateConfig(0.30, 0.40, 5.0, 200.0),
    DccState.ACTIVE_2: DccStateConfig(0.40, 0.50, 2.5, 400.0),
    DccState.ACTIVE_3: DccStateConfig(0.50, 0.60, 2.0, 500.0),
    DccState.RESTRICTIVE: DccStateConfig(0.60, 1.01, 1.0, 1000.0),
}

#: Table A.2 – T_on at most 500 µs.
_TABLE_A2: dict[DccState, DccStateConfig] = {
    DccState.RELAXED: DccStateConfig(0.00, 0.30, 20.0, 50.0),
    DccState.ACTIVE_1: DccStateConfig(0.30, 0.40, 10.0, 100.0),
    DccState.ACTIVE_2: DccStateConfig(0.40, 0.50, 5.0, 200.0),
    DccState.ACTIVE_3: DccStateConfig(0.50, 0.65, 4.0, 250.0),
    DccState.RESTRICTIVE: DccStateConfig(0.65, 1.01, 1.0, 1000.0),
}

#: States in transition order; enum definition order matches clause 5.3.
_STATE_ORDER: list[DccState] = list(DccState)


@dataclass(frozen=True)
class DccReactiveOutput:
    """
    Result of a single reactive DCC evaluation.

    Attributes
    ----------
    state : DccState
        Current DCC state after the evaluation.
    packet_rate_hz : float
        Maximum allowed packet transmission rate in packets per second.
    t_off_ms : float
        Minimum required inter-packet gap in milliseconds (T_off).
    """

    state: DccState
    packet_rate_hz: float
    t_off_ms: float


class DccReactive:
    """
    Reactive DCC algorithm as specified in ETSI TS 102 687 V1.2.1
    (2018-04) clause 5.3 and Annex A.

    Call :meth:`update` periodically (at least every 200 ms, per
    clause 5.2) with the most recently measured Channel Busy Ratio (CBR).
    Each call moves the state by at most one step toward the state whose
    CBR band contains the measurement, honouring the neighbour-only
    transition rule of clause 5.3.

    Parameters
    ----------
    t_on_max_us : int
        Assumed maximum packet transmission duration in microseconds.
        Values of at most 500 select the parameter table from Annex A
        Table A.2; larger values select Table A.1. Defaults to 1000
        (1 ms, i.e. Table A.1).

    Attributes
    ----------
    state : DccState
        Current algorithm state. Starts at :attr:`DccState.RELAXED`.

    Examples
    --------
    >>> dcc = DccReactive()
    >>> out = dcc.update(cbr=0.35)
    >>> out.packet_rate_hz
    5.0
    """

    def __init__(self, t_on_max_us: int = 1000) -> None:
        """
        Initialise the reactive DCC algorithm.

        Parameters
        ----------
        t_on_max_us : int, optional
            Maximum packet transmission duration in microseconds. Values
            ≤ 500 select Annex A Table A.2; all other values select
            Table A.1. Defaults to 1000.
        """
        self._table = _TABLE_A2 if t_on_max_us <= 500 else _TABLE_A1
        self.state = DccState.RELAXED

    # ------------------------------------------------------------------
    # Internal helpers
    # ------------------------------------------------------------------

    def _target_state(self, cbr: float) -> DccState:
        """Return the state whose CBR band contains *cbr* (Restrictive
        catches CBR == 1.0 and any floating-point rounding above it)."""
        return next(
            (
                state
                for state, cfg in self._table.items()
                if cfg.cbr_min <= cbr < cfg.cbr_max
            ),
            DccState.RESTRICTIVE,
        )

    # ------------------------------------------------------------------
    # Public API
    # ------------------------------------------------------------------

    def update(self, cbr: float) -> DccReactiveOutput:
        """
        Evaluate the algorithm with the current Channel Busy Ratio.

        The state advances by at most one step toward the state implied by
        *cbr*, satisfying the adjacency constraint from clause 5.3.

        Parameters
        ----------
        cbr : float
            Current Channel Busy Ratio value in the range ``[0.0, 1.0]``.

        Returns
        -------
        DccReactiveOutput
            Updated state together with the corresponding transmission
            constraints (maximum packet rate and minimum T_off).

        Raises
        ------
        ValueError
            If *cbr* is outside the range ``[0.0, 1.0]``.
        """
        if not 0.0 <= cbr <= 1.0:
            raise ValueError(f"cbr must be in [0.0, 1.0], got {cbr!r}")

        here = _STATE_ORDER.index(self.state)
        there = _STATE_ORDER.index(self._target_state(cbr))
        # Move at most one position toward the target band (clause 5.3).
        step = max(-1, min(1, there - here))
        self.state = _STATE_ORDER[here + step]

        cfg = self._table[self.state]
        return DccReactiveOutput(
            state=self.state,
            packet_rate_hz=cfg.packet_rate_hz,
            t_off_ms=cfg.t_off_ms,
        )
b/src/flexstack/security/asn1/EtsiTs103097Module.py new file mode 100644 index 0000000..ec3beeb --- /dev/null +++ b/src/flexstack/security/asn1/EtsiTs103097Module.py @@ -0,0 +1,183 @@ +# pylint: skip-file +ETSI_TS_103_097_MODULE_ASN1_DESCRIPTIONS = """ +EtsiTs103097Module +{itu-t(0) identified-organization(4) etsi(0) itsDomain(5) wg5(5) secHeaders(103097) core(1) major-version-3(3) minor-version-2(2)} + +DEFINITIONS AUTOMATIC TAGS ::= BEGIN + +IMPORTS + +Ieee1609Dot2Data, Certificate +FROM Ieee1609Dot2 {iso(1) identified-organization(3) ieee(111) + standards-association-numbered-series-standards(2) wave-stds(1609) + dot2(2) base(1) schema(1) major-version-2(2) minor-version-7(7)} + +ExtensionModuleVersion +FROM EtsiTs103097ExtensionModule {itu-t(0) identified-organization(4) + etsi(0) itsDomain(5) wg5(5) secHeaders(103097) extension(2) major-version-1(1) minor-version-2(2)} +; + +EtsiTs103097Certificate::= Certificate (WITH COMPONENTS{..., + issuer (WITH COMPONENTS{ -- constraints on issuer + sha256AndDigest, + self (sha256 | sha384), + sha384AndDigest + }), + toBeSigned (WITH COMPONENTS{..., + id (WITH COMPONENTS{..., -- constraints on id + linkageData ABSENT, + binaryId ABSENT + }), + certRequestPermissions ABSENT, + canRequestRollover ABSENT, + encryptionKey (WITH COMPONENTS { -- constraints on encryptionKey + supportedSymmAlg (aes128Ccm), + publicKey (WITH COMPONENTS { + eciesNistP256, + eciesBrainpoolP256r1 + }) + }), + verifyKeyIndicator (WITH COMPONENTS {..., -- constraints on verifyKeyIndicator + verificationKey (WITH COMPONENTS { + ecdsaNistP256, + ecdsaBrainpoolP256r1, + ecdsaBrainpoolP384r1, + ecdsaNistP384 + }) + }) --, + -- certRequestExtension ABSENT + }), + signature (WITH COMPONENTS { -- constraints on signature + ecdsaNistP256Signature, + ecdsaBrainpoolP256r1Signature, + ecdsaBrainpoolP384r1Signature, + ecdsaNistP384Signature + }) +}) + +EtsiTs103097Data::=Ieee1609Dot2Data (WITH COMPONENTS {..., + content (WITH COMPONENTS {..., + signedData (WITH 
COMPONENTS {..., -- constraints on signed data headers + hashId (sha256 | sha384), + tbsData (WITH COMPONENTS { -- constraints on tbsData + headerInfo (WITH COMPONENTS {..., + generationTime PRESENT, + p2pcdLearningRequest ABSENT, + missingCrlIdentifier ABSENT, + encryptionKey (WITH COMPONENTS { -- constraints on encryptionKey + public (WITH COMPONENTS { + supportedSymmAlg (aes128Ccm), + publicKey (WITH COMPONENTS { + eciesNistP256, + eciesBrainpoolP256r1 + }) + }), + symmetric (WITH COMPONENTS { + aes128Ccm + }) + }) + }) + }), + signer (WITH COMPONENTS {..., -- constraints on the certificate + certificate ((WITH COMPONENT (EtsiTs103097Certificate))^(SIZE(1))) + }), + signature (WITH COMPONENTS {..., -- constraints on the signature + sm2Signature ABSENT + }) + }), + encryptedData (WITH COMPONENTS {..., -- constraints on encrypted data headers + recipients (WITH COMPONENT ( + (WITH COMPONENTS {..., + certRecipInfo (WITH COMPONENTS{ + encKey (WITH COMPONENTS{ + eciesNistP256, + eciesBrainpoolP256r1 + }) + }), + signedDataRecipInfo(WITH COMPONENTS{ + encKey (WITH COMPONENTS{ + eciesNistP256, + eciesBrainpoolP256r1 + }) + }), + pskRecipInfo ABSENT, + symmRecipInfo ABSENT, + rekRecipInfo ABSENT + }) + )), + ciphertext (WITH COMPONENTS { + aes128ccm + }) + }), + signedCertificateRequest ABSENT + }) +}) + +EtsiTs103097Data-Unsecured {ToBeSentDataContent} ::= EtsiTs103097Data (WITH COMPONENTS {..., + content (WITH COMPONENTS { + unsecuredData (CONTAINING ToBeSentDataContent) + }) +}) + +EtsiTs103097Data-Signed {ToBeSignedDataContent} ::= EtsiTs103097Data (WITH COMPONENTS {..., + content (WITH COMPONENTS { + signedData (WITH COMPONENTS {..., + tbsData (WITH COMPONENTS { + payload (WITH COMPONENTS { + data (WITH COMPONENTS {..., + content (WITH COMPONENTS { + unsecuredData (CONTAINING ToBeSignedDataContent) + }) + }) PRESENT + }) + }) + }) + }) +}) + +EtsiTs103097Data-SignedExternalPayload ::= EtsiTs103097Data (WITH COMPONENTS {..., + content (WITH COMPONENTS { + signedData 
(WITH COMPONENTS {..., + tbsData (WITH COMPONENTS { + payload (WITH COMPONENTS { + extDataHash (WITH COMPONENTS { + sha256HashedData PRESENT + }) PRESENT + }) + }) + }) + }) +}) + +EtsiTs103097Data-Encrypted {ToBeEncryptedDataContent} ::= EtsiTs103097Data (WITH COMPONENTS {..., + content (WITH COMPONENTS { + encryptedData (WITH COMPONENTS {..., + ciphertext (WITH COMPONENTS {..., + aes128ccm (WITH COMPONENTS {..., + ccmCiphertext (CONSTRAINED BY {-- ccm encryption of -- ToBeEncryptedDataContent}) + }) + }) + }) + }) +}) + +EtsiTs103097Data-SignedAndEncrypted {ToBesignedAndEncryptedDataContent} ::= EtsiTs103097Data-Encrypted {EtsiTs103097Data-Signed {ToBesignedAndEncryptedDataContent}} + +EtsiTs103097Data-Encrypted-Unicast {ToBeEncryptedDataContent} ::= EtsiTs103097Data-Encrypted { EtsiTs103097Data-Unsecured{ToBeEncryptedDataContent}} (WITH COMPONENTS {..., + content (WITH COMPONENTS { + encryptedData (WITH COMPONENTS {..., + recipients (SIZE(1)) + }) + }) +}) + +EtsiTs103097Data-SignedAndEncrypted-Unicast {ToBesignedAndEncryptedDataContent} ::= EtsiTs103097Data-Encrypted {EtsiTs103097Data-Signed {ToBesignedAndEncryptedDataContent}} (WITH COMPONENTS {..., + content (WITH COMPONENTS { + encryptedData (WITH COMPONENTS {..., + recipients (SIZE(1)) + }) + }) +}) + +END +""" diff --git a/src/flexstack/security/asn1/__init__.py b/src/flexstack/security/asn1/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/flexstack/security/asn1/ieee1609dot2/Ieee1609Dot2.py b/src/flexstack/security/asn1/ieee1609dot2/Ieee1609Dot2.py new file mode 100644 index 0000000..6caf6e7 --- /dev/null +++ b/src/flexstack/security/asn1/ieee1609dot2/Ieee1609Dot2.py @@ -0,0 +1,1479 @@ +# pylint: skip-file +IEEE_1609_DOT_2_MODULE_ASN1_DESCRIPTIONS = """ +--***************************************************************************-- +-- IEEE Std 1609.2 -- +--***************************************************************************-- + +/** + * @note Section references in this 
file are to clauses in IEEE Std + * 1609.2 unless indicated otherwise. Full forms of acronyms and + * abbreviations used in this file are specified in 3.2. + */ + +Ieee1609Dot2 {iso(1) identified-organization(3) ieee(111) + standards-association-numbered-series-standards(2) wave-stds(1609) + dot2(2) base(1) schema(1) major-version-2(2) minor-version-7(7)} + +DEFINITIONS AUTOMATIC TAGS ::= BEGIN + +IMPORTS + CrlSeries, + EccP256CurvePoint, + EcencP256EncryptedKey, + EciesP256EncryptedKey, + EncryptionKey, + Extension, + ExtId, + GeographicRegion, + GroupLinkageValue, + HashAlgorithm, + HashedId3, + HashedId8, + HashedId32, + HashedId48, + Hostname, + IValue, + LinkageValue, + Opaque, + Psid, + PsidSsp, + PsidSspRange, + PublicEncryptionKey, + PublicVerificationKey, + SequenceOfHashedId3, + SequenceOfPsidSsp, + SequenceOfPsidSspRange, + ServiceSpecificPermissions, + Signature, + SubjectAssurance, + SymmetricEncryptionKey, + ThreeDLocation, + Time64, + Uint3, + Uint8, + Uint16, + Uint32, + ValidityPeriod +FROM Ieee1609Dot2BaseTypes {iso(1) identified-organization(3) ieee(111) + standards-association-numbered-series-standards(2) wave-stds(1609) dot2(2) + base(1) base-types(2) major-version-2(2) minor-version-4(4)} + + EtsiOriginatingHeaderInfoExtension +FROM EtsiTs103097ExtensionModule {itu-t(0) identified-organization(4) etsi(0) + itsDomain(5) wg5(5) secHeaders(103097) extension(2) major-version-1(1) + minor-version-0(0)} +; + +--***************************************************************************-- +-- Secured Data -- +--***************************************************************************-- + +/** + * @brief This data type is used to contain the other data types in this + * clause. The fields in the Ieee1609Dot2Data have the following meanings: + * + * @param protocolVersion: contains the current version of the protocol. The + * version specified in this standard is version 3, represented by the + * integer 3. 
There are no major or minor version numbers. + * + * @param content: contains the content in the form of an Ieee1609Dot2Content. + * + * @note Canonicalization: This data structure is subject to canonicalization + * for the relevant operations specified in 6.1.2. The canonicalization + * applies to the Ieee1609Dot2Content. + */ +Ieee1609Dot2Data ::= SEQUENCE { + protocolVersion Uint8(3), + content Ieee1609Dot2Content +} + +/** + * @brief In this structure: + * + * @param unsecuredData: indicates that the content is an OCTET STRING to be + * consumed outside the SDS. + * + * @param signedData: indicates that the content has been signed according to + * this standard. + * + * @param encryptedData: indicates that the content has been encrypted + * according to this standard. + * + * @param signedCertificateRequest: indicates that the content is a + * certificate request signed by an IEEE 1609.2 certificate or self-signed. + * + * @param signedX509CertificateRequest: indicates that the content is a + * certificate request signed by an ITU-T X.509 certificate. + * + * @note Canonicalization: This data structure is subject to canonicalization + * for the relevant operations specified in 6.1.2 if it is of type signedData. + * The canonicalization applies to the SignedData. + */ +Ieee1609Dot2Content ::= CHOICE { + unsecuredData Opaque, + signedData SignedData, + encryptedData EncryptedData, + signedCertificateRequest Opaque, + ..., + signedX509CertificateRequest Opaque +} + +/** + * @brief In this structure: + * + * @param hashId: indicates the hash algorithm to be used to generate the hash + * of the message for signing and verification. + * + * @param tbsData: contains the data that is hashed as input to the signature. + * + * @param signer: determines the keying material and hash algorithm used to + * sign the data. + * + * @param signature: contains the digital signature itself, calculated as + * specified in 5.3.1. 
+ * - If signer indicates the choice self, then the signature calculation + * is parameterized as follows: + * - Data input is equal to the COER encoding of the tbsData field + * canonicalized according to the encoding considerations given in 6.3.6. + * - Verification type is equal to self. + * - Signer identifier input is equal to the empty string. + * - If signer indicates certificate or digest, then the signature + * calculation is parameterized as follows: + * - Data input is equal to the COER encoding of the tbsData field + * canonicalized according to the encoding considerations given in 6.3.6. + * - Verification type is equal to certificate. + * - Signer identifier input equal to the COER-encoding of the + * Certificate that is to be used to verify the SPDU, canonicalized according + * to the encoding considerations given in 6.4.3. + * + * @note Canonicalization: This data structure is subject to canonicalization + * for the relevant operations specified in 6.1.2. The canonicalization + * applies to the ToBeSignedData and the Signature. + */ +SignedData ::= SEQUENCE { + hashId HashAlgorithm, + tbsData ToBeSignedData, + signer SignerIdentifier, + signature Signature +} + +/** + * @brief This structure contains the data to be hashed when generating or + * verifying a signature. See 6.3.4 for the specification of the input to the + * hash. + * + * @param payload: contains data that is provided by the entity that invokes + * the SDS. + * + * @param headerInfo: contains additional data that is inserted by the SDS. + * This structure is used as follows to determine the "data input" to the + * hash operation for signing or verification as specified in 5.3.1.2.2 or + * 5.3.1.3. + * - If payload does not contain the field omitted, the data input to the + * hash operation is the COER encoding of the ToBeSignedData. 
+ * - If payload field in this ToBeSignedData instance contains the field + * omitted, the data input to the hash operation is the COER encoding of the + * ToBeSignedData, concatenated with the hash of the omitted payload. The hash + * of the omitted payload is calculated with the same hash algorithm that is + * used to calculate the hash of the data input for signing or verification. + * The data input to the hash operation is simply the COER encoding of the + * ToBeSignedData, concatenated with the hash of the omitted payload: there is + * no additional wrapping or length indication. As noted in 5.2.4.3.4, the + * means by which the signer and verifier establish the contents of the + * omitted payload are outside the scope of this standard. + * + * @note Canonicalization: This data structure is subject to canonicalization + * for the relevant operations specified in 6.1.2. The canonicalization + * applies to the SignedDataPayload if it is of type data, and to the + * HeaderInfo. + */ +ToBeSignedData ::= SEQUENCE { + payload SignedDataPayload, + headerInfo HeaderInfo +} + +/** + * @brief This structure contains the data payload of a ToBeSignedData. This + * structure contains at least one of the optional elements, and may contain + * more than one. See 5.2.4.3.4 for more details. + * The security profile in Annex C allows an implementation of this standard + * to state which forms of SignedDataPayload are supported by that + * implementation, and also how the signer and verifier are intended to obtain + * the external data for hashing. The specification of an SDEE that uses + * external data is expected to be explicit and unambiguous about how this + * data is obtained and how it is formatted prior to processing by the hash + * function. + * + * @param data: contains data that is explicitly transported within the + * structure. 
+ * + * @param extDataHash: contains the hash of data that is not explicitly + * transported within the structure, and which the creator of the structure + * wishes to cryptographically bind to the signature. + * + * @param omitted: indicates that there is data to be included in the hash + * calculation for the signature that is not included in the SPDU, either in + * data or by use of the extDataHash. The mechanism for including the omitted + * data in the hash calculation is specified in 6.3.6. + * + * @note Canonicalization: This data structure is subject to canonicalization + * for the relevant operations specified in 6.1.2. The canonicalization + * applies to the Ieee1609Dot2Data. + */ +SignedDataPayload ::= SEQUENCE { + data Ieee1609Dot2Data OPTIONAL, + extDataHash HashedData OPTIONAL, + ..., + omitted NULL OPTIONAL +} (WITH COMPONENTS {..., data PRESENT} | + WITH COMPONENTS {..., extDataHash PRESENT} | + WITH COMPONENTS {..., omitted PRESENT}) + + +/** + * @brief This structure contains the hash of some data with a specified hash + * algorithm. See 5.3.3 for specification of the permitted hash algorithms. + * + * @param sha256HashedData: indicates data hashed with SHA-256. + * + * @param sha384HashedData: indicates data hashed with SHA-384. + * + * @param sm3HashedData: indicates data hashed with SM3. + * + * @note Critical information fields: If present, this is a critical + * information field as defined in 5.2.6. An implementation that does not + * recognize the indicated CHOICE for this type when verifying a signed SPDU + * shall indicate that the signed SPDU is invalid in the sense of 4.2.2.3.2, + * that is, it is invalid in the sense that its validity cannot be established. + */ +HashedData::= CHOICE { + sha256HashedData HashedId32, + ..., + sha384HashedData HashedId48, + sm3HashedData HashedId32 +} + +/** + * @brief This structure contains the following information that is used to establish + * validity by the criteria of 5.2. 
+ * + * @param psid: indicates the application area with which the sender is + * claiming the payload is to be associated. + * + * @param generationTime: indicates the time at which the structure was + * generated. See 5.2.5.2.2 and 5.2.5.2.3 for discussion of the use of this + * field. + * + * @param expiryTime: if present, contains the time after which the data + * is no longer considered relevant. If both generationTime and + * expiryTime are present, the signed SPDU is invalid if generationTime is + * not strictly earlier than expiryTime. + * + * @param generationLocation: if present, contains the location at which the + * signature was generated. + * + * @param p2pcdLearningRequest: if present, is used by the SDS to request + * certificates for which it has seen identifiers and does not know the + * entire certificate. A specification of this peer-to-peer certificate + * distribution (P2PCD) mechanism is given in Clause 8. This field is used + * for the separate-certificate-pdu flavor of P2PCD and shall only be present + * if inlineP2pcdRequest is not present. The HashedId3 is calculated with the + * whole-certificate hash algorithm, determined as described in 6.4.3, + * applied to the COER-encoded certificate, canonicalized as defined in the + * definition of Certificate. + * + * @param missingCrlIdentifier: if present, is used by the SDS to request + * CRLs which it knows to have been issued and have not received. This is + * provided for future use and the associated mechanism is not defined in + * this version of this standard. + * + * @param encryptionKey: if present, is used to provide a key that is to + * be used to encrypt at least one response to this SPDU. The SDEE + * specification is expected to specify which response SPDUs are to be + * encrypted with this key. One possible use of this key to encrypt a + * response is specified in 6.3.35, 6.3.37, and 6.3.34. 
An encryptionKey + * field of type symmetric should only be used if the SignedData containing + * this field is securely encrypted by some means. + * + * @param inlineP2pcdRequest: if present, is used by the SDS to request + * unknown certificates per the inline peer-to-peer certificate distribution + * mechanism is given in Clause 8. This field shall only be present if + * p2pcdLearningRequest is not present. The HashedId3 is calculated with the + * whole-certificate hash algorithm, determined as described in 6.4.3, applied + * to the COER-encoded certificate, canonicalized as defined in the definition + * of Certificate. + * + * @param requestedCertificate: if present, is used by the SDS to provide + * certificates per the "inline" version of the peer-to-peer certificate + * distribution mechanism given in Clause 8. + * + * @param pduFunctionalType: if present, is used to indicate that the SPDU is + * to be consumed by a process other than an application process as defined + * in ISO 21177 [B14a]. See 6.3.23b for more details. + * + * @param contributedExtensions: if present, is used to contain additional + * extensions defined using the ContributedExtensionBlocks structure. + * + * @note Canonicalization: This data structure is subject to canonicalization + * for the relevant operations specified in 6.1.2. The canonicalization + * applies to the EncryptionKey. If encryptionKey is present, and indicates + * the choice public, and contains a BasePublicEncryptionKey that is an + * elliptic curve point (i.e., of type EccP256CurvePoint or + * EccP384CurvePoint), then the elliptic curve point is encoded in compressed + * form, i.e., such that the choice indicated within the Ecc*CurvePoint is + * compressed-y-0 or compressed-y-1. + * The canonicalization does not apply to any fields after the extension + * marker, including any fields in contributedExtensions. 
+ */
+HeaderInfo ::= SEQUENCE {
+  psid Psid,
+  generationTime Time64 OPTIONAL,
+  expiryTime Time64 OPTIONAL,
+  generationLocation ThreeDLocation OPTIONAL,
+  p2pcdLearningRequest HashedId3 OPTIONAL,
+  missingCrlIdentifier MissingCrlIdentifier OPTIONAL,
+  encryptionKey EncryptionKey OPTIONAL,
+  ...,
+  inlineP2pcdRequest SequenceOfHashedId3 OPTIONAL,
+  requestedCertificate Certificate OPTIONAL,
+  pduFunctionalType PduFunctionalType OPTIONAL,
+  contributedExtensions ContributedExtensionBlocks OPTIONAL
+}
+
+/**
+ * @brief This structure may be used to request a CRL that the SSME knows to
+ * have been issued and has not yet received. It is provided for future use
+ * and its use is not defined in this version of this standard.
+ *
+ * @param cracaId: is the HashedId3 of the CRACA, as defined in 5.1.3. The
+ * HashedId3 is calculated with the whole-certificate hash algorithm,
+ * determined as described in 6.4.3, applied to the COER-encoded certificate,
+ * canonicalized as defined in the definition of Certificate.
+ *
+ * @param crlSeries: is the requested CRL Series value. See 5.1.3 for more
+ * information.
+ */
+MissingCrlIdentifier ::= SEQUENCE {
+  cracaId HashedId3,
+  crlSeries CrlSeries,
+  ...
+}
+
+/**
+ * @brief This data structure identifies the functional entity that is
+ * intended to consume an SPDU, for the case where that functional entity is
+ * not an application process, and are instead security support services for an
+ * application process. Further details and the intended use of this field are
+ * defined in ISO 21177 [B20].
+ *
+ * @param tlsHandshake: indicates that the Signed SPDU is not to be directly
+ * consumed as an application PDU and is to be used to provide information
+ * about the holder's permissions to a Transport Layer Security (TLS)
+ * (IETF 5246 [B15], IETF 8446 [B16]) handshake process operating to secure
+ * communications to an application process. See IETF [B15] and ISO 21177
+ * [B20] for further information.
+ *
+ * @param iso21177ExtendedAuth: indicates that the Signed SPDU is not to be
+ * directly consumed as an application PDU and is to be used to provide
+ * additional information about the holder's permissions to the ISO 21177
+ * Security Subsystem for an application process. See ISO 21177 [B20] for
+ * further information.
+ *
+ * @param iso21177SessionExtension: indicates that the Signed SPDU is not to
+ * be directly consumed as an application PDU and is to be used to extend an
+ * existing ISO 21177 secure session. This enables a secure session to
+ * persist beyond the lifetime of the certificates used to establish that
+ * session.
+ */
+PduFunctionalType ::= INTEGER (0..255)
+
+tlsHandshake PduFunctionalType ::= 1
+iso21177ExtendedAuth PduFunctionalType ::= 2
+iso21177SessionExtension PduFunctionalType ::= 3
+
+
+/**
+ * @brief This type is used for clarity of definitions.
+ */
+ContributedExtensionBlocks ::= SEQUENCE (SIZE(1..MAX)) OF
+  ContributedExtensionBlock
+
+/**
+ * @brief This data structure defines the format of an extension block
+ * provided by an identified contributor by using the template provided
+ * in the class IEEE1609DOT2-HEADERINFO-CONTRIBUTED-EXTENSION constraint
+ * to the objects in the set Ieee1609Dot2HeaderInfoContributedExtensions.
+ *
+ * @param contributorId: uniquely identifies the contributor.
+ *
+ * @param extns: contains a list of extensions from that contributor.
+ * Extensions are expected but not required to follow the format specified
+ * in 6.5.
+ */
+ContributedExtensionBlock ::= SEQUENCE {
+  contributorId HeaderInfoContributorId,
+  extns SEQUENCE (SIZE(1..MAX)) OF OCTET STRING
+}
+
+/**
+ * @brief This is an integer used to identify a HeaderInfo extension
+ * contributing organization. In this version of this standard two values are
+ * defined:
+ *   - ieee1609OriginatingExtensionId indicating extensions originating with
+ * IEEE Std 1609.
+ * - etsiOriginatingExtensionId indicating extensions originating with + * ETSI TC ITS. + */ +HeaderInfoContributorId ::= INTEGER (0..255) + +ieee1609HeaderInfoContributorId HeaderInfoContributorId ::= 1 +etsiHeaderInfoContributorId HeaderInfoContributorId ::= 2 + + +/** + * @brief This structure allows the recipient of data to determine which + * keying material to use to authenticate the data. It also indicates the + * verification type to be used to generate the hash for verification, as + * specified in 5.3.1. + * + * @param digest: If the choice indicated is digest: + * - The structure contains the HashedId8 of the relevant certificate. The + * HashedId8 is calculated with the whole-certificate hash algorithm, + * determined as described in 6.4.3. + * - The verification type is certificate and the certificate data + * passed to the hash function as specified in 5.3.1 is the authorization + * certificate. + * + * @param certificate: If the choice indicated is certificate: + * - The structure contains one or more Certificate structures, in order + * such that the first certificate is the authorization certificate and each + * subsequent certificate is the issuer of the one before it. The certificate + * chain may be of any length. It should not include the root CA certificate + * (as the receiving SDS is assumed to know all valid root CAs already). + * - The verification type is certificate and the certificate data + * passed to the hash function as specified in 5.3.1 is the authorization + * certificate. + * + * @param self: If the choice indicated is self: + * - The structure does not contain any data beyond the indication that + * the choice value is self. + * - The verification type is self-signed. + * + * @note Critical information fields: + * - If present, this is a critical information field as defined in 5.2.6. 
+ * An implementation that does not recognize the CHOICE value for this type + * when verifying a signed SPDU shall indicate that the signed SPDU is invalid. + * - If present, certificate is a critical information field as defined in + * 5.2.6. An implementation that does not support the number of certificates + * in certificate when verifying a signed SPDU shall indicate that the signed + * SPDU is invalid. A compliant implementation shall support certificate + * fields containing at least one certificate. + * + * @note Canonicalization: This data structure is subject to canonicalization + * for the relevant operations specified in 6.1.2. The canonicalization + * applies to every Certificate in the certificate field. + */ +SignerIdentifier ::= CHOICE { + digest HashedId8, + certificate SequenceOfCertificate, + self NULL, + ... +} + +/** + * @brief This data structure is used to perform a countersignature over an + * already-signed SPDU. This is the profile of an Ieee1609Dot2Data containing + * a signedData. The tbsData within content is composed of a payload + * containing the hash (extDataHash) of the externally generated, pre-signed + * SPDU over which the countersignature is performed. 
+ */
+Countersignature ::= Ieee1609Dot2Data (WITH COMPONENTS {...,
+  content (WITH COMPONENTS {...,
+    signedData (WITH COMPONENTS {...,
+      tbsData (WITH COMPONENTS {...,
+        payload (WITH COMPONENTS {...,
+          data ABSENT,
+          extDataHash PRESENT,
+          omitted ABSENT
+        }),
+        headerInfo(WITH COMPONENTS {...,
+          generationTime PRESENT,
+          expiryTime ABSENT,
+          generationLocation ABSENT,
+          p2pcdLearningRequest ABSENT,
+          missingCrlIdentifier ABSENT,
+          encryptionKey ABSENT
+        })
+      })
+    })
+  })
+})
+
+
+--***************************************************************************--
+-- Encrypted Data --
+--***************************************************************************--
+
+/**
+ * @brief This data structure encodes data that has been encrypted to one or
+ * more recipients using the recipients' public or symmetric keys as
+ * specified in 5.3.4.
+ *
+ * @param recipients: contains one or more RecipientInfos. These entries may
+ * be more than one RecipientInfo, and more than one type of RecipientInfo,
+ * as long as all entries are indicating or containing the same data encryption
+ * key.
+ *
+ * @param ciphertext: contains the encrypted data. This is the encryption of
+ * an encoded Ieee1609Dot2Data structure as specified in 5.3.4.2.
+ *
+ * @note Critical information fields:
+ *   - If present, recipients is a critical information field as defined in
+ * 5.2.6. An implementation that does not support the number of RecipientInfo
+ * in recipients when decrypted shall indicate that the encrypted SPDU could
+ * not be decrypted due to unsupported critical information fields. A
+ * compliant implementation shall support recipients fields containing at
+ * least eight entries.
+ *
+ * @note If the plaintext is raw data, i.e., it has not been output from a
+ * previous operation of the SDS, then it is trivial to encapsulate it in an
+ * Ieee1609Dot2Data of type unsecuredData as noted in 4.2.2.2.2.
For example, + * '03 80 08 01 23 45 67 89 AB CD EF' is the C-OER encoding of '01 23 45 67 + * 89 AB CD EF' encapsulated in an Ieee1609Dot2Data of type unsecuredData. + * The first byte of the encoding 03 is the protocolVersion, the second byte + * 80 indicates the choice unsecuredData, and the third byte 08 is the length + * of the raw data '01 23 45 67 89 AB CD EF'. + */ +EncryptedData ::= SEQUENCE { + recipients SequenceOfRecipientInfo, + ciphertext SymmetricCiphertext +} + +/** + * @brief This data structure is used to transfer the data encryption key to + * an individual recipient of an EncryptedData. The option pskRecipInfo is + * selected if the EncryptedData was encrypted using the static encryption + * key approach specified in 5.3.4. The other options are selected if the + * EncryptedData was encrypted using the ephemeral encryption key approach + * specified in 5.3.4. The meanings of the choices are as follows: + * + * @param pskRecipInfo: The data was encrypted directly using a pre-shared + * symmetric key. + * + * @param symmRecipInfo: The data was encrypted with a data encryption key, + * and the data encryption key was encrypted using a symmetric key. + * + * @param certRecipInfo: The data was encrypted with a data encryption key, + * the data encryption key was encrypted using a public key encryption scheme, + * where the public encryption key was obtained from a certificate. In this + * case, the parameter P1 to ECIES as defined in 5.3.5 is the hash of the + * certificate, calculated with the whole-certificate hash algorithm, + * determined as described in 6.4.3, applied to the COER-encoded certificate, + * canonicalized as defined in the definition of Certificate. + * + * @note If the encryption algorithm is SM2, there is no equivalent of the + * parameter P1 and so no input to the encryption process that uses the hash + * of the certificate. 
+ * + * @param signedDataRecipInfo: The data was encrypted with a data encryption + * key, the data encryption key was encrypted using a public key encryption + * scheme, where the public encryption key was obtained as the public response + * encryption key from a SignedData. In this case, if ECIES is the encryption + * algorithm, then the parameter P1 to ECIES as defined in 5.3.5 is the + * SHA-256 hash of the Ieee1609Dot2Data of type signedData containing the + * response encryption key, canonicalized as defined in the definition of + * Ieee1609Dot2Data. + * + * @note If the encryption algorithm is SM2, there is no equivalent of the + * parameter P1 and so no input to the encryption process that uses the hash + * of the Ieee1609Dot2Data. + * + * @param rekRecipInfo: The data was encrypted with a data encryption key, + * the data encryption key was encrypted using a public key encryption scheme, + * where the public encryption key was not obtained from a Signed-Data or a + * certificate. In this case, the SDEE specification is expected to specify + * how the public key is obtained, and if ECIES is the encryption algorithm, + * then the parameter P1 to ECIES as defined in 5.3.5 is the hash of the + * empty string. + * + * @note If the encryption algorithm is SM2, there is no equivalent of the + * parameter P1 and so no input to the encryption process that uses the hash + * of the empty string. + * + * See C.8 for guidance on when it may be appropriate to use each of these + * approaches. + * + * @note The material input to encryption is the bytes of the encryption key + * with no headers, encapsulation, or length indication. Contrast this to + * encryption of data, where the data is encapsulated in an Ieee1609Dot2Data. 
+ */ +RecipientInfo ::= CHOICE { + pskRecipInfo PreSharedKeyRecipientInfo, + symmRecipInfo SymmRecipientInfo, + certRecipInfo PKRecipientInfo, + signedDataRecipInfo PKRecipientInfo, + rekRecipInfo PKRecipientInfo +} + +/** + * @brief This type is used for clarity of definitions. + */ +SequenceOfRecipientInfo ::= SEQUENCE OF RecipientInfo + +/** + * @brief This data structure is used to indicate a symmetric key that may + * be used directly to decrypt a SymmetricCiphertext. It consists of the + * low-order 8 bytes of the hash of the COER encoding of a + * SymmetricEncryptionKey structure containing the symmetric key in question. + * The HashedId8 is calculated with the hash algorithm determined as + * specified in 5.3.9.3. The symmetric key may be established by any + * appropriate means agreed by the two parties to the exchange. + */ +PreSharedKeyRecipientInfo ::= HashedId8 + +/** + * @brief This data structure contains the following fields: + * + * @param recipientId: contains the hash of the symmetric key encryption key + * that may be used to decrypt the data encryption key. It consists of the + * low-order 8 bytes of the hash of the COER encoding of a + * SymmetricEncryptionKey structure containing the symmetric key in question. + * The HashedId8 is calculated with the hash algorithm determined as + * specified in 5.3.9.4. The symmetric key may be established by any + * appropriate means agreed by the two parties to the exchange. + * + * @param encKey: contains the encrypted data encryption key within a + * SymmetricCiphertext, where the data encryption key is input to the data + * encryption key encryption process with no headers, encapsulation, or + * length indication. 
+ */
+SymmRecipientInfo ::= SEQUENCE {
+  recipientId HashedId8,
+  encKey SymmetricCiphertext
+}
+
+/**
+ * @brief This data structure contains the following fields:
+ *
+ * @param recipientId: contains the hash of the container for the encryption
+ * public key as specified in the definition of RecipientInfo. Specifically,
+ * depending on the choice indicated by the containing RecipientInfo structure:
+ *   - If the containing RecipientInfo structure indicates certRecipInfo,
+ * this field contains the HashedId8 of the certificate. The HashedId8 is
+ * calculated with the whole-certificate hash algorithm, determined as
+ * described in 6.4.3, applied to the COER-encoded certificate, canonicalized
+ * as defined in the definition of Certificate.
+ *   - If the containing RecipientInfo structure indicates
+ * signedDataRecipInfo, this field contains the HashedId8 of the
+ * Ieee1609Dot2Data of type signedData that contained the encryption key,
+ * with that Ieee1609Dot2Data canonicalized per 6.3.4. The HashedId8 is
+ * calculated with the hash algorithm determined as specified in 5.3.9.5.
+ *   - If the containing RecipientInfo structure indicates rekRecipInfo, this
+ * field contains the HashedId8 of the COER encoding of a PublicEncryptionKey
+ * structure containing the response encryption key. The HashedId8 is
+ * calculated with the hash algorithm determined as specified in 5.3.9.5.
+ *
+ * @param encKey: contains the encrypted data encryption key, where the data
+ * encryption key is input to the data encryption key encryption process with
+ * no headers, encapsulation, or length indication.
+ */
+PKRecipientInfo ::= SEQUENCE {
+  recipientId HashedId8,
+  encKey EncryptedDataEncryptionKey
+}
+
+/**
+ * @brief This data structure contains an encrypted data encryption key,
+ * where the data encryption key is input to the data encryption key
+ * encryption process with no headers, encapsulation, or length indication.
+ * + * Critical information fields: If present and applicable to + * the receiving SDEE, this is a critical information field as defined in + * 5.2.6. If an implementation receives an encrypted SPDU and determines that + * one or more RecipientInfo fields are relevant to it, and if all of those + * RecipientInfos contain an EncryptedDataEncryptionKey such that the + * implementation does not recognize the indicated CHOICE, the implementation + * shall indicate that the encrypted SPDU is not decryptable. + */ +EncryptedDataEncryptionKey ::= CHOICE { + eciesNistP256 EciesP256EncryptedKey, + eciesBrainpoolP256r1 EciesP256EncryptedKey, + ..., + ecencSm2256 EcencP256EncryptedKey +} + +/** + * @brief This data structure encapsulates a ciphertext generated with an + * approved symmetric algorithm. + * + * @note Critical information fields: If present, this is a critical + * information field as defined in 5.2.6. An implementation that does not + * recognize the indicated CHOICE value for this type in an encrypted SPDU + * shall indicate that the signed SPDU is invalid in the sense of 4.2.2.3.2, + * that is, it is invalid in the sense that its validity cannot be established. + */ +SymmetricCiphertext ::= CHOICE { + aes128ccm One28BitCcmCiphertext, + ..., + sm4Ccm One28BitCcmCiphertext +} + +/** + * @brief This data structure encapsulates an encrypted ciphertext for any + * symmetric algorithm with 128-bit blocks in CCM mode. The ciphertext is + * 16 bytes longer than the corresponding plaintext due to the inclusion of + * the message authentication code (MAC). The plaintext resulting from a + * correct decryption of the ciphertext is either a COER-encoded + * Ieee1609Dot2Data structure (see 6.3.41), or a 16-byte symmetric key + * (see 6.3.44). + * + * The ciphertext is 16 bytes longer than the corresponding plaintext. + * + * The plaintext resulting from a correct decryption of the + * ciphertext is a COER-encoded Ieee1609Dot2Data structure. 
+ *
+ * @param nonce: contains the nonce N as specified in 5.3.8.
+ *
+ * @param ccmCiphertext: contains the ciphertext C as specified in 5.3.8.
+ *
+ * @note In the name of this structure, "One28" indicates that the
+ * symmetric cipher block size is 128 bits. It happens to also be the case
+ * that the keys used for both AES-128-CCM and SM4-CCM are also 128 bits long.
+ * This is, however, not what "One28" refers to. Since the cipher is used in
+ * counter mode, i.e., as a stream cipher, the fact that that block size is 128
+ * bits affects only the size of the MAC and does not affect the size of the
+ * raw ciphertext.
+ */
+One28BitCcmCiphertext ::= SEQUENCE {
+  nonce OCTET STRING (SIZE (12)),
+  ccmCiphertext Opaque
+}
+
+/**
+ * @brief This type is defined only for backwards compatibility.
+ */
+Aes128CcmCiphertext ::= One28BitCcmCiphertext
+
+--***************************************************************************--
+-- Certificates and other Security Management --
+--***************************************************************************--
+
+/**
+ * @brief This structure is a profile of the structure CertificateBase, which
+ * specifies the valid combinations of fields to transmit implicit and
+ * explicit certificates.
+ *
+ * @note Canonicalization: This data structure is subject to canonicalization
+ * for the relevant operations specified in 6.1.2. The canonicalization
+ * applies to the CertificateBase.
+ */
+Certificate ::=
+  CertificateBase (ImplicitCertificate | ExplicitCertificate)
+
+TestCertificate ::= Certificate
+
+/**
+ * @brief This type is used for clarity of definitions.
+ */
+SequenceOfCertificate ::= SEQUENCE OF Certificate
+
+/**
+ * @brief The fields in this structure have the following meaning:
+ *
+ * @param version: contains the version of the certificate format. In this
+ * version of the data structures, this field is set to 3.
+ *
+ * @param type: states whether the certificate is implicit or explicit.
This + * field is set to explicit for explicit certificates and to implicit for + * implicit certificates. See ExplicitCertificate and ImplicitCertificate for + * more details. + * + * @param issuer: identifies the issuer of the certificate. + * + * @param toBeSigned: is the certificate contents. This field is an input to + * the hash when generating or verifying signatures for an explicit + * certificate, or generating or verifying the public key from the + * reconstruction value for an implicit certificate. The details of how this + * field are encoded are given in the description of the + * ToBeSignedCertificate type. + * + * @param signature: is included in an ExplicitCertificate. It is the + * signature, calculated by the signer identified in the issuer field, over + * the hash of toBeSigned. The hash is calculated as specified in 5.3.1, where: + * - Data input is the encoding of toBeSigned, canonicalized as described + * next. + * - Signer identifier input depends on the verification type, which in + * turn depends on the choice indicated by issuer. If the choice indicated by + * issuer is self, the verification type is self-signed and the signer + * identifier input is the empty string. If the choice indicated by issuer is + * not self, the verification type is certificate and the signer identifier + * input is the canonicalized COER encoding of the certificate indicated by + * issuer. The canonicalization is carried out as specified in the + * Canonicalization section of this subclause. + * + * @note Canonicalization: This data structure is subject to canonicalization + * for the relevant operations specified in 6.1.2. The canonicalization + * applies to the ToBeSignedCertificate and to the Signature. + * + * @note Whole-certificate hash: If the entirety of a certificate is hashed + * to calculate a HashedId3, HashedId8, or HashedId10, the algorithm used for + * this purpose is known as the whole-certificate hash. 
The method used to + * determine the whole-certificate hash algorithm is specified in 5.3.9.2. + */ +CertificateBase ::= SEQUENCE { + version Uint8(3), + type CertificateType, + issuer IssuerIdentifier, + toBeSigned ToBeSignedCertificate, + signature Signature OPTIONAL +} + +/** + * @brief This enumerated type indicates whether a certificate is explicit or + * implicit. + * + * @note Critical information fields: If present, this is a critical + * information field as defined in 5.2.5. An implementation that does not + * recognize the indicated CHOICE for this type when verifying a signed SPDU + * shall indicate that the signed SPDU is invalid in the sense of 4.2.2.3.2, + * that is, it is invalid in the sense that its validity cannot be + * established. + */ +CertificateType ::= ENUMERATED { + explicit, + implicit, + ... +} + +/** + * @brief This is a profile of the CertificateBase structure providing all + * the fields necessary for an implicit certificate, and no others. + */ +ImplicitCertificate ::= CertificateBase (WITH COMPONENTS {..., + type(implicit), + toBeSigned(WITH COMPONENTS {..., + verifyKeyIndicator(WITH COMPONENTS {reconstructionValue}) + }), + signature ABSENT +}) + +/** + * @brief This is a profile of the CertificateBase structure providing all + * the fields necessary for an explicit certificate, and no others. + */ +ExplicitCertificate ::= CertificateBase (WITH COMPONENTS {..., + type(explicit), + toBeSigned (WITH COMPONENTS {..., + verifyKeyIndicator(WITH COMPONENTS {verificationKey}) + }), + signature PRESENT +}) + +/** + * @brief This structure allows the recipient of a certificate to determine + * which keying material to use to authenticate the certificate. + * + * If the choice indicated is sha256AndDigest, sha384AndDigest, or + * sm3AndDigest: + * - The structure contains the HashedId8 of the issuing certificate. 
The
+ * HashedId8 is calculated with the whole-certificate hash algorithm,
+ * determined as described in 6.4.3, applied to the COER-encoded certificate,
+ * canonicalized as defined in the definition of Certificate.
+ * - The hash algorithm to be used to generate the hash of the certificate
+ * for verification is SHA-256 (in the case of sha256AndDigest), SM3 (in the
+ * case of sm3AndDigest) or SHA-384 (in the case of sha384AndDigest).
+ * - The certificate is to be verified with the public key of the
+ * indicated issuing certificate.
+ *
+ * If the choice indicated is self:
+ * - The structure indicates what hash algorithm is to be used to generate
+ * the hash of the certificate for verification.
+ * - The certificate is to be verified with the public key indicated by
+ * the verifyKeyIndicator field in the ToBeSignedCertificate.
+ *
+ * @note Critical information fields: If present, this is a critical
+ * information field as defined in 5.2.5. An implementation that does not
+ * recognize the indicated CHOICE for this type when verifying a signed SPDU
+ * shall indicate that the signed SPDU is invalid in the sense of 4.2.2.3.2,
+ * that is, it is invalid in the sense that its validity cannot be
+ * established.
+ */
+IssuerIdentifier ::= CHOICE {
+    sha256AndDigest  HashedId8,
+    self             HashAlgorithm,
+    ...,
+    sha384AndDigest  HashedId8,
+    sm3AndDigest     HashedId8
+}
+
+/**
+ * @brief The fields in the ToBeSignedCertificate structure have the
+ * following meaning:
+ *
+ * In other words, for implicit certificates, the value H (CertU) in SEC 4,
+ * section 3, is for purposes of this standard taken to be H [H
+ * (canonicalized ToBeSignedCertificate from the subordinate certificate) ||
+ * H (entirety of issuer Certificate)]. See 5.3.2 for further discussion,
+ * including material differences between this standard and SEC 4 regarding
+ * how the hash function output is converted from a bit string to an integer.
+ * + * @param id: contains information that is used to identify the certificate + * holder if necessary. + * + * @param cracaId: identifies the Certificate Revocation Authorization CA + * (CRACA) responsible for certificate revocation lists (CRLs) on which this + * certificate might appear. Use of the cracaId is specified in 5.1.3. The + * HashedId3 is calculated with the whole-certificate hash algorithm, + * determined as described in 6.4.3, applied to the COER-encoded certificate, + * canonicalized as defined in the definition of Certificate. + * + * @param crlSeries: represents the CRL series relevant to a particular + * Certificate Revocation Authorization CA (CRACA) on which the certificate + * might appear. Use of this field is specified in 5.1.3. + * + * @param validityPeriod: contains the validity period of the certificate. + * + * @param region: if present, indicates the validity region of the + * certificate. If it is omitted the validity region is determined as follows: + * - If the enclosing certificate is self-signed, i.e., the choice indicated + * by the issuer field in the enclosing certificate structure is self, the + * certificate is valid worldwide. + * - Otherwise, the certificate has the same validity region as the + * certificate that issued it. + * + * The above algorithm is applied recursively, i.e. if region is omitted from + * the issuing certificate, then the issuing certificate of that certificate is + * inspected to determine if region is present, and so on. A certificate, + * therefore, has global geographic validity as defined in 5.2.6.6.3.1 if + * region is not present in the certificate or in any certificate in its chain. + * Otherwise, i.e., if region is present in the certificate or in at least one + * certificate in its chain, the certificate has area validity as defined in + * 5.2.6.6.3.1. + * + * The use of the validity region to determine geographic consistency of an + * SPDU is specified in 5.2.6.6.3.1. 
The use of the validity region to + * determine geographic consistency of a subordinate certificate with an + * issuing certificate is specified in 5.1.2.4. + * + * @param assuranceLevel: indicates the assurance level of the certificate + * holder. + * + * @param appPermissions: indicates the permissions that the certificate + * holder has to sign application data with this certificate. A valid + * instance of appPermissions contains any particular Psid value in at most + * one entry. + * + * @param certIssuePermissions: indicates the permissions that the certificate + * holder has to sign certificates with this certificate. A valid instance of + * this array contains no more than one entry whose psidSspRange field + * indicates all. If the array has multiple entries and one entry has its + * psidSspRange field indicate all, then the entry indicating all specifies + * the permissions for all PSIDs other than the ones explicitly specified in + * the other entries. See the description of PsidGroupPermissions for further + * discussion. + * + * @param certRequestPermissions: indicates the permissions that the + * certificate holder can request in its certificate. A valid instance of this + * array contains no more than one entry whose psidSspRange field indicates + * all. If the array has multiple entries and one entry has its psidSspRange + * field indicate all, then the entry indicating all specifies the permissions + * for all PSIDs other than the ones explicitly specified in the other entries. + * See the description of PsidGroupPermissions for further discussion. + * + * @param canRequestRollover: indicates that the certificate may be used to + * sign a request for another certificate with the same permissions. This + * field is provided for future use and its use is not defined in this + * version of this standard. + * + * @param encryptionKey: contains a public key for encryption for which the + * certificate holder holds the corresponding private key. 
+ * + * @param verifyKeyIndicator: contains material that may be used to recover + * the public key that may be used to verify data signed by this certificate. + * + * @param flags: indicates additional yes/no properties of the certificate + * holder. The only bit with defined semantics in this string in this version + * of this standard is usesCubk. If set, the usesCubk bit indicates that the + * certificate holder supports the compact unified butterfly key response. + * Further material about the compact unified butterfly key response can be + * found in IEEE Std 1609.2.1. + * + * If this field is present, at least one of the bits in the field shall be + * non-zero. + * + * @note usesCubk is only relevant for CA certificates, and the only + * functionality defined associated with this field is associated with + * consistency checks on received certificate responses. No functionality + * associated with communications between peer SDEEs is defined associated + * with this field. + * + * @param appExtensions: indicates additional permissions that may be applied + * to application activities that the certificate holder is carrying out. + * + * @param certIssueExtensions: indicates additional permissions to issue + * certificates containing appExtensions. + * + * @param certRequestExtensions: indicates additional permissions to request + * certificates containing endEntityExtensions. + * + * @note In IEEE Std 1609.2-2022 these were not marked optional; they are in + * this version of the standard; this is technically not backwards compatible + * but in practice there are no scenarios in which a legacy system will break + * (because it would have to be the case that Issue or Request was included and + * App wasn't, but no issue or request extension values are currently defined). + * + * @note Issue and Request extensions are specified in this version of this + * standard for future use but do not currently have any values defined. 
The
+ * only certificate extension defined is OperatingOrganizationId and that can
+ * be issued by any CA. It can be taken as likely that future appExtensions
+ * will also be issuable by any CA, as otherwise consistency rules will differ
+ * between appExtensions, and so in practice these certIssueExtensions and
+ * certRequestExtensions fields will never be used. See Annex G for discussion
+ * of how the standard could in principle be extended to include extensions
+ * that do have a need to be validated up the chain.
+ *
+ * @note Calculating the hash of a certificate:
+ * For both implicit and explicit certificates, when the certificate
+ * is hashed to create or recover the public key (in the case of an implicit
+ * certificate) or to generate or verify the signature (in the case of an
+ * explicit certificate), the hash is Hash (Data input) || Hash (
+ * Signer identifier input), where:
+ * - Data input is the COER encoding of toBeSigned, canonicalized
+ * as described above.
+ * - Signer identifier input depends on the verification type,
+ * which in turn depends on the choice indicated by issuer. If the choice
+ * indicated by issuer is self, the verification type is self-signed and the
+ * signer identifier input is the empty string. If the choice indicated by
+ * issuer is not self, the verification type is certificate and the signer
+ * identifier input is the COER encoding of the canonicalization per 6.4.3 of
+ * the certificate indicated by issuer.
+ *
+ *
+ * @note Canonicalization: This data structure is subject to canonicalization
+ * for the relevant operations specified in 6.1.2. The canonicalization
+ * applies to the PublicEncryptionKey and to the VerificationKeyIndicator.
+ *
+ * If the PublicEncryptionKey contains a BasePublicEncryptionKey that is an
+ * elliptic curve point (i.e., of type EccP256CurvePoint or EccP384CurvePoint),
+ * then the elliptic curve point is encoded in compressed form, i.e., such
+ * that the choice indicated within the Ecc*CurvePoint is compressed-y-0 or
+ * compressed-y-1.
+ *
+ * @note Critical information fields:
+ * - If present, appPermissions is a critical information field as defined
+ * in 5.2.6. If an implementation of verification does not support the number
+ * of PsidSsp in the appPermissions field of a certificate that signed a
+ * signed SPDU, that implementation shall indicate that the signed SPDU is
+ * invalid in the sense of 4.2.2.3.2, that is, it is invalid in the sense
+ * that its validity cannot be established. A conformant implementation
+ * shall support appPermissions fields containing at least eight entries.
+ * It may be the case that an implementation of verification does not support
+ * the number of entries in the appPermissions field and the appPermissions
+ * field is not relevant to the verification: this will occur, for example,
+ * if the certificate in question is a CA certificate and so the
+ * certIssuePermissions field is relevant to the verification and the
+ * appPermissions field is not. In this case, whether the implementation
+ * indicates that the signed SPDU is valid (because it could validate all
+ * relevant fields) or invalid (because it could not parse the entire
+ * certificate) is implementation-specific.
+ * - If present, certIssuePermissions is a critical information field as
+ * defined in 5.2.6. If an implementation of verification does not support
+ * the number of PsidGroupPermissions in the certIssuePermissions field of a
+ * CA certificate in the chain of a signed SPDU, the implementation shall
+ * indicate that the signed SPDU is invalid in the sense of 4.2.2.3.2, that
+ * is, it is invalid in the sense that its validity cannot be established.
+ * A conformant implementation shall support certIssuePermissions fields
+ * containing at least eight entries.
+ * It may be the case that an implementation of verification does not support
+ * the number of entries in the certIssuePermissions field and the
+ * certIssuePermissions field is not relevant to the verification: this will
+ * occur, for example, if the certificate in question is the signing
+ * certificate for the SPDU and so the appPermissions field is relevant to
+ * the verification and the certIssuePermissions field is not. In this case,
+ * whether the implementation indicates that the signed SPDU is valid
+ * (because it could validate all relevant fields) or invalid (because it
+ * could not parse the entire certificate) is implementation-specific.
+ * - If present, certRequestPermissions is a critical information field as
+ * defined in 5.2.6. If an implementation of verification of a certificate
+ * request does not support the number of PsidGroupPermissions in
+ * certRequestPermissions, the implementation shall indicate that the signed
+ * SPDU is invalid in the sense of 4.2.2.3.2, that is, it is invalid in the
+ * sense that its validity cannot be established. A conformant implementation
+ * shall support certRequestPermissions fields containing at least eight
+ * entries.
+ * It may be the case that an implementation of verification does not support
+ * the number of entries in the certRequestPermissions field and the
+ * certRequestPermissions field is not relevant to the verification: this will
+ * occur, for example, if the certificate in question is the signing
+ * certificate for the SPDU and so the appPermissions field is relevant to
+ * the verification and the certRequestPermissions field is not. In this
+ * case, whether the implementation indicates that the signed SPDU is valid
+ * (because it could validate all relevant fields) or invalid (because it
+ * could not parse the entire certificate) is implementation-specific.
+ */ +ToBeSignedCertificate ::= SEQUENCE { + id CertificateId, + cracaId HashedId3, + crlSeries CrlSeries, + validityPeriod ValidityPeriod, + region GeographicRegion OPTIONAL, + assuranceLevel SubjectAssurance OPTIONAL, + appPermissions SequenceOfPsidSsp OPTIONAL, + certIssuePermissions SequenceOfPsidGroupPermissions OPTIONAL, + certRequestPermissions SequenceOfPsidGroupPermissions OPTIONAL, + canRequestRollover NULL OPTIONAL, + encryptionKey PublicEncryptionKey OPTIONAL, + verifyKeyIndicator VerificationKeyIndicator, + ..., + flags BIT STRING {usesCubk (0)} (SIZE (8)) OPTIONAL, + appExtensions SequenceOfAppExtensions OPTIONAL, + certIssueExtensions SequenceOfCertIssueExtensions OPTIONAL, + certRequestExtension SequenceOfCertRequestExtensions OPTIONAL +} +(WITH COMPONENTS { ..., appPermissions PRESENT} | + WITH COMPONENTS { ..., certIssuePermissions PRESENT} | + WITH COMPONENTS { ..., certRequestPermissions PRESENT}) + +/** + * @brief This structure contains information that is used to identify the + * certificate holder if necessary. + * + * @param linkageData: is used to identify the certificate for revocation + * purposes in the case of certificates that appear on linked certificate + * CRLs. See 5.1.3 and 7.3 for further discussion. + * + * @param name: is used to identify the certificate holder in the case of + * non-anonymous certificates. The contents of this field are a matter of + * policy and are expected to be human-readable. + * + * @param binaryId: supports identifiers that are not human-readable. + * + * @param none: indicates that the certificate does not include an identifier. + * + * @note Critical information fields: + * - If present, this is a critical information field as defined in 5.2.6. + * An implementation that does not recognize the choice indicated in this + * field shall reject a signed SPDU as invalid. + */ +CertificateId ::= CHOICE { + linkageData LinkageData, + name Hostname, + binaryId OCTET STRING(SIZE(1..64)), + none NULL, + ... 
+} + +/** + * @brief This structure contains information that is matched against + * information obtained from a linkage ID-based CRL to determine whether the + * containing certificate has been revoked. See 5.1.3.4 and 7.3 for details + * of use. + */ +LinkageData ::= SEQUENCE { + iCert IValue, + linkage-value LinkageValue, + group-linkage-value GroupLinkageValue OPTIONAL +} + +/** + * @brief This type indicates which type of permissions may appear in + * end-entity certificates the chain of whose permissions passes through the + * PsidGroupPermissions field containing this value. If app is indicated, the + * end-entity certificate may contain an appPermissions field. If enroll is + * indicated, the end-entity certificate may contain a certRequestPermissions + * field. + */ +EndEntityType ::= + BIT STRING {app (0), enrol (1) } (SIZE (8)) (ALL EXCEPT {}) + +/** + * @brief This structure states the permissions that a certificate holder has + * with respect to issuing and requesting certificates for a particular set + * of PSIDs. For examples, see D.5.3 and D.5.4. + * + * @param subjectPermissions: indicates PSIDs and SSP Ranges covered by this + * field. + * + * @param minChainLength: and chainLengthRange indicate how long the + * certificate chain from this certificate to the end-entity certificate is + * permitted to be. As specified in 5.1.2.1, the length of the certificate + * chain is the number of certificates "below" this certificate in the chain, + * down to and including the end-entity certificate. The length is permitted + * to be (a) greater than or equal to minChainLength certificates and (b) + * less than or equal to minChainLength + chainLengthRange certificates. A + * value of 0 for minChainLength is not permitted when this type appears in + * the certIssuePermissions field of a ToBeSignedCertificate; a certificate + * that has a value of 0 for this field is invalid. 
The value -1 for + * chainLengthRange is a special case: if the value of chainLengthRange is -1 + * it indicates that the certificate chain may be any length equal to or + * greater than minChainLength. See the examples below for further discussion. + * + * @param eeType: takes one or more of the values app and enroll and indicates + * the type of certificates or requests that this instance of + * PsidGroupPermissions in the certificate is entitled to authorize. + * Different instances of PsidGroupPermissions within a ToBeSignedCertificate + * may have different values for eeType. + * - If this field indicates app, the chain is allowed to end in an + * authorization certificate, i.e., a certificate in which these permissions + * appear in an appPermissions field (in other words, if the field does not + * indicate app and the chain ends in an authorization certificate, the + * chain shall be considered invalid). + * - If this field indicates enroll, the chain is allowed to end in an + * enrollment certificate, i.e., a certificate in which these permissions + * appear in a certRequestPermissions permissions field (in other words, if the + * field does not indicate enroll and the chain ends in an enrollment + * certificate, the chain shall be considered invalid). + */ +PsidGroupPermissions ::= SEQUENCE { + subjectPermissions SubjectPermissions, + minChainLength INTEGER DEFAULT 1, + chainLengthRange INTEGER DEFAULT 0, + eeType EndEntityType DEFAULT {app} +} + +/** + * @brief This type is used for clarity of definitions. + */ +SequenceOfPsidGroupPermissions ::= SEQUENCE OF PsidGroupPermissions + +/** + * @brief This indicates the PSIDs and associated SSPs for which certificate + * issuance or request permissions are granted by a PsidGroupPermissions + * structure. If this takes the value explicit, the enclosing + * PsidGroupPermissions structure grants certificate issuance or request + * permissions for the indicated PSIDs and SSP Ranges. 
If this takes the
+ * value all, the enclosing PsidGroupPermissions structure grants certificate
+ * issuance or request permissions for all PSIDs not indicated by other
+ * PsidGroupPermissions in the same certIssuePermissions or
+ * certRequestPermissions field.
+ *
+ * @note Critical information fields:
+ * - If present, this is a critical information field as defined in 5.2.6.
+ * An implementation that does not recognize the indicated CHOICE when
+ * verifying a signed SPDU shall indicate that the signed SPDU is
+ * invalid in the sense of 4.2.2.3.2, that is, it is invalid in the sense that
+ * its validity cannot be established.
+ * - If present, explicit is a critical information field as defined in
+ * 5.2.6. An implementation that does not support the number of PsidSspRange
+ * in explicit when verifying a signed SPDU shall indicate that the signed
+ * SPDU is invalid in the sense of 4.2.2.3.2, that is, it is invalid in the
+ * sense that its validity cannot be established. A conformant implementation
+ * shall support explicit fields containing at least eight entries.
+ */
+SubjectPermissions ::= CHOICE {
+    explicit  SequenceOfPsidSspRange,
+    all       NULL,
+    ...
+}
+
+/**
+ * @brief The contents of this field depend on whether the certificate is an
+ * implicit or an explicit certificate.
+ *
+ * @param verificationKey: is included in explicit certificates. It contains
+ * the public key to be used to verify signatures generated by the holder of
+ * the Certificate.
+ *
+ * @param reconstructionValue: is included in implicit certificates. It
+ * contains the reconstruction value, which is used to recover the public key
+ * as specified in SEC 4 and 5.3.2.
+ *
+ * @note Critical information fields: If present, this is a critical
+ * information field as defined in 5.2.5.
An implementation that does not
+ * recognize the indicated CHOICE for this type when verifying a signed SPDU
+ * shall indicate that the signed SPDU is invalid in the sense of 4.2.2.3.2,
+ * that is, it is invalid in the sense that its validity cannot be
+ * established.
+ *
+ * @note Canonicalization: This data structure is subject to canonicalization
+ * for the relevant operations specified in 6.1.2. The canonicalization
+ * applies to the PublicVerificationKey and to the EccP256CurvePoint. The
+ * EccP256CurvePoint is encoded in compressed form, i.e., such that the
+ * choice indicated within the EccP256CurvePoint is compressed-y-0 or
+ * compressed-y-1.
+ */
+VerificationKeyIndicator ::= CHOICE {
+    verificationKey      PublicVerificationKey,
+    reconstructionValue  EccP256CurvePoint,
+    ...
+}
+
+/**
+ * @brief This structure uses the parameterized type Extension to define an
+ * Ieee1609ContributedHeaderInfoExtension as an open Extension Content field
+ * identified by an extension identifier. The extension identifier value is
+ * unique to extensions defined by ETSI and need not be unique among all
+ * extension identifier values defined by all contributing organizations.
+ */
+Ieee1609ContributedHeaderInfoExtension ::= Extension
+
+/**
+ * @brief This is an integer used to identify an
+ * Ieee1609ContributedHeaderInfoExtension.
+ */
+Ieee1609HeaderInfoExtensionId ::= ExtId
+
+p2pcd8ByteLearningRequestId Ieee1609HeaderInfoExtensionId ::= 1
+
+/**
+ * @brief This structure contains any AppExtensions that apply to the
+ * certificate holder. As specified in 5.2.4.2.3, each individual
+ * AppExtension type is associated with consistency conditions, specific to
+ * that extension, that govern its consistency with SPDUs signed by the
+ * certificate holder and with the CertIssueExtensions in the CA certificates
+ * in that certificate holder's chain.
Those consistency conditions are
+ * specified for each individual AppExtension below.
+ */
+SequenceOfAppExtensions ::= SEQUENCE (SIZE(1..MAX)) OF AppExtension
+
+/**
+ * @brief This structure contains an individual AppExtension. AppExtensions
+ * specified in this standard are drawn from the ASN.1 Information Object Set
+ * SetCertExtensions. This set, and its use in the AppExtension type, is
+ * structured so that each AppExtension is associated with a
+ * CertIssueExtension and a CertRequestExtension and all are identified by
+ * the same id value. In this structure:
+ *
+ * @param id: identifies the extension type.
+ *
+ * @param content: provides the content of the extension.
+ */
+AppExtension ::= SEQUENCE {
+    id       ExtId,
+    content  OCTET STRING
+}
+
+/**
+ * @brief This field contains any CertIssueExtensions that apply to the
+ * certificate holder. As specified in 5.2.4.2.3, each individual
+ * CertIssueExtension type is associated with consistency conditions,
+ * specific to that extension, that govern its consistency with
+ * AppExtensions in certificates issued by the certificate holder and with
+ * the CertIssueExtensions in the CA certificates in that certificate
+ * holder's chain. Those consistency conditions are specified for each
+ * individual CertIssueExtension below.
+ */
+SequenceOfCertIssueExtensions ::=
+    SEQUENCE (SIZE(1..MAX)) OF CertIssueExtension
+
+/**
+ * @brief This field contains an individual CertIssueExtension.
+ * CertIssueExtensions specified in this standard are drawn from the ASN.1
+ * Information Object Set SetCertExtensions. This set, and its use in the
+ * CertIssueExtension type, is structured so that each CertIssueExtension
+ * is associated with a AppExtension and a CertRequestExtension and all are
+ * identified by the same id value. In this structure:
+ *
+ * @param id: identifies the extension type.
+ *
+ * @param permissions: indicates the permissions. Within this field.
+ *
+ * - all indicates that the certificate is entitled to issue all values of
+ * the extension.
+ * - specific is used to specify which values of the extension may be
+ * issued in the case where all does not apply.
+ */
+CertIssueExtension ::= SEQUENCE {
+    id           ExtId,
+    permissions  CHOICE {
+        specific  OCTET STRING,
+        all       NULL
+    }
+}
+
+/**
+ * @brief This field contains any CertRequestExtensions that apply to the
+ * certificate holder. As specified in 5.2.4.2.3, each individual
+ * CertRequestExtension type is associated with consistency conditions,
+ * specific to that extension, that govern its consistency with
+ * AppExtensions in certificates issued by the certificate holder and with
+ * the CertRequestExtensions in the CA certificates in that certificate
+ * holder's chain. Those consistency conditions are specified for each
+ * individual CertRequestExtension below.
+ */
+SequenceOfCertRequestExtensions ::= SEQUENCE (SIZE(1..MAX)) OF CertRequestExtension
+
+/**
+ * @brief This field contains an individual CertRequestExtension.
+ * CertRequestExtensions specified in this standard are drawn from the
+ * ASN.1 Information Object Set SetCertExtensions. This set, and its use in
+ * the CertRequestExtension type, is structured so that each
+ * CertRequestExtension is associated with a AppExtension and a
+ * CertRequestExtension and all are identified by the same id value. In this
+ * structure:
+ *
+ * @param id: identifies the extension type.
+ *
+ * @param permissions: indicates the permissions. Within this field.
+ * - all indicates that the certificate is entitled to issue all values of
+ * the extension.
+ * - specific is used to specify which values of the extension may be
+ * issued in the case where all does not apply.
+ */
+CertRequestExtension ::= SEQUENCE {
+    id           ExtId,
+    permissions  CHOICE {
+        content  OCTET STRING,
+        all      NULL
+    }
+}
+
+/**
+ * @brief This type is the AppExtension used to identify an operating
+ * organization.
See 5.2.6.6.7.2 for discussion of how the + * OperatingOrganizationId can be integrated into the SPDU payload by an + * SDEE specifier. + * + * A certificate may have an OperatingOrganizationId associated with it even if + * the certificate does not contain an OperatingOrganizationId field. If the + * certificate does not contain an OperatingOrganizationId field, the + * associated OperatingOrganizationId is determined as follows: + * + * - If the certificate is self-signed, that is, the choice indicated by the + * issuer field in the enclosing certificate structure is self, the + * certificate has no OperatingOrganizationId associated with it. + * + * - Otherwise, the certificate has the same OperatingOrganizationId as + * the certificate that issued it. + * + * The above algorithm is applied recursively, i.e. if + * OperatingOrganizationId is omitted from the issuing certificate, then + * the issuing certificate of that certificate is inspected to determine if + * OperatingOrganizationId is present, and so on. + * + * Consistency with SPDU payload. As discussed in 5.2.6.6.7.2, the SPDU payload + * design might or might not include OperatingOrganizationId material. + * + * If OperatingOrganizationId material appears in the SPDU payload, then the + * SDEE specification is expected to state that consistency is required between + * the payload and the certificate (although, as discussed in 5.2.6.6.7.2, this + * approach is not recommended). 
+ * + * If consistency is required between the OperatingOrganizationID + * and operating organization information represented by an OBJECT + * IDENTIFIER in the SPDU payload, then the SDEE specification for that SPDU is + * required to specify how the SPDU can be used to determine an OBJECT + * IDENTIFIER of the same length as the OperatingOrganizationId in the + * certificate (e.g., by including the full OBJECT IDENTIFIER in the SPDU, or + * by including a RELATIVE-OID with clear instructions about how a full OBJECT + * IDENTIFIER can be obtained from the RELATIVE-OID, or by truncating an + * OBJECT IDENTIFIER from the message to be the same length as the OBJECT + * IDENTIFIER in the certificate). The SPDU is then consistent with this type + * if the OBJECT IDENTIFIER determined from the SPDU is identical to the OBJECT + * IDENTIFIER contained in this field. + * + * Consistency with issuing certificate. This AppExtension does not have + * consistency conditions with a corresponding CertIssueExtension. It can + * appear in a certificate issued by any CA. + * + * Consistency with certificate request signing certificate. This AppExtension + * does not have consistency conditions with a corresponding + * CertRequestExtension. It can appear in a certificate request signed by any + * certificate containing certRequestPermissions, i.e. by any enrollment + * certificate. 
+ */ +OperatingOrganizationId ::= OBJECT IDENTIFIER + +certExtId-OperatingOrganization ExtId ::= 1 + +END +""" diff --git a/src/flexstack/security/asn1/ieee1609dot2/Ieee1609Dot2BaseTypes.py b/src/flexstack/security/asn1/ieee1609dot2/Ieee1609Dot2BaseTypes.py new file mode 100644 index 0000000..edad9ff --- /dev/null +++ b/src/flexstack/security/asn1/ieee1609dot2/Ieee1609Dot2BaseTypes.py @@ -0,0 +1,1399 @@ +# pylint: skip-file +IEEE_1609_DOT_2_BASE_TYPES_ASN1_DESCRIPTIONS = """ +--***************************************************************************-- +-- IEEE Std 1609.2: Base Data Types -- +--***************************************************************************-- + +/** + * @note Section references in this file are to clauses in IEEE Std + * 1609.2 unless indicated otherwise. Full forms of acronyms and + * abbreviations used in this file are specified in 3.2. + */ + +Ieee1609Dot2BaseTypes {iso(1) identified-organization(3) ieee(111) + standards-association-numbered-series-standards(2) wave-stds(1609) dot2(2) + base(1) base-types(2) major-version-2(2) minor-version-5(5)} + +DEFINITIONS AUTOMATIC TAGS ::= BEGIN + +--***************************************************************************-- +-- Integer Types -- +--***************************************************************************-- + +/** + * @brief This atomic type is used in the definition of other data structures. + * It is for non-negative integers up to 7, i.e., (hex)07. + */ +Uint3 ::= INTEGER (0..7) + +/** + * @brief This atomic type is used in the definition of other data structures. + * It is for non-negative integers up to 255, i.e., (hex)ff. + */ +Uint8 ::= INTEGER (0..255) + +/** + * @brief This atomic type is used in the definition of other data structures. + * It is for non-negative integers up to 65,535, i.e., (hex)ff ff. + */ +Uint16 ::= INTEGER (0..65535) + +/** + * @brief This atomic type is used in the definition of other data structures. 
+ * It is for non-negative integers up to 4,294,967,295, i.e., + * (hex)ff ff ff ff. + */ +Uint32 ::= INTEGER (0..4294967295) + +/** + * @brief This atomic type is used in the definition of other data structures. + * It is for non-negative integers up to 18,446,744,073,709,551,615, i.e., + * (hex)ff ff ff ff ff ff ff ff. + */ +Uint64 ::= INTEGER (0..18446744073709551615) + +/** + * @brief This type is used for clarity of definitions. + */ +SequenceOfUint8 ::= SEQUENCE OF Uint8 + +/** + * @brief This type is used for clarity of definitions. + */ +SequenceOfUint16 ::= SEQUENCE OF Uint16 + + +--***************************************************************************-- +-- OCTET STRING Types -- +--***************************************************************************-- + +/** + * @brief This is a synonym for ASN.1 OCTET STRING, and is used in the + * definition of other data structures. + */ +Opaque ::= OCTET STRING + +/** + * @brief This type contains the truncated hash of another data structure. + * The HashedId3 for a given data structure is calculated by calculating the + * hash of the encoded data structure and taking the low-order three bytes of + * the hash output. The low-order three bytes are the last three bytes of the + * 32-byte hash when represented in network byte order. If the data structure + * is subject to canonicalization it is canonicalized before hashing. See + * Example below. + * + * The hash algorithm to be used to calculate a HashedId3 within a + * structure depends on the context. In this standard, for each structure + * that includes a HashedId3 field, the corresponding text indicates how the + * hash algorithm is determined. See also the discussion in 5.3.9. + * + * Example: Consider the SHA-256 hash of the empty string: + * + * SHA-256("") = + * e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 + * + * The HashedId3 derived from this hash corresponds to the following: + * + * HashedId3 = 52b855. 
+ */ +HashedId3 ::= OCTET STRING (SIZE(3)) + +/** + * @brief This type is used for clarity of definitions. + */ +SequenceOfHashedId3 ::= SEQUENCE OF HashedId3 + +/** + * @brief This type contains the truncated hash of another data structure. + * The HashedId8 for a given data structure is calculated by calculating the + * hash of the encoded data structure and taking the low-order eight bytes of + * the hash output. The low-order eight bytes are the last eight bytes of the + * hash when represented in network byte order. If the data structure + * is subject to canonicalization it is canonicalized before hashing. See + * Example below. + * + * The hash algorithm to be used to calculate a HashedId8 within a + * structure depends on the context. In this standard, for each structure + * that includes a HashedId8 field, the corresponding text indicates how the + * hash algorithm is determined. See also the discussion in 5.3.9. + * + * Example: Consider the SHA-256 hash of the empty string: + * + * SHA-256("") = + * e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 + * + * The HashedId8 derived from this hash corresponds to the following: + * + * HashedId8 = a495991b7852b855. + */ +HashedId8 ::= OCTET STRING (SIZE(8)) + +/** + * @brief This type contains the truncated hash of another data structure. + * The HashedId10 for a given data structure is calculated by calculating the + * hash of the encoded data structure and taking the low-order ten bytes of + * the hash output. The low-order ten bytes are the last ten bytes of the + * hash when represented in network byte order. If the data structure + * is subject to canonicalization it is canonicalized before hashing. See + * Example below. + * + * The hash algorithm to be used to calculate a HashedId10 within a + * structure depends on the context. In this standard, for each structure + * that includes a HashedId10 field, the corresponding text indicates how the + * hash algorithm is determined. 
See also the discussion in 5.3.9. + * + * Example: Consider the SHA-256 hash of the empty string: + * + * SHA-256("") = + * e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 + * + * The HashedId10 derived from this hash corresponds to the following: + * + * HashedId10 = 934ca495991b7852b855. + */ +HashedId10 ::= OCTET STRING (SIZE(10)) + +/** + * @brief This data structure contains the truncated hash of another data + * structure. The HashedId32 for a given data structure is calculated by + * calculating the hash of the encoded data structure and taking the + * low-order 32 bytes of the hash output. The low-order 32 bytes are the last + * 32 bytes of the hash when represented in network byte order. If the data + * structure is subject to canonicalization it is canonicalized before + * hashing. See Example below. + * + * The hash algorithm to be used to calculate a HashedId32 within a + * structure depends on the context. In this standard, for each structure + * that includes a HashedId32 field, the corresponding text indicates how the + * hash algorithm is determined. See also the discussion in 5.3.9. + * + * Example: Consider the SHA-256 hash of the empty string: + * + * SHA-256("") = + * e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 + * + * The HashedId32 derived from this hash corresponds to the following: + * + * HashedId32 = e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b8 + * 55. + */ +HashedId32 ::= OCTET STRING (SIZE(32)) + +/** + * @brief This data structure contains the truncated hash of another data + * structure. The HashedId48 for a given data structure is calculated by + * calculating the hash of the encoded data structure and taking the + * low-order 48 bytes of the hash output. The low-order 48 bytes are the last + * 48 bytes of the hash when represented in network byte order. If the data + * structure is subject to canonicalization it is canonicalized before + * hashing. See Example below. 
+ * + * The hash algorithm to be used to calculate a HashedId48 within a + * structure depends on the context. In this standard, for each structure + * that includes a HashedId48 field, the corresponding text indicates how the + * hash algorithm is determined. See also the discussion in 5.3.9. + * + * Example: Consider the SHA-384 hash of the empty string: + * + * SHA-384("") = 38b060a751ac96384cd9327eb1b1e36a21fdb71114be07434c0cc7bf63f6 + * e1da274edebfe76f65fbd51ad2f14898b95b + * + * The HashedId48 derived from this hash corresponds to the following: + * + * HashedId48 = 38b060a751ac96384cd9327eb1b1e36a21fdb71114be07434c0cc7bf63f6e + * 1da274edebfe76f65fbd51ad2f14898b95b. + */ +HashedId48 ::= OCTET STRING(SIZE(48)) + + +--***************************************************************************-- +-- Time Structures -- +--***************************************************************************-- + +/** + * @brief This type gives the number of (TAI) seconds since 00:00:00 UTC, 1 + * January 2004. + */ +Time32 ::= Uint32 + +/** + * @brief This data structure is a 64-bit integer giving an estimate of the + * number of (TAI) microseconds since 00:00:00 UTC, 1 January 2004. + */ +Time64 ::= Uint64 + +/** + * @brief This type gives the validity period of a certificate. The start of + * the validity period is given by start and the end is given by + * start + duration. + */ +ValidityPeriod ::= SEQUENCE { + start Time32, + duration Duration +} + +/** + * @brief This structure represents the duration of validity of a + * certificate. The Uint16 value is the duration, given in the units denoted + * by the indicated choice. A year is considered to be 31556952 seconds, + * which is the average number of seconds in a year. + * + * @note Years can be mapped more closely to wall-clock days using the hours + * choice for up to 7 years and the sixtyHours choice for up to 448 years. 
+ */ +Duration ::= CHOICE { + microseconds Uint16, + milliseconds Uint16, + seconds Uint16, + minutes Uint16, + hours Uint16, + sixtyHours Uint16, + years Uint16 +} + + +--***************************************************************************-- +-- Location Structures -- +--***************************************************************************-- + +/** + * @brief This structure represents a geographic region of a specified form. + * A certificate is not valid if any part of the region indicated in its + * scope field lies outside the region indicated in the scope of its issuer. + * + * @param circularRegion: contains a single instance of the CircularRegion + * structure. + * + * @param rectangularRegion: is an array of RectangularRegion structures + * containing at least one entry. This field is interpreted as a series of + * rectangles, which may overlap or be disjoint. The permitted region is any + * point within any of the rectangles. + * + * @param polygonalRegion: contains a single instance of the PolygonalRegion + * structure. + * + * @param identifiedRegion: is an array of IdentifiedRegion structures + * containing at least one entry. The permitted region is any point within + * any of the identified regions. + * + * @note Critical information fields: + * - If present, this is a critical information field as defined in 5.2.6. + * An implementation that does not recognize the indicated CHOICE when + * verifying a signed SPDU shall indicate that the signed SPDU is invalid in + * the sense of 4.2.2.3.2, that is, it is invalid in the sense that its + * validity cannot be established. + * - If selected, rectangularRegion is a critical information field as + * defined in 5.2.6. 
An implementation that does not support the number of
+ * RectangularRegion in rectangularRegions when verifying a signed SPDU shall
+ * indicate that the signed SPDU is invalid in the sense of 4.2.2.3.2, that
+ * is, it is invalid in the sense that its validity cannot be established.
+ * A conformant implementation shall support rectangularRegions fields
+ * containing at least eight entries.
+ * - If selected, identifiedRegion is a critical information field as
+ * defined in 5.2.6. An implementation that does not support the number of
+ * IdentifiedRegion in identifiedRegion shall reject the signed SPDU as
+ * invalid in the sense of 4.2.2.3.2, that is, it is invalid in the sense
+ * that its validity cannot be established. A conformant implementation shall
+ * support identifiedRegion fields containing at least eight entries.
+ */
+GeographicRegion ::= CHOICE {
+ circularRegion CircularRegion,
+ rectangularRegion SequenceOfRectangularRegion,
+ polygonalRegion PolygonalRegion,
+ identifiedRegion SequenceOfIdentifiedRegion,
+ ...
+}
+
+/**
+ * @brief This structure specifies a circle with its center at center, its
+ * radius given in meters, and located tangential to the reference ellipsoid.
+ * The indicated region is all the points on the surface of the reference
+ * ellipsoid whose distance to the center point over the reference ellipsoid
+ * is less than or equal to the radius. A point which contains an elevation
+ * component is considered to be within the circular region if its horizontal
+ * projection onto the reference ellipsoid lies within the region.
+ */
+CircularRegion ::= SEQUENCE {
+ center TwoDLocation,
+ radius Uint16
+}
+
+/**
+ * @brief This structure specifies a "rectangle" on the surface of the WGS84 ellipsoid where the
+ * sides are given by lines of constant latitude or longitude. 
+ * A point which contains an elevation component is considered to be within the rectangular region + * if its horizontal projection onto the reference ellipsoid lies within the region. + * A RectangularRegion is invalid if the northWest value is south of the southEast value, or if the + * latitude values in the two points are equal, or if the longitude values in the two points are + * equal; otherwise it is valid. A certificate that contains an invalid RectangularRegion is invalid. + * + * @param northWest: is the north-west corner of the rectangle. + * + * @param southEast is the south-east corner of the rectangle. + */ +RectangularRegion ::= SEQUENCE { + northWest TwoDLocation, + southEast TwoDLocation +} + +/** + * @brief This type is used for clarity of definitions. + */ +SequenceOfRectangularRegion ::= SEQUENCE OF RectangularRegion + +/** + * @brief This structure defines a region using a series of distinct + * geographic points, defined on the surface of the reference ellipsoid. The + * region is specified by connecting the points in the order they appear, + * with each pair of points connected by the geodesic on the reference + * ellipsoid. The polygon is completed by connecting the final point to the + * first point. The allowed region is the interior of the polygon and its + * boundary. + * + * A point which contains an elevation component is considered to be + * within the polygonal region if its horizontal projection onto the + * reference ellipsoid lies within the region. + * + * A valid PolygonalRegion contains at least three points. In a valid + * PolygonalRegion, the implied lines that make up the sides of the polygon + * do not intersect. + * + * @note This type does not support enclaves / exclaves. This might be + * addressed in a future version of this standard. + * + * @note Critical information fields: If present, this is a critical + * information field as defined in 5.2.6. 
An implementation that does not + * support the number of TwoDLocation in the PolygonalRegion when verifying a + * signed SPDU shall indicate that the signed SPDU is invalid. A compliant + * implementation shall support PolygonalRegions containing at least eight + * TwoDLocation entries. + */ +PolygonalRegion ::= SEQUENCE SIZE (3..MAX) OF TwoDLocation + +/** + * @brief This structure is used to define validity regions for use in + * certificates. The latitude and longitude fields contain the latitude and + * longitude as defined above. + * + * @note This data structure is consistent with the location encoding + * used in SAE J2735, except that values 900 000 001 for latitude (used to + * indicate that the latitude was not available) and 1 800 000 001 for + * longitude (used to indicate that the longitude was not available) are not + * valid. + */ +TwoDLocation ::= SEQUENCE { + latitude Latitude, + longitude Longitude +} + +/** + * @brief This structure indicates the region of validity of a certificate + * using region identifiers. + * A conformant implementation that supports this type shall support at least + * one of the possible CHOICE values. The Protocol Implementation Conformance + * Statement (PICS) provided in Annex A allows an implementation to state + * which CountryOnly values it recognizes. + * + * @param countryOnly: indicates that only a country (or a geographic entity + * included in a country list) is given. + * + * @param countryAndRegions: indicates that one or more top-level regions + * within a country (as defined by the region listing associated with that + * country) is given. + * + * @param countryAndSubregions: indicates that one or more regions smaller + * than the top-level regions within a country (as defined by the region + * listing associated with that country) is given. + * + * Critical information fields: If present, this is a critical + * information field as defined in 5.2.6. 
An implementation that does not
+ * recognize the indicated CHOICE when verifying a signed SPDU shall indicate
+ * that the signed SPDU is invalid in the sense of 4.2.2.3.2, that is, it is
+ * invalid in the sense that its validity cannot be established.
+ */
+IdentifiedRegion ::= CHOICE {
+ countryOnly UnCountryId,
+ countryAndRegions CountryAndRegions,
+ countryAndSubregions CountryAndSubregions,
+ ...
+}
+
+
+/**
+ * @brief This type is used for clarity of definitions.
+ */
+SequenceOfIdentifiedRegion ::= SEQUENCE OF IdentifiedRegion
+
+/**
+ * @brief This type contains the integer representation of the country or
+ * area identifier as defined by the United Nations Statistics Division in
+ * October 2013 (see normative references in Clause 0).
+ * A conformant implementation that implements IdentifiedRegion shall
+ * recognize (in the sense of "be able to determine whether a two dimensional
+ * location lies inside or outside the borders identified by") at least one
+ * value of UnCountryId. The Protocol Implementation Conformance Statement
+ * (PICS) provided in Annex A allows an implementation to state which
+ * UnCountryId values it recognizes.
+ * Since 2013 and before the publication of this version of this standard,
+ * three changes have been made to the country code list, to define the
+ * region "sub-Saharan Africa" and remove the "developed regions", and
+ * "developing regions". A conformant implementation may recognize these
+ * region identifiers in the sense defined in the previous paragraph. 
+ * If a verifying implementation is required to check that relevant + * geographic information in a signed SPDU is consistent with a certificate + * containing one or more instances of this type, then the SDS is permitted + * to indicate that the signed SPDU is valid even if some instances of this + * type are unrecognized in the sense defined above, so long as the + * recognized instances of this type completely contain the relevant + * geographic information. Informally, if the recognized values in the + * certificate allow the SDS to determine that the SPDU is valid, then it + * can make that determination even if there are also unrecognized values in + * the certificate. This field is therefore not a "critical information + * field" as defined in 5.2.6, because unrecognized values are permitted so + * long as the validity of the SPDU can be established with the recognized + * values. However, as discussed in 5.2.6, the presence of an unrecognized + * value in a certificate can make it impossible to determine whether the + * certificate and the SPDU are valid. + */ +UnCountryId ::= Uint16 + +/** + * @brief This type is defined only for backwards compatibility. + */ +CountryOnly ::= UnCountryId + +/** + * @brief A conformant implementation that supports CountryAndRegions shall + * support a regions field containing at least eight entries. + * A conformant implementation that implements this type shall recognize + * (in the sense of "be able to determine whether a two dimensional location + * lies inside or outside the borders identified by") at least one value of + * UnCountryId and at least one value for a region within the country + * indicated by that recognized UnCountryId value. In this version of this + * standard, the only means to satisfy this is for a conformant + * implementation to recognize the value of UnCountryId indicating USA and + * at least one of the FIPS state codes for US states. 
The Protocol + * Implementation Conformance Statement (PICS) provided in Annex A allows + * an implementation to state which UnCountryId values it recognizes and + * which region values are recognized within that country. + * If a verifying implementation is required to check that relevant + * geographic information in a signed SPDU is consistent with a certificate + * containing one or more instances of this type, then the SDS is permitted + * to indicate that the signed SPDU is valid even if some values of country + * or within regions are unrecognized in the sense defined above, so long + * as the recognized instances of this type completely contain the relevant + * geographic information. Informally, if the recognized values in the + * certificate allow the SDS to determine that the SPDU is valid, then it + * can make that determination even if there are also unrecognized values + * in the certificate. This field is therefore not a "critical information + * field" as defined in 5.2.6, because unrecognized values are permitted so + * long as the validity of the SPDU can be established with the recognized + * values. However, as discussed in 5.2.6, the presence of an unrecognized + * value in a certificate can make it impossible to determine whether the + * certificate is valid and so whether the SPDU is valid. + * In this type: + * + * @param countryOnly: is a UnCountryId as defined above. + * + * @param regions: identifies one or more regions within the country. If + * country indicates the United States of America, the values in this field + * identify the state or statistically equivalent entity using the integer + * version of the 2010 FIPS codes as provided by the U.S. Census Bureau + * (see normative references in Clause 0). For other values of country, the + * meaning of region is not defined in this version of this standard. 
+ */
+CountryAndRegions ::= SEQUENCE {
+ countryOnly UnCountryId,
+ regions SequenceOfUint8
+}
+
+/**
+ * @brief A conformant implementation that supports CountryAndSubregions
+ * shall support a regionAndSubregions field containing at least eight
+ * entries.
+ * A conformant implementation that implements this type shall recognize
+ * (in the sense of "be able to determine whether a two dimensional location
+ * lies inside or outside the borders identified by") at least one value of
+ * country and at least one value for a region within the country indicated
+ * by that recognized country value. In this version of this standard, the
+ * only means to satisfy this is for a conformant implementation to recognize
+ * the value of UnCountryId indicating USA and at least one of the FIPS state
+ * codes for US states. The Protocol Implementation Conformance Statement
+ * (PICS) provided in Annex A allows an implementation to state which
+ * UnCountryId values it recognizes and which region values are recognized
+ * within that country.
+ * If a verifying implementation is required to check that relevant
+ * geographic information in a signed SPDU is consistent with a certificate
+ * containing one or more instances of this type, then the SDS is permitted
+ * to indicate that the signed SPDU is valid even if some values of country
+ * or within regionAndSubregions are unrecognized in the sense defined above,
+ * so long as the recognized instances of this type completely contain the
+ * relevant geographic information. Informally, if the recognized values in
+ * the certificate allow the SDS to determine that the SPDU is valid, then
+ * it can make that determination even if there are also unrecognized values
+ * in the certificate. This field is therefore not a "critical information
+ * field" as defined in 5.2.6, because unrecognized values are permitted so
+ * long as the validity of the SPDU can be established with the recognized
+ * values. 
However, as discussed in 5.2.6, the presence of an unrecognized + * value in a certificate can make it impossible to determine whether the + * certificate is valid and so whether the SPDU is valid. + * In this structure: + * + * @param countryOnly: is a UnCountryId as defined above. + * + * @param regionAndSubregions: identifies one or more subregions within + * country. + */ +CountryAndSubregions ::= SEQUENCE { + countryOnly UnCountryId, + regionAndSubregions SequenceOfRegionAndSubregions +} + +/** + * @brief The meanings of the fields in this structure are to be interpreted + * in the context of a country within which the region is located, referred + * to as the "enclosing country". If this structure is used in a + * CountryAndSubregions structure, the enclosing country is the one indicated + * by the country field in the CountryAndSubregions structure. If other uses + * are defined for this structure in the future, it is anticipated (in the + * sense of 4.4) that that definition will include a specification of how the + * enclosing country can be determined. + * If the enclosing country is the United States of America: + * - The region field identifies the state or statistically equivalent + * entity using the integer version of the 2010 FIPS codes as provided by the + * U.S. Census Bureau (see normative references in Clause 0). + * - The values in the subregions field identify the county or county + * equivalent entity using the integer version of the 2010 FIPS codes as + * provided by the U.S. Census Bureau. + * If the enclosing country is a different country from the USA, the meaning + * of regionAndSubregions is not defined in this version of this standard. 
+ * A conformant implementation that implements this type shall recognize (in + * the sense of "be able to determine whether a two-dimensional location lies + * inside or outside the borders identified by"), for at least one enclosing + * country, at least one value for a region within that country and at least + * one subregion for the indicated region. In this version of this standard, + * the only means to satisfy this is for a conformant implementation to + * recognize, for the USA, at least one of the FIPS state codes for US + * states, and at least one of the county codes in at least one of the + * recognized states. The Protocol Implementation Conformance Statement + * (PICS) provided in Annex A allows an implementation to state which + * UnCountryId values it recognizes and which region values are recognized + * within that country. + * If a verifying implementation is required to check that an relevant + * geographic information in a signed SPDU is consistent with a certificate + * containing one or more instances of this type, then the SDS is permitted + * to indicate that the signed SPDU is valid even if some values within + * subregions are unrecognized in the sense defined above, so long as the + * recognized instances of this type completely contain the relevant + * geographic information. Informally, if the recognized values in the + * certificate allow the SDS to determine that the SPDU is valid, then it + * can make that determination even if there are also unrecognized values + * in the certificate. This field is therefore not a "critical + * information field" as defined in 5.2.6, because unrecognized values are + * permitted so long as the validity of the SPDU can be established with the + * recognized values. However, as discussed in 5.2.6, the presence of an + * unrecognized value in a certificate can make it impossible to determine + * whether the certificate is valid and so whether the SPDU is valid. 
+ * In this structure:
+ *
+ * @param region: identifies a region within a country.
+ *
+ * @param subregions: identifies one or more subregions within region. A
+ * conformant implementation that supports RegionAndSubregions shall support
+ * a subregions field containing at least eight entries.
+ */
+RegionAndSubregions ::= SEQUENCE {
+ region Uint8,
+ subregions SequenceOfUint16
+}
+
+/**
+ * @brief This type is used for clarity of definitions.
+ */
+SequenceOfRegionAndSubregions ::= SEQUENCE OF RegionAndSubregions
+
+/**
+ * @brief This structure contains an estimate of 3D location.
+ *
+ * @note The units used in this data structure are consistent with the
+ * location data structures used in SAE J2735 [B26], though the encoding is
+ * incompatible.
+ */
+ThreeDLocation ::= SEQUENCE {
+ latitude Latitude,
+ longitude Longitude,
+ elevation Elevation
+}
+
+/**
+ * @brief This type contains an INTEGER encoding an estimate of the latitude
+ * with precision 1/10th microdegree relative to the World Geodetic System
+ * (WGS)-84 datum as defined in NIMA Technical Report TR8350.2.
+ * The integer in the latitude field is no more than 900 000 000 and no less
+ * than -900 000 000, except that the value 900 000 001 is used to indicate
+ * the latitude was not available to the sender.
+ */
+Latitude ::= NinetyDegreeInt
+
+/**
+ * @brief This type contains an INTEGER encoding an estimate of the longitude
+ * with precision 1/10th microdegree relative to the World Geodetic System
+ * (WGS)-84 datum as defined in NIMA Technical Report TR8350.2.
+ * The integer in the longitude field is no more than 1 800 000 000 and no
+ * less than -1 799 999 999, except that the value 1 800 000 001 is used to
+ * indicate that the longitude was not available to the sender.
+ */
+Longitude ::= OneEightyDegreeInt
+
+/**
+ * @brief This structure contains an estimate of the geodetic altitude above
+ * or below the WGS84 ellipsoid. 
The 16-bit value is interpreted as an + * integer number of decimeters representing the height above a minimum + * height of -409.5 m, with the maximum height being 6143.9 m. + */ +Elevation ::= Uint16 + +/** + * @brief The integer in the latitude field is no more than 900,000,000 and + * no less than -900,000,000, except that the value 900,000,001 is used to + * indicate the latitude was not available to the sender. + */ +NinetyDegreeInt ::= INTEGER { + min (-900000000), + max (900000000), + unknown (900000001) +} (-900000000..900000001) + +/** + * @brief The known latitudes are from -900,000,000 to +900,000,000 in 0.1 + * microdegree intervals. + */ +KnownLatitude ::= NinetyDegreeInt (min..max) + +/** + * @brief The value 900,000,001 indicates that the latitude was not + * available to the sender. + */ +UnknownLatitude ::= NinetyDegreeInt (unknown) + +/** + * @brief The integer in the longitude field is no more than 1,800,000,000 + * and no less than -1,799,999,999, except that the value 1,800,000,001 is + * used to indicate that the longitude was not available to the sender. + */ +OneEightyDegreeInt ::= INTEGER { + min (-1799999999), + max (1800000000), + unknown (1800000001) +} (-1799999999..1800000001) + +/** + * @brief The known longitudes are from -1,799,999,999 to +1,800,000,000 in + * 0.1 microdegree intervals. + */ +KnownLongitude ::= OneEightyDegreeInt (min..max) + +/** + * @brief The value 1,800,000,001 indicates that the longitude was not + * available to the sender. + */ +UnknownLongitude ::= OneEightyDegreeInt (unknown) + + +--***************************************************************************-- +-- Crypto Structures -- +--***************************************************************************-- + +/** + * @brief This structure represents a signature for a supported public key + * algorithm. It may be contained within SignedData or Certificate. 
+ * + * @note Critical information fields: If present, this is a critical + * information field as defined in 5.2.5. An implementation that does not + * recognize the indicated CHOICE for this type when verifying a signed SPDU + * shall indicate that the signed SPDU is invalid in the sense of 4.2.2.3.2, + * that is, it is invalid in the sense that its validity cannot be + * established. + * + * @note Canonicalization: This data structure is subject to canonicalization + * for the relevant operations specified in 6.1.2. The canonicalization + * applies to instances of this data structure of form EcdsaP256Signature + * and EcdsaP384Signature. + */ +Signature ::= CHOICE { + ecdsaNistP256Signature EcdsaP256Signature, + ecdsaBrainpoolP256r1Signature EcdsaP256Signature, + ..., + ecdsaBrainpoolP384r1Signature EcdsaP384Signature, + ecdsaNistP384Signature EcdsaP384Signature, + sm2Signature EcsigP256Signature +} + +/** + * @brief This structure represents an ECDSA signature. The signature is + * generated as specified in 5.3.1. + * + * If the signature process followed the specification of FIPS 186-5 + * and output the integer r, r is represented as an EccP256CurvePoint + * indicating the selection x-only. + * + * If the signature process followed the specification of SEC 1 and + * output the elliptic curve point R to allow for fast verification, R is + * represented as an EccP256CurvePoint indicating the choice compressed-y-0, + * compressed-y-1, or uncompressed at the sender's discretion. + * + * @note Canonicalization: This data structure is subject to canonicalization + * for the relevant operations specified in 6.1.2. When this data structure + * is canonicalized, the EccP256CurvePoint in rSig is represented in the + * form x-only. 
+ * + * @note When the signature is of form x-only, the x-value in rSig is + * an integer mod n, the order of the group; when the signature is of form + * compressed-y-, the x-value in rSig is an integer mod p, the underlying + * prime defining the finite field. In principle, this means that to convert a + * signature from form compressed-y- to form x-only, the converter checks + * the x-value to see if it lies between n and p and reduces it mod n if so. + * In practice, this check is unnecessary: Haase's Theorem states that + * difference between n and p is always less than 2*square-root(p), and so the + * chance that an integer lies between n and p, for a 256-bit curve, is + * bounded above by approximately square-root(p)/p or 2^(-128). For the + * 256-bit curves in this standard, the exact values of n and p in hexadecimal + * are: + * + * NISTp256: + * - p = FFFFFFFF00000001000000000000000000000000FFFFFFFFFFFFFFFFFFFFFFFF + * - n = FFFFFFFF00000000FFFFFFFFFFFFFFFFBCE6FAADA7179E84F3B9CAC2FC632551 + * + * Brainpoolp256: + * - p = A9FB57DBA1EEA9BC3E660A909D838D726E3BF623D52620282013481D1F6E5377 + * - n = A9FB57DBA1EEA9BC3E660A909D838D718C397AA3B561A6F7901E0E82974856A7 + */ +EcdsaP256Signature ::= SEQUENCE { + rSig EccP256CurvePoint, + sSig OCTET STRING (SIZE (32)) +} + +/** + * @brief This structure represents an ECDSA signature. The signature is + * generated as specified in 5.3.1. + * + * If the signature process followed the specification of FIPS 186-5 + * and output the integer r, r is represented as an EccP384CurvePoint + * indicating the selection x-only. + * + * If the signature process followed the specification of SEC 1 and + * output the elliptic curve point R to allow for fast verification, R is + * represented as an EccP384CurvePoint indicating the choice compressed-y-0, + * compressed-y-1, or uncompressed at the sender's discretion. 
+ * + * @note Canonicalization: This data structure is subject to canonicalization + * for the relevant operations specified in 6.1.2. When this data structure + * is canonicalized, the EccP384CurvePoint in rSig is represented in the + * form x-only. + * + * @note When the signature is of form x-only, the x-value in rSig is + * an integer mod n, the order of the group; when the signature is of form + * compressed-y-, the x-value in rSig is an integer mod p, the underlying + * prime defining the finite field. In principle, this means that to convert a + * signature from form compressed-y-* to form x-only, the converter checks the + * x-value to see if it lies between n and p and reduces it mod n if so. In + * practice, this check is unnecessary: Haase's Theorem states that difference + * between n and p is always less than 2*square-root(p), and so the chance + * that an integer lies between n and p, for a 384-bit curve, is bounded + * above by approximately square-root(p)/p or 2^(-192). For the 384-bit curve + * in this standard, the exact values of n and p in hexadecimal are: + * - p = 8CB91E82A3386D280F5D6F7E50E641DF152F7109ED5456B412B1DA197FB71123 + * ACD3A729901D1A71874700133107EC53 + * - n = 8CB91E82A3386D280F5D6F7E50E641DF152F7109ED5456B31F166E6CAC0425A7 + * CF3AB6AF6B7FC3103B883202E9046565 + */ +EcdsaP384Signature ::= SEQUENCE { + rSig EccP384CurvePoint, + sSig OCTET STRING (SIZE (48)) +} + +/** + * @brief This structure represents a elliptic curve signature where the + * component r is constrained to be an integer. This structure supports SM2 + * signatures as specified in 5.3.1.3. + */ +EcsigP256Signature ::= SEQUENCE { + rSig OCTET STRING (SIZE (32)), + sSig OCTET STRING (SIZE (32)) +} + +/** + * @brief This structure specifies a point on an elliptic curve in Weierstrass + * form defined over a 256-bit prime number. 
The curves supported in this + * standard are NIST p256 as defined in FIPS 186-5, Brainpool p256r1 as + * defined in RFC 5639, and the SM2 curve as defined in GB/T 32918.5-2017. + * The fields in this structure are OCTET STRINGS produced with the elliptic + * curve point encoding and decoding methods defined in subclause 5.5.6 of + * IEEE Std 1363-2000. The x-coordinate is encoded as an unsigned integer of + * length 32 octets in network byte order for all values of the CHOICE; the + * encoding of the y-coordinate y depends on whether the point is x-only, + * compressed, or uncompressed. If the point is x-only, y is omitted. If the + * point is compressed, the value of type depends on the least significant + * bit of y: if the least significant bit of y is 0, type takes the value + * compressed-y-0, and if the least significant bit of y is 1, type takes the + * value compressed-y-1. If the point is uncompressed, y is encoded explicitly + * as an unsigned integer of length 32 octets in network byte order. + * + * @note Canonicalization: This data structure is subject to canonicalization + * for the relevant operations specified in 6.1.2 if it appears in a + * HeaderInfo or in a ToBeSignedCertificate. See the definitions of HeaderInfo + * and ToBeSignedCertificate for a specification of the canonicalization + * operations. + */ +EccP256CurvePoint::= CHOICE { + x-only OCTET STRING (SIZE (32)), + fill NULL, + compressed-y-0 OCTET STRING (SIZE (32)), + compressed-y-1 OCTET STRING (SIZE (32)), + uncompressedP256 SEQUENCE { + x OCTET STRING (SIZE (32)), + y OCTET STRING (SIZE (32)) + } +} + +/** + * @brief This structure specifies a point on an elliptic curve in + * Weierstrass form defined over a 384-bit prime number. The only supported + * such curve in this standard is Brainpool p384r1 as defined in RFC 5639. 
+ * The fields in this structure are octet strings produced with the elliptic + * curve point encoding and decoding methods defined in subclause 5.5.6 of + * IEEE Std 1363-2000. The x-coordinate is encoded as an unsigned integer of + * length 48 octets in network byte order for all values of the CHOICE; the + * encoding of the y-coordinate y depends on whether the point is x-only, + * compressed, or uncompressed. If the point is x-only, y is omitted. If the + * point is compressed, the value of type depends on the least significant + * bit of y: if the least significant bit of y is 0, type takes the value + * compressed-y-0, and if the least significant bit of y is 1, type takes the + * value compressed-y-1. If the point is uncompressed, y is encoded + * explicitly as an unsigned integer of length 48 octets in network byte order. + * + * @note Canonicalization: This data structure is subject to canonicalization + * for the relevant operations specified in 6.1.2 if it appears in a + * HeaderInfo or in a ToBeSignedCertificate. See the definitions of HeaderInfo + * and ToBeSignedCertificate for a specification of the canonicalization + * operations. + */ +EccP384CurvePoint::= CHOICE { + x-only OCTET STRING (SIZE (48)), + fill NULL, + compressed-y-0 OCTET STRING (SIZE (48)), + compressed-y-1 OCTET STRING (SIZE (48)), + uncompressedP384 SEQUENCE { + x OCTET STRING (SIZE (48)), + y OCTET STRING (SIZE (48)) + } +} + +/** + * @brief This enumerated value indicates supported symmetric algorithms. The + * algorithm identifier identifies both the algorithm itself and a specific + * mode of operation. The symmetric algorithms supported in this version of + * this standard are AES-128 and SM4. The only mode of operation supported is + * Counter Mode Encryption With Cipher Block Chaining Message Authentication + * Code (CCM). Full details are given in 5.3.8. 
+ */ +SymmAlgorithm ::= ENUMERATED { + aes128Ccm, + ..., + sm4Ccm +} + +/** + * @brief This structure identifies a hash algorithm. The value sha256, + * indicates SHA-256. The value sha384 indicates SHA-384. The value sm3 + * indicates SM3. See 5.3.3 for more details. + * + * @note Critical information fields: This is a critical information field as + * defined in 5.2.6. An implementation that does not recognize the enumerated + * value of this type in a signed SPDU when verifying a signed SPDU shall + * indicate that the signed SPDU is invalid in the sense of 4.2.2.3.2, that + * is, it is invalid in the sense that its validity cannot be established. + */ +HashAlgorithm ::= ENUMERATED { + sha256, + ..., + sha384, + sm3 +} + +/** + * @brief This data structure is used to transfer a 16-byte symmetric key + * encrypted using ECIES as specified in IEEE Std 1363a-2004. The symmetric + * key is input to the key encryption process with no headers, encapsulation, + * or length indication. Encryption and decryption are carried out as + * specified in 5.3.5.1. + * + * @param v: is the sender's ephemeral public key, which is the output V from + * encryption as specified in 5.3.5.1. + * + * @param c: is the encrypted symmetric key, which is the output C from + * encryption as specified in 5.3.5.1. The algorithm for the symmetric key + * is identified by the CHOICE indicated in the following SymmetricCiphertext. + * For ECIES this shall be AES-128. + * + * @param t: is the authentication tag, which is the output tag from + * encryption as specified in 5.3.5.1. + */ +EciesP256EncryptedKey ::= SEQUENCE { + v EccP256CurvePoint, + c OCTET STRING (SIZE (16)), + t OCTET STRING (SIZE (16)) +} + +/** + * @brief This data structure is used to transfer a 16-byte symmetric key + * encrypted using SM2 encryption as specified in 5.3.3. The symmetric key is + * input to the key encryption process with no headers, encapsulation, or + * length indication. 
Encryption and decryption are carried out as specified + * in 5.3.5.2. + * + * @param v: is the sender's ephemeral public key, which is the output V from + * encryption as specified in 5.3.5.2. + * + * @param c: is the encrypted symmetric key, which is the output C from + * encryption as specified in 5.3.5.2. The algorithm for the symmetric key + * is identified by the CHOICE indicated in the following SymmetricCiphertext. + * For SM2 this algorithm shall be SM4. + * + * @param t: is the authentication tag, which is the output tag from + * encryption as specified in 5.3.5.2. + */ +EcencP256EncryptedKey ::= SEQUENCE { + v EccP256CurvePoint, + c OCTET STRING (SIZE (16)), + t OCTET STRING (SIZE (32)) +} + + +/** + * @brief This structure contains an encryption key, which may be a public or + * a symmetric key. + * + * @note Canonicalization: This data structure is subject to canonicalization + * for the relevant operations specified in 6.1.2 if it appears in a + * HeaderInfo or in a ToBeSignedCertificate. The canonicalization applies to + * the PublicEncryptionKey. See the definitions of HeaderInfo and + * ToBeSignedCertificate for a specification of the canonicalization + * operations. + */ +EncryptionKey ::= CHOICE { + public PublicEncryptionKey, + symmetric SymmetricEncryptionKey +} + +/** + * @brief This structure specifies a public encryption key and the associated + * symmetric algorithm which is used for bulk data encryption when encrypting + * for that public key. + * + * @note Canonicalization: This data structure is subject to canonicalization + * for the relevant operations specified in 6.1.2 if it appears in a + * HeaderInfo or in a ToBeSignedCertificate. The canonicalization applies to + * the BasePublicEncryptionKey. See the definitions of HeaderInfo and + * ToBeSignedCertificate for a specification of the canonicalization + * operations. 
+ */
+PublicEncryptionKey ::= SEQUENCE {
+ supportedSymmAlg SymmAlgorithm,
+ publicKey BasePublicEncryptionKey
+}
+
+/**
+ * @brief This structure specifies the bytes of a public encryption key for
+ * a particular algorithm. Supported public key encryption algorithms are
+ * defined in 5.3.5.
+ *
+ * @note Canonicalization: This data structure is subject to canonicalization
+ * for the relevant operations specified in 6.1.2 if it appears in a
+ * HeaderInfo or in a ToBeSignedCertificate. See the definitions of HeaderInfo
+ * and ToBeSignedCertificate for a specification of the canonicalization
+ * operations.
+ */
+BasePublicEncryptionKey ::= CHOICE {
+ eciesNistP256 EccP256CurvePoint,
+ eciesBrainpoolP256r1 EccP256CurvePoint,
+ ...,
+ ecencSm2 EccP256CurvePoint
+}
+
+/**
+ * @brief This structure represents a public key and states with what
+ * algorithm the public key is to be used. Cryptographic mechanisms are
+ * defined in 5.3.
+ * An EccP256CurvePoint or EccP384CurvePoint within a PublicVerificationKey
+ * structure is invalid if it indicates the choice x-only.
+ *
+ * @note Critical information fields: If present, this is a critical
+ * information field as defined in 5.2.6. An implementation that does not
+ * recognize the indicated CHOICE when verifying a signed SPDU shall indicate
+ * that the signed SPDU is invalid in the sense of 4.2.2.3.2, that is, it is
+ * invalid in the sense that its validity cannot be established.
+ *
+ * @note Canonicalization: This data structure is subject to canonicalization
+ * for the relevant operations specified in 6.1.2. The canonicalization
+ * applies to the EccP256CurvePoint and the Ecc384CurvePoint. Both forms of
+ * point are encoded in compressed form, i.e., such that the choice indicated
+ * within the Ecc*CurvePoint is compressed-y-0 or compressed-y-1. 
+ */ +PublicVerificationKey ::= CHOICE { + ecdsaNistP256 EccP256CurvePoint, + ecdsaBrainpoolP256r1 EccP256CurvePoint, + ... , + ecdsaBrainpoolP384r1 EccP384CurvePoint, + ecdsaNistP384 EccP384CurvePoint, + ecsigSm2 EccP256CurvePoint +} + +/** + * @brief This structure provides the key bytes for use with an identified + * symmetric algorithm. The supported symmetric algorithms are AES-128 and + * SM4 in CCM mode as specified in 5.3.8. + */ +SymmetricEncryptionKey ::= CHOICE { + aes128Ccm OCTET STRING(SIZE(16)), + ..., + sm4Ccm OCTET STRING(SIZE(16)) +} + + +--***************************************************************************-- +-- PSID / ITS-AID -- +--***************************************************************************-- + +/** + * @brief This structure represents the permissions that the certificate + * holder has with respect to activities for a single application area, + * identified by a Psid. + * + * @note The determination as to whether the activities are consistent with + * the permissions indicated by the PSID and ServiceSpecificPermissions is + * made by the SDEE and not by the SDS; the SDS provides the PSID and SSP + * information to the SDEE to enable the SDEE to make that determination. + * See 5.2.4.3.3 for more information. + * + * @note The SDEE specification is expected to specify what application + * activities are permitted by particular ServiceSpecificPermissions values. + * The SDEE specification is also expected EITHER to specify application + * activities that are permitted if the ServiceSpecificPermissions is + * omitted, OR to state that the ServiceSpecificPermissions need to always be + * present. + * + * @note Consistency with signed SPDU: As noted in 5.1.1, + * consistency between the SSP and the signed SPDU is defined by rules + * specific to the given PSID and is outside the scope of this standard. 
+ * + * @note Consistency with issuing certificate: If a certificate has an + * appPermissions entry A for which the ssp field is omitted, A is consistent + * with the issuing certificate if the issuing certificate contains a + * PsidSspRange P for which the following holds: + * - The psid field in P is equal to the psid field in A and one of the + * following is true: + * - The sspRange field in P indicates all. + * - The sspRange field in P indicates opaque and one of the entries in + * opaque is an OCTET STRING of length 0. + * + * For consistency rules for other forms of the ssp field, see the + * following subclauses. + */ +PsidSsp ::= SEQUENCE { + psid Psid, + ssp ServiceSpecificPermissions OPTIONAL +} + +/** + * @brief This type is used for clarity of definitions. + */ +SequenceOfPsidSsp ::= SEQUENCE OF PsidSsp + +/** + * @brief This type represents the PSID defined in IEEE Std 1609.12. + */ +Psid ::= INTEGER (0..MAX) + +/** + * @brief This type is used for clarity of definitions. + */ +SequenceOfPsid ::= SEQUENCE OF Psid + +/** + * @brief This structure represents the Service Specific Permissions (SSP) + * relevant to a given entry in a PsidSsp. The meaning of the SSP is specific + * to the associated Psid. SSPs may be PSID-specific octet strings or + * bitmap-based. See Annex C for further discussion of how application + * specifiers may choose which SSP form to use. + * + * @note Consistency with issuing certificate: If a certificate has an + * appPermissions entry A for which the ssp field is opaque, A is consistent + * with the issuing certificate if the issuing certificate contains one of + * the following: + * - (OPTION 1) A SubjectPermissions field indicating the choice all and + * no PsidSspRange field containing the psid field in A; + * - (OPTION 2) A PsidSspRange P for which the following holds: + * - The psid field in P is equal to the psid field in A and one of the + * following is true: + * - The sspRange field in P indicates all. 
+ * - The sspRange field in P indicates opaque and one of the entries in + * the opaque field in P is an OCTET STRING identical to the opaque field in + * A. + * + * For consistency rules for other types of ServiceSpecificPermissions, + * see the following subclauses. + */ +ServiceSpecificPermissions ::= CHOICE { + opaque OCTET STRING (SIZE(0..MAX)), + ..., + bitmapSsp BitmapSsp +} + +/** + * @brief This structure represents a bitmap representation of a SSP. The + * mapping of the bits of the bitmap to constraints on the signed SPDU is + * PSID-specific. + * + * @note Consistency with issuing certificate: If a certificate has an + * appPermissions entry A for which the ssp field is bitmapSsp, A is + * consistent with the issuing certificate if the certificate contains one + * of the following: + * - (OPTION 1) A SubjectPermissions field indicating the choice all and + * no PsidSspRange field containing the psid field in A; + * - (OPTION 2) A PsidSspRange P for which the following holds: + * - The psid field in P is equal to the psid field in A and one of the + * following is true: + * - EITHER The sspRange field in P indicates all + * - OR The sspRange field in P indicates bitmapSspRange and for every + * bit set to 1 in the sspBitmask in P, the bit in the identical position in + * the sspValue in A is set equal to the bit in that position in the + * sspValue in P. + * + * @note To restate the final sub-bullet point immediately above: A BitmapSsp B + * is consistent with a BitmapSspRange R if for every bit set to 1 in the + * sspBitmask in R, the bit in the identical position in B is set equal to the + * bit in that position in the sspValue in R. For each bit set to 0 in the + * sspBitmask in R, the corresponding bit in the identical position in B may be + * freely set to 0 or 1, i.e., if a bit is set to 0 in the sspBitmask in R, the + * value of corresponding bit in the identical position in B has no bearing on + * whether B and R are consistent. 
+ * + * @note Where a BitmapSsp in an authorization certificate is being compared + * with a BitmapSspRange in an issuing certificate, the rules given above imply + * that the BitmapSsp: (a) cannot be longer than BitmapSspRange in the issuing + * cert; (b) Can be shorter than the BitmapSspRange but must be long enough to + * reach the last "1" bit in the sspBitmask. + */ +BitmapSsp ::= OCTET STRING (SIZE(0..31)) + +/** + * @brief This structure represents the certificate issuing or requesting + * permissions of the certificate holder with respect to one particular set + * of application permissions. + * + * @param psid: identifies the application area. + * + * @param sspRange: identifies the SSPs associated with that PSID for which + * the holder may issue or request certificates. If sspRange is omitted, the + * holder may issue or request certificates for any SSP for that PSID. + */ +PsidSspRange ::= SEQUENCE { + psid Psid, + sspRange SspRange OPTIONAL +} + +/** + * @brief This type is used for clarity of definitions. + */ +SequenceOfPsidSspRange ::= SEQUENCE OF PsidSspRange + +/** + * @brief This structure identifies the SSPs associated with a PSID for + * which the holder may issue or request certificates. + * + * @note Consistency with issuing certificate: If a certificate has a + * PsidSspRange A for which the ssp field is opaque, A is consistent with + * the issuing certificate if the issuing certificate contains one of the + * following: + * - (OPTION 1) A SubjectPermissions field indicating the choice all and + * no PsidSspRange field containing the psid field in A; + * - (OPTION 2) A PsidSspRange P for which the following holds: + * - The psid field in P is equal to the psid field in A and one of the + * following is true: + * - The sspRange field in P indicates all. 
+ * - The sspRange field in P indicates opaque, and the sspRange field in + * A indicates opaque, and every OCTET STRING within the opaque in A is a + * duplicate of an OCTET STRING within the opaque in P. + * + * If a certificate has a PsidSspRange A for which the ssp field is all, + * A is consistent with the issuing certificate if the issuing certificate + * contains a PsidSspRange P for which the following holds: + * - (OPTION 1) A SubjectPermissions field indicating the choice all and + * no PsidSspRange field containing the psid field in A; + * - (OPTION 2) A PsidSspRange P for which the psid field in P is equal to + * the psid field in A and the sspRange field in P indicates all. + * + * For consistency rules for other types of SspRange, see the following + * subclauses. + * + * @note The choice "all" may also be indicated by omitting the + * SspRange in the enclosing PsidSspRange structure. Omitting the SspRange is + * preferred to explicitly indicating "all". + */ +SspRange ::= CHOICE { + opaque SequenceOfOctetString, + all NULL, + ..., + bitmapSspRange BitmapSspRange +} + +/** + * @brief This structure represents a bitmap representation of a SSP. The + * sspValue indicates permissions. The sspBitmask contains an octet string + * used to permit or constrain sspValue fields in issued certificates. The + * sspValue and sspBitmask fields shall be of the same length. 
+ * + * @note Consistency with issuing certificate: If a certificate has an + * PsidSspRange value P for which the sspRange field is bitmapSspRange, + * P is consistent with the issuing certificate if the issuing certificate + * contains one of the following: + * - (OPTION 1) A SubjectPermissions field indicating the choice all and + * no PsidSspRange field containing the psid field in P; + * - (OPTION 2) A PsidSspRange R for which the following holds: + * - The psid field in R is equal to the psid field in P and one of the + * following is true: + * - EITHER The sspRange field in R indicates all + * - OR The sspRange field in R indicates bitmapSspRange and for every + * bit set to 1 in the sspBitmask in R: + * - The bit in the identical position in the sspBitmask in P is set + * equal to 1, AND + * - The bit in the identical position in the sspValue in P is set equal + * to the bit in that position in the sspValue in R. + * + * Reference ETSI TS 103 097 for more information on bitmask SSPs. + */ +BitmapSspRange ::= SEQUENCE { + sspValue OCTET STRING (SIZE(1..32)), + sspBitmask OCTET STRING (SIZE(1..32)) +} + +/** + * @brief This type is used for clarity of definitions. + */ +SequenceOfOctetString ::= + SEQUENCE (SIZE (0..MAX)) OF OCTET STRING (SIZE(0..MAX)) + + +--***************************************************************************-- +-- Certificate Components -- +--***************************************************************************-- + +/** + * @brief This field contains the certificate holder's assurance level, which + * indicates the security of both the platform and storage of secret keys as + * well as the confidence in this assessment. + * + * This field is encoded as defined in Table 1, where "A" denotes bit + * fields specifying an assurance level, "R" reserved bit fields, and "C" bit + * fields specifying the confidence. 
+ * + * Table 1: Bitwise encoding of subject assurance + * + * | Bit number | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 | + * | -------------- | --- | --- | --- | --- | --- | --- | --- | --- | + * | Interpretation | A | A | A | R | R | R | C | C | + * + * In Table 1, bit number 0 denotes the least significant bit. Bit 7 + * to bit 5 denote the device's assurance levels, bit 4 to bit 2 are reserved + * for future use, and bit 1 and bit 0 denote the confidence. + * + * The specification of these assurance levels as well as the + * encoding of the confidence levels is outside the scope of this + * standard. It can be assumed that a higher assurance value indicates that + * the holder is more trusted than the holder of a certificate with lower + * assurance value and the same confidence value. + * + * @note This field was originally specified in ETSI TS 103 097, and + * future uses of this field are anticipated to be consistent with future + * versions of that standard. + */ +SubjectAssurance ::= OCTET STRING (SIZE(1)) + +/** + * @brief This integer identifies a series of CRLs issued under the authority + * of a particular CRACA. + */ +CrlSeries ::= Uint16 + + +--***************************************************************************-- +-- Pseudonym Linkage -- +--***************************************************************************-- + +/** + * @brief This atomic type is used in the definition of other data structures. + */ +IValue ::= Uint16 + +/** + * @brief This is a UTF-8 string as defined in IETF RFC 3629. The contents + * are determined by policy. + */ +Hostname ::= UTF8String (SIZE(0..255)) + +/** + * @brief This is the individual linkage value. See 5.1.3 and 7.3 for details + * of use. + */ +LinkageValue ::= OCTET STRING (SIZE(9)) + +/** + * @brief This is the group linkage value. See 5.1.3 and 7.3 for details of + * use. 
+ */ +GroupLinkageValue ::= SEQUENCE { + jValue OCTET STRING (SIZE(4)), + value OCTET STRING (SIZE(9)) +} + +/** + * @brief This structure contains a LA Identifier for use in the algorithms + * specified in 5.1.3.4. + */ +LaId ::= OCTET STRING (SIZE(2)) + +/** + * @brief This type is used for clarity of definitions. + */ +SequenceOfLinkageSeed ::= SEQUENCE OF LinkageSeed + +/** + * @brief This structure contains a linkage seed value for use in the + * algorithms specified in 5.1.3.4. + */ +LinkageSeed ::= OCTET STRING (SIZE(16)) + +--***************************************************************************-- +-- Extension Types -- +--***************************************************************************-- + +/** + * @brief This type represents a (id, content) pair where content carries the + * OER-encoded extension value as an OCTET STRING. + */ +Extension ::= SEQUENCE { + id ExtId, + content OCTET STRING +} + +/** + * @brief This type is used as an identifier for an Extension content field. + */ +ExtId ::= INTEGER(0..255) + + +END + +""" diff --git a/src/flexstack/security/asn1/ieee1609dot2/Ieee1609Dot2Crl.py b/src/flexstack/security/asn1/ieee1609dot2/Ieee1609Dot2Crl.py new file mode 100644 index 0000000..4b1f770 --- /dev/null +++ b/src/flexstack/security/asn1/ieee1609dot2/Ieee1609Dot2Crl.py @@ -0,0 +1,71 @@ +# pylint: skip-file +IEEE_1609_DOT_2_CRL_ASN1_DESCRIPTIONS = """--***************************************************************************-- +-- IEEE Std 1609.2: CRL Data Types -- +--***************************************************************************-- + +/** + * @note Section references in this file are to clauses in IEEE Std + * 1609.2 unless indicated otherwise. Full forms of acronyms and + * abbreviations used in this file are specified in 3.2. 
+ */ + +Ieee1609Dot2Crl {iso(1) identified-organization(3) ieee(111) + standards-association-numbered-series-standards(2) wave-stds(1609) dot2(2) + crl(3) major-version-3(3) minor-version-2(2)} + +DEFINITIONS AUTOMATIC TAGS ::= BEGIN + +IMPORTS + Ieee1609Dot2Data +FROM Ieee1609Dot2 {iso(1) identified-organization(3) ieee(111) + standards-association-numbered-series-standards(2) wave-stds(1609) + dot2(2) base(1) schema(1) major-version-2(2) minor-version-6(6)} + + Opaque, + Psid +FROM Ieee1609Dot2BaseTypes {iso(1) identified-organization(3) ieee(111) + standards-association-numbered-series-standards(2) wave-stds(1609) dot2(2) + base(1) base-types(2) major-version-2(2) minor-version-4(4)} + + CrlContents +FROM Ieee1609Dot2CrlBaseTypes {iso(1) identified-organization(3) ieee(111) + standards-association-numbered-series-standards(2) wave-stds(1609) dot2(2) + crl(3) base-types(2) major-version-3(3) minor-version-2(2)} +; + +/** + * @brief This is the PSID for the CRL application. + */ +CrlPsid ::= Psid(256) + +/** + * @brief This structure is the SPDU used to contain a signed CRL. A valid + * signed CRL meets the validity criteria of 7.4. 
+ */ +SecuredCrl ::= Ieee1609Dot2Data (WITH COMPONENTS {..., + content (WITH COMPONENTS { + signedData (WITH COMPONENTS {..., + tbsData (WITH COMPONENTS { + payload (WITH COMPONENTS {..., + data (WITH COMPONENTS {..., + content (WITH COMPONENTS { + unsecuredData (CONTAINING CrlContents) + }) + }) + }), + headerInfo (WITH COMPONENTS {..., + psid (CrlPsid), + generationTime ABSENT, + expiryTime ABSENT, + generationLocation ABSENT, + p2pcdLearningRequest ABSENT, + missingCrlIdentifier ABSENT, + encryptionKey ABSENT + }) + }) + }) + }) +}) + +END +""" diff --git a/src/flexstack/security/asn1/ieee1609dot2/Ieee1609Dot2CrlBaseTypes.py b/src/flexstack/security/asn1/ieee1609dot2/Ieee1609Dot2CrlBaseTypes.py new file mode 100644 index 0000000..9be5ad2 --- /dev/null +++ b/src/flexstack/security/asn1/ieee1609dot2/Ieee1609Dot2CrlBaseTypes.py @@ -0,0 +1,557 @@ +# pylint: skip-file +IEEE_1609_DOT_2_CRL_BASE_TYPES_ASN1_DESCRIPTIONS = """--***************************************************************************-- +-- IEEE Std 1609.2: CRL Base Data Types -- +--***************************************************************************-- + +/** + * @note Section references in this file are to clauses in IEEE Std + * 1609.2 unless indicated otherwise. Full forms of acronyms and + * abbreviations used in this file are specified in 3.2. 
+ */ + +Ieee1609Dot2CrlBaseTypes {iso(1) identified-organization(3) ieee(111) + standards-association-numbered-series-standards(2) wave-stds(1609) dot2(2) + crl(3) base-types(2) major-version-3(3) minor-version-3(3)} + +DEFINITIONS AUTOMATIC TAGS ::= BEGIN + +IMPORTS + CrlSeries, + Duration, + GeographicRegion, + HashedId8, + HashedId10, + IValue, + LaId, + LinkageSeed, + Opaque, + Psid, + SequenceOfLinkageSeed, + Signature, + Time32, + Uint3, + Uint8, + Uint16, + Uint32, + ValidityPeriod +FROM Ieee1609Dot2BaseTypes {iso(1) identified-organization(3) ieee(111) + standards-association-numbered-series-standards(2) wave-stds(1609) dot2(2) + base(1) base-types(2) major-version-2(2) minor-version-4(4)} +; + +/** + * @brief The fields in this structure have the following meaning: + * + * @param version: is the version number of the CRL. For this version of this + * standard it is 1. + * + * @param crlSeries: represents the CRL series to which this CRL belongs. This + * is used to determine whether the revocation information in a CRL is relevant + * to a particular certificate as specified in 5.1.3.2. + * + * @param crlCraca: contains the low-order eight octets of the hash of the + * certificate of the Certificate Revocation Authorization CA (CRACA) that + * ultimately authorized the issuance of this CRL. This is used to determine + * whether the revocation information in a CRL is relevant to a particular + * certificate as specified in 5.1.3.2. In a valid signed CRL as specified in + * 7.4 the crlCraca is consistent with the associatedCraca field in the + * Service Specific Permissions as defined in 7.4.3.3. The HashedId8 is + * calculated with the whole-certificate hash algorithm, determined as + * described in 6.4.3, applied to the COER-encoded certificate, canonicalized + * as defined in the definition of Certificate. + * + * @param issueDate: specifies the time when the CRL was issued. 
+ * + * @param nextCrl: contains the time when the next CRL with the same crlSeries + * and cracaId is expected to be issued. The CRL is invalid unless nextCrl is + * strictly after issueDate. This field is used to set the expected update time + * for revocation information associated with the (crlCraca, crlSeries) pair as + * specified in 5.1.3.6. + * + * @param priorityInfo: contains information that assists devices with limited + * storage space in determining which revocation information to retain and + * which to discard. + * + * @param typeSpecific: contains the CRL body. + */ +CrlContents ::= SEQUENCE { + version Uint8 (1), + crlSeries CrlSeries, + crlCraca HashedId8, + issueDate Time32, + nextCrl Time32, + priorityInfo CrlPriorityInfo, + typeSpecific TypeSpecificCrlContents +} + +/** + * @brief This data structure contains information that assists devices with + * limited storage space in determining which revocation information to retain + * and which to discard. + * + * @param priority: indicates the priority of the revocation information + * relative to other CRLs issued for certificates with the same cracaId and + * crlSeries values. A higher value for this field indicates higher importance + * of this revocation information. + * + * @note This mechanism is for future use; details are not specified in this + * version of the standard. + */ +CrlPriorityInfo ::= SEQUENCE { + priority Uint8 OPTIONAL, + ... +} + +/** + * @brief This structure contains type-specific CRL contents. 
+ * + * @param fullHashCrl: contains a full hash-based CRL, i.e., a listing of the + * hashes of all certificates that: + * - contain the indicated cracaId and crlSeries values, and + * - are revoked by hash, and + * - have been revoked + * + * @param deltaHashCrl: contains a delta hash-based CRL, i.e., a listing of + * the hashes of all certificates that: + * - contain the indicated cracaId and crlSeries values, and + * - are revoked by hash, and + * - have been revoked since the previous CRL that contained the indicated + * cracaId and crlSeries values. + * + * A Hash-based CRL should not include any certificates that had expired at the + * time the CRL was generated; however, the inclusion of expired certificates + * does not make a CRL invalid, and there is no expectation that receivers of a + * CRL will check whether any of the certificates on the CRL have expired. + * + * @note Since a recipient of a hash-based CRLonly receives the hash, they + * cannot directly establish the validity period of any certificate on the CRL + * without obtaining the certificate itself; this would render impractical any + * validity check for CRLs based on the expiry status of the revoked + * certificates. + * + * @param fullLinkedCrl and fullLinkedCrlWithAlg: contain a full linkage + * ID-based CRL, i.e., a listing of the individual and/or group linkage data + * for all certificates that: + * - contain the indicated cracaId and crlSeries values, and + * - are revoked by linkage value, and + * - have been revoked + * The difference between fullLinkedCrl and fullLinkedCrlWithAlg is in how + * the cryptographic algorithms to be used in the seed evolution function and + * linkage value generation function of 5.1.3.4 are communicated to the + * receiver of the CRL. See below in this subclause for details. 
+ * + * @param deltaLinkedCrl and deltaLinkedCrlWithAlg: contain a delta linkage + * ID-based CRL, i.e., a listing of the individual and/or group linkage data + * for all certificates that: + * - contain the specified cracaId and crlSeries values, and + * - are revoked by linkage data, and + * - have been revoked since the previous CRL that contained the indicated + * cracaId and crlSeries values. + * The difference between deltaLinkedCrl and deltaLinkedCrlWithAlg is in how + * the cryptographic algorithms to be used in the seed evolution function + * and linkage value generation function of 5.1.3.4 are communicated to the + * receiver of the CRL. See below in this subclause for details. + * + * @note It is the intent of this standard that once a certificate is revoked, + * it remains revoked for the rest of its lifetime. CRL signers are expected + * to include a revoked certificate on all CRLs issued between the + * certificate's revocation and its expiry. + * + * @note Seed evolution function and linkage value generation function + * identification. In order to derive linkage values per the mechanisms given + * in 5.1.3.4, a receiver needs to know the seed evolution function and the + * linkage value generation function. + * + * If the contents of this structure is a + * ToBeSignedLinkageValueCrlWithAlgIdentifier, then the seed evolution function + * and linkage value generation function are given explicitly as specified in + * the specification of ToBeSignedLinkageValueCrlWithAlgIdentifier. + * + * If the contents of this structure is a ToBeSignedLinkageValueCrl, then the + * seed evolution function and linkage value generation function are obtained + * based on the crlCraca field in the CrlContents: + * - If crlCraca was obtained with SHA-256 or SHA-384, then + * seedEvolutionFunctionIdentifier is seedEvoFn1-sha256 and + * linkageValueGenerationFunctionIdentifier is lvGenFn1-aes128. 
+ * - If crlCraca was obtained with SM3, then seedEvolutionFunctionIdentifier + * is seedEvoFn1-sm3 and linkageValueGenerationFunctionIdentifier is + * lvGenFn1-sm4. + */ +TypeSpecificCrlContents ::= CHOICE { + fullHashCrl ToBeSignedHashIdCrl, + deltaHashCrl ToBeSignedHashIdCrl, + fullLinkedCrl ToBeSignedLinkageValueCrl, + deltaLinkedCrl ToBeSignedLinkageValueCrl, + ..., + fullLinkedCrlWithAlg ToBeSignedLinkageValueCrlWithAlgIdentifier, + deltaLinkedCrlWithAlg ToBeSignedLinkageValueCrlWithAlgIdentifier +} + +/** + * @brief This data structure represents information about a revoked + * certificate. + * + * @param crlSerial: is a counter that increments by 1 every time a new full + * or delta CRL is issued for the indicated crlCraca and crlSeries values. A + * "new full or delta CRL" is a CRL with a new issueDate, whether or not the + * contents of the CRL have changed. + * + * @param entries: contains the individual revocation information items. + * + * @note To indicate that a hash-based CRL contains no individual revocation + * information items, the recommended approach is for the SEQUENCE OF in the + * SequenceOfHashBasedRevocationInfo in this field to indicate zero entries. + */ +ToBeSignedHashIdCrl ::= SEQUENCE { + crlSerial Uint32, + entries SequenceOfHashBasedRevocationInfo, + ... +} + +/** + * @brief This type is used for clarity of definitions. + */ +SequenceOfHashBasedRevocationInfo ::= + SEQUENCE OF HashBasedRevocationInfo + +/** + * @brief In this structure: + * + * @param id: is the HashedId10 identifying the revoked certificate. The + * HashedId10 is calculated with the whole-certificate hash algorithm, + * determined as described in 6.4.3, applied to the COER-encoded certificate, + * canonicalized as defined in the definition of Certificate. + * + * @param expiry: is the value computed from the validity period's start and + * duration values in that certificate. + */ +HashBasedRevocationInfo ::= SEQUENCE { + id HashedId10, + expiry Time32, + ... 
+} + +/** + * @brief In this structure: + * + * @param iRev: is the value iRev used in the algorithm given in 5.1.3.4. This + * value applies to all linkage-based revocation information included within + * either indvidual or groups. + * + * @param indexWithinI: is a counter that is set to 0 for the first CRL issued + * for the indicated combination of crlCraca, crlSeries, and iRev, and + * increments by 1 every time a new full or delta CRL is issued for the + * indicated crlCraca and crlSeries values without changing iRev. + * + * @param individual: contains individual linkage data. + * + * @note To indicate that a linkage ID-based CRL contains no individual + * linkage data, the recommended approach is for the SEQUENCE OF in the + * SequenceOfJMaxGroup in this field to indicate zero entries. + * + * @param groups: contains group linkage data. + * + * @note To indicate that a linkage ID-based CRL contains no group linkage + * data, the recommended approach is for the SEQUENCE OF in the + * SequenceOfGroupCrlEntry in this field to indicate zero entries. + * + * @param groupsSingleSeed: contains group linkage data generated with a single + * seed. + * + * @param iPeriodInfo contains information about the duration of the revocation + * time periods, to allow a receiver to determine at what point it will be + * necessary to have calculated the linkage values associated with future time + * periods. + */ +ToBeSignedLinkageValueCrl ::= SEQUENCE { + iRev IValue, + indexWithinI Uint8, + individual SequenceOfJMaxGroup OPTIONAL, + groups SequenceOfGroupCrlEntry OPTIONAL, + ..., + groupsSingleSeed SequenceOfGroupSingleSeedCrlEntry OPTIONAL, + iPeriodInfo SequenceOfIPeriodInfo OPTIONAL +} (WITH COMPONENTS {..., individual PRESENT} | + WITH COMPONENTS {..., groups PRESENT} | + WITH COMPONENTS {..., groupsSingleSeed PRESENT}) + +/** + * @brief This type is used for clarity of definitions. 
+ */ +SequenceOfJMaxGroup ::= SEQUENCE OF JMaxGroup + +/** + * @brief In this structure: + * + * @param jMax: is the value jMax used in the algorithm given in 5.1.3.4. This + * value applies to all linkage-based revocation information included within + * contents. + * + * @param contents: contains individual linkage data. + */ +JMaxGroup ::= SEQUENCE { + jmax Uint8, + contents SequenceOfLAGroup, + ... +} + +/** + * @brief This type is used for clarity of definitions. + */ +SequenceOfLAGroup ::= SEQUENCE OF LAGroup + +/** + * @brief In this structure: + * + * @param la1Id: is the value LinkageAuthorityIdentifier1 used in the + * algorithm given in 5.1.3.4. This value applies to all linkage-based + * revocation information included within contents. + * + * @param la2Id: is the value LinkageAuthorityIdentifier2 used in the + * algorithm given in 5.1.3.4. This value applies to all linkage-based + * revocation information included within contents. + * + * @param contents: contains individual linkage data. + */ +LAGroup ::= SEQUENCE { + la1Id LaId, + la2Id LaId, + contents SequenceOfIMaxGroup, + ... +} + +/** + * @brief This type is used for clarity of definitions. + */ +SequenceOfIMaxGroup ::= SEQUENCE OF IMaxGroup + +/** + * @brief In this structure: + * + * @param iMax indicates that for the entries in contents, revocation + * information need no longer be calculated once iCert > iMax as the holder + * is known to have no more valid certs at that point. iMax is not directly + * used in the calculation of the linkage values, it is used to determine + * when revocation information can safely be deleted. + * + * @param contents contains individual linkage data for certificates that are + * revoked using two seeds, per the algorithm given in per the mechanisms + * given in 5.1.3.4 and with seedEvolutionFunctionIdentifier and + * linkageValueGenerationFunctionIdentifier obtained as specified in 7.3.3. 
+ * + * @param singleSeed contains individual linkage data for certificates that + * are revoked using a single seed, per the algorithm given in per the + * mechanisms given in 5.1.3.4 and with seedEvolutionFunctionIdentifier and + * linkageValueGenerationFunctionIdentifier obtained as specified in 7.3.3. + */ +IMaxGroup ::= SEQUENCE { + iMax Uint16, + contents SequenceOfIndividualRevocation, + ..., + singleSeed SequenceOfLinkageSeed OPTIONAL +} + +/** + * @brief This type is used for clarity of definitions. + */ +SequenceOfIndividualRevocation ::= + SEQUENCE (SIZE(0..MAX)) OF IndividualRevocation + +/** + * @brief In this structure: + * + * @param linkageSeed1 is the value LinkageSeed1 used in the algorithm given + * in 5.1.3.4. + * + * @param linkageSeed2 is the value LinkageSeed2 used in the algorithm given + * in 5.1.3.4. + */ +IndividualRevocation ::= SEQUENCE { + linkageSeed1 LinkageSeed, + linkageSeed2 LinkageSeed, + ... +} + +/** + * @brief This type is used for clarity of definitions. + */ +SequenceOfGroupCrlEntry ::= SEQUENCE OF GroupCrlEntry + +/** + * @brief In this structure: + * + * @param iMax: indicates that for these certificates, revocation information + * need no longer be calculated once iCert > iMax as the holders are known + * to have no more valid certs for that (crlCraca, crlSeries) at that point. + * + * @param la1Id: is the value LinkageAuthorityIdentifier1 used in the + * algorithm given in 5.1.3.4. This value applies to all linkage-based + * revocation information included within contents. + * + * @param linkageSeed1: is the value LinkageSeed1 used in the algorithm given + * in 5.1.3.4. + * + * @param la2Id: is the value LinkageAuthorityIdentifier2 used in the + * algorithm given in 5.1.3.4. This value applies to all linkage-based + * revocation information included within contents. + * + * @param linkageSeed2: is the value LinkageSeed2 used in the algorithm given + * in 5.1.3.4. 
+ */ +GroupCrlEntry ::= SEQUENCE { + iMax Uint16, + la1Id LaId, + linkageSeed1 LinkageSeed, + la2Id LaId, + linkageSeed2 LinkageSeed, + ... +} + +/** + * @brief In this structure: + * + * @param iRev is the value iRev used in the algorithm given in 5.1.3.4. This + * value applies to all linkage-based revocation information included within + * either indvidual or groups. + * + * @param indexWithinI is a counter that is set to 0 for the first CRL issued + * for the indicated combination of crlCraca, crlSeries, and iRev, and increments + * by 1 every time a new full or delta CRL is issued for the indicated crlCraca + * and crlSeries values without changing iRev. + * + * @param seedEvolution contains an identifier for the seed evolution + * function, used as specified in 5.1.3.4. + * + * @param lvGeneration contains an identifier for the linkage value + * generation function, used as specified in 5.1.3.4. + * + * @param individual contains individual linkage data. + * + * @param groups contains group linkage data for linkage value generation + * with two seeds. + * + * @param groupsSingleSeed contains group linkage data for linkage value + * generation with one seed. + * + * @param iPeriodInfo contains information about the duration of the + * revocation time periods, to allow a receiver to determine at what point + * it will be necessary to have calculated the linkage values associated + * with future time periods. + */ +ToBeSignedLinkageValueCrlWithAlgIdentifier ::= SEQUENCE { + iRev IValue, + indexWithinI Uint8, + seedEvolution SeedEvolutionFunctionIdentifier, + lvGeneration LvGenerationFunctionIdentifier, + individual SequenceOfJMaxGroup OPTIONAL, + groups SequenceOfGroupCrlEntry OPTIONAL, + groupsSingleSeed SequenceOfGroupSingleSeedCrlEntry OPTIONAL, + ... +} (WITH COMPONENTS {..., individual PRESENT} | + WITH COMPONENTS {..., groups PRESENT} | + WITH COMPONENTS {..., groupsSingleSeed PRESENT}) + +/** + * @brief This type is used for clarity of definitions. 
+ */ +SequenceOfGroupSingleSeedCrlEntry ::= + SEQUENCE OF GroupSingleSeedCrlEntry + +/** + * @brief This structure contains the linkage seed for group revocation with + * a single seed. The seed is used as specified in the algorithms in 5.1.3.4. + */ +GroupSingleSeedCrlEntry ::= SEQUENCE { + iMax Uint16, + laId LaId, + linkageSeed LinkageSeed +} + +/** + * @brief This structure contains an identifier for the algorithms specified + * in 5.1.3.4. + */ +ExpansionAlgorithmIdentifier ::= ENUMERATED { + sha256ForI-aesForJ, + sm3ForI-sm4ForJ, + ... +} + +/** + * @brief This is the identifier for the seed evolution function. See 5.1.3 + * for details of use. + */ +SeedEvolutionFunctionIdentifier ::= NULL + +/** + * @brief This is the identifier for the linkage value generation function. + * See 5.1.3 for details of use. + */ +LvGenerationFunctionIdentifier ::= NULL + +/** + * @brief This structure contains information about when future revocation + * time periods start. Revocation time periods are discussed in 5.1.3.4. + * Linkage value based CRLs contain linkage seeds which can be used to + * calculate the linkage values that will appear in certificates for + * revocation time periods that are in the future relative to the issuance + * time of the CRL; the IPeriodInfo structure allows the CRL signer to + * communicate the start time for future time periods, so that a CRL recipient + * can calculate the linkage values before the relevant time period starts. + * The CRL contains a SEQUENCE of IPeriodInfo to support the case where the + * CRL issuer knows that the duration of the time periods is going to change + * at some point in the future; the number of IPeriodInfo in the sequence + * should be the minimum necessary to convey the information, e.g. if the + * duration of the time periods is not going to change, the CRL should contain + * a single IPeriodInfo. 
+ * + * @note The information about the duration of future time periods can be + * assumed to be available to the CRL signer, because pseudonym certificates + * that use linkage values are typically issued for future time periods rather + * than only the current time period, and so the length of future time periods + * had to be known to the CA at the time of certificate issuance and can be + * provided to the CRL signer. This creates a requirement that if multiple CAs + * issue certificates that use the same CRL Series and CRACA Id values, all of + * those CAs will be expected to implement any time period length changes in + * synch with each other so that all certificates on the same CRL will have + * synchronized time period starts and ends. How these CAs are synchronized + * with each other is out of scope for this document. + * + * An IPeriodInfo appears in a CRL that has an iRev field. The CRL contains a + * SEQUENCE of IPeriodInfo. Each IPeriodInfo makes use of the previous iRev + * value, prevI. For the first IPeriodInfo in the SEQUENCE, prevI is the value + * of iRev in the CRL. For each subsequent IPeriodInfo in the SEQUENCE, prevI + * is the value of guaranteedToIValue in the previous IPeriodInfo. + * + * In this structure: + * + * @param startOfNextIPeriod is the start time of the i-period with i = prevI + + * 1. This is the earliest time at which certificates with i-period equal to + * prevI + 1 will be valid, i.e. if a certificate with the cracaId and + * crlSeries corresponding to this CRL has + * ToBeSignedCertificate.id.linkageData.iCert = prevI + 1, then + * ToBeSignedCertificate.validityPeriod.start will be no earlier than this + * startOfNextIPeriod value. + * + * @param iPeriodLength is the length of all time periods from prevI + 1 to + * guaranteedToIValue inclusive, i.e., each time period starts exactly + * iPeriodLength after the previous time period started. 
+ * + * @param guaranteedToIValue is last i-period which is guaranteed to have the + * indicated duration, i.e., all time periods from prevI + 1 to + * guaranteedToIValue are guaranteed to have that duration and time period + * guaranteedToIValue +1 is not guaranteed to have that duration. + */ + +IPeriodInfo ::= SEQUENCE { + startOfNextIPeriod Time32, + iPeriodLength Duration, + guaranteedToIValue IValue +} + +SequenceOfIPeriodInfo ::= SEQUENCE OF IPeriodInfo + + + +END +""" diff --git a/src/flexstack/security/asn1/ieee1609dot2/__init__.py b/src/flexstack/security/asn1/ieee1609dot2/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/flexstack/security/certificate.py b/src/flexstack/security/certificate.py index 296e1b0..18868db 100644 --- a/src/flexstack/security/certificate.py +++ b/src/flexstack/security/certificate.py @@ -95,7 +95,7 @@ def as_clear_certificate() -> Certificate: ), "toBeSigned": { "id": ("name", "i2cat.net"), - "cracaId": (0xA49599).to_bytes(3, byteorder="big"), + "cracaId": b"\x00\x00\x00", "crlSeries": 0, "validityPeriod": {"start": 0, "duration": ("seconds", 30)}, "appPermissions": [ @@ -526,12 +526,112 @@ def verify(self, backend: ECDSABackend) -> bool: bool True if the certificate is valid, False otherwise. """ + # §6: verifyKeyIndicator must match certificate type + cert_type = self.certificate.get("type") + vki = self.certificate.get("toBeSigned", {}).get("verifyKeyIndicator") + if cert_type == "explicit" and (vki is None or vki[0] != "verificationKey"): + return False + if cert_type == "implicit" and (vki is None or vki[0] != "reconstructionValue"): + return False if self.issuer is not None and self.certificate_is_issued(): return self.__verify_issued_certificate(backend) if self.certificate_is_self_signed(): return self.__verify_self_signed_certificate(backend) return False + def is_authorization_ticket(self) -> bool: + """ + Check whether this certificate conforms to the §7.2.1 Authorization Ticket profile. 
+ + §7.2.1 constraints: + - issuer SHALL be sha256AndDigest or sha384AndDigest (never self). + - toBeSigned.id SHALL be choice none. + - toBeSigned.certIssuePermissions SHALL be absent. + - toBeSigned.appPermissions SHALL be present. + """ + tbs = self.certificate.get("toBeSigned", {}) + if self.certificate.get("issuer", ("",))[0] not in ("sha256AndDigest", "sha384AndDigest"): + return False + if tbs.get("id", ("",))[0] != "none": + return False + if "certIssuePermissions" in tbs: + return False + if "appPermissions" not in tbs: + return False + return True + + def is_enrolment_credential(self) -> bool: + """ + Check whether this certificate conforms to the §7.2.2 Enrolment Credential profile. + + §7.2.2 constraints: + - type SHALL be explicit. + - issuer SHALL be sha256AndDigest or sha384AndDigest. + - toBeSigned.id SHALL be choice name. + - toBeSigned.certIssuePermissions SHALL be absent. + - toBeSigned.appPermissions SHALL be present. + """ + tbs = self.certificate.get("toBeSigned", {}) + if self.certificate.get("type") != "explicit": + return False + if self.certificate.get("issuer", ("",))[0] not in ("sha256AndDigest", "sha384AndDigest"): + return False + if tbs.get("id", ("",))[0] != "name": + return False + if "certIssuePermissions" in tbs: + return False + if "appPermissions" not in tbs: + return False + return True + + def is_root_ca_certificate(self) -> bool: + """ + Check whether this certificate conforms to the §7.2.3 Root CA profile. + + §7.2.3 constraints: + - type SHALL be explicit. + - issuer SHALL be self. + - toBeSigned.id SHALL be choice name. + - toBeSigned.certIssuePermissions SHALL be present. + - toBeSigned.appPermissions SHALL be present. 
+ """ + tbs = self.certificate.get("toBeSigned", {}) + if self.certificate.get("type") != "explicit": + return False + if self.certificate.get("issuer", ("",))[0] != "self": + return False + if tbs.get("id", ("",))[0] != "name": + return False + if "certIssuePermissions" not in tbs: + return False + if "appPermissions" not in tbs: + return False + return True + + def is_subordinate_ca_certificate(self) -> bool: + """ + Check whether this certificate conforms to the §7.2.4 Subordinate CA profile. + + §7.2.4 constraints: + - type SHALL be explicit. + - issuer SHALL be sha256AndDigest or sha384AndDigest. + - toBeSigned.id SHALL be choice name. + - toBeSigned.encryptionKey SHALL be present. + - toBeSigned.certIssuePermissions SHALL be present. + """ + tbs = self.certificate.get("toBeSigned", {}) + if self.certificate.get("type") != "explicit": + return False + if self.certificate.get("issuer", ("",))[0] not in ("sha256AndDigest", "sha384AndDigest"): + return False + if tbs.get("id", ("",))[0] != "name": + return False + if "encryptionKey" not in tbs: + return False + if "certIssuePermissions" not in tbs: + return False + return True + def set_issuer_as_self(self) -> Certificate: """ Set the issuer as self. 
@@ -695,6 +795,25 @@ def initialize_certificate( @staticmethod def verify_to_be_signed_certificate(to_be_signed_certificate: dict) -> bool: + # §6: id must be name or none + id_entry = to_be_signed_certificate.get("id") + if id_entry is None or id_entry[0] not in ("name", "none"): + return False + # §6: cracaId must be 0x000000 + if to_be_signed_certificate.get("cracaId") != b"\x00\x00\x00": + return False + # §6: crlSeries must be 0 + if to_be_signed_certificate.get("crlSeries") != 0: + return False + # §6: at least one of appPermissions or certIssuePermissions must be present + if "appPermissions" not in to_be_signed_certificate and "certIssuePermissions" not in to_be_signed_certificate: + return False + # §6: certRequestPermissions must be absent + if "certRequestPermissions" in to_be_signed_certificate: + return False + # §6: canRequestRollover must be absent + if "canRequestRollover" in to_be_signed_certificate: + return False try: SECURITY_CODER.encode_ToBeSignedCertificate( to_be_signed_certificate) diff --git a/src/flexstack/security/certificate_library.py b/src/flexstack/security/certificate_library.py index 7bf05ba..80665a4 100644 --- a/src/flexstack/security/certificate_library.py +++ b/src/flexstack/security/certificate_library.py @@ -242,3 +242,26 @@ def verify_sequence_of_certificates( certificates[:-1], backend ) return None + + def get_ca_certificate_by_hashedid3(self, hashedid3: bytes) -> Certificate | None: + """ + Look up an Authorization Authority or Root CA certificate by the last + 3 bytes of its HashedId8 (HashedId3). + + Parameters + ---------- + hashedid3 : bytes + The last 3 bytes of the certificate's HashedId8. + + Returns + ------- + Certificate | None + The matching certificate, or None if not found. 
+ """ + for hashedid8, cert in self.known_authorization_authorities.items(): + if hashedid8[-3:] == hashedid3: + return cert + for hashedid8, cert in self.known_root_certificates.items(): + if hashedid8[-3:] == hashedid3: + return cert + return None diff --git a/src/flexstack/security/ecdsa_backend.py b/src/flexstack/security/ecdsa_backend.py index e8b4c74..3e5c441 100644 --- a/src/flexstack/security/ecdsa_backend.py +++ b/src/flexstack/security/ecdsa_backend.py @@ -208,3 +208,42 @@ def verify_with_pk(self, data: bytes, signature: dict, pk: dict) -> bool: return False else: raise ValueError("Public key format not supported") + + def export_signing_key(self, identifier: int) -> bytes: + """ + Export the PEM-encoded signing key for the given identifier. + + Parameters + ---------- + identifier : int + Identifier of the key pair to export. + + Returns + ------- + bytes + PEM-encoded private signing key. + """ + return self.keys[identifier].to_pem() + + def import_signing_key(self, key_pem: bytes) -> int: + """ + Import a PEM-encoded signing key and return its new identifier. + + The imported key is appended to the internal key store. Existing keys + are not modified. + + Parameters + ---------- + key_pem : bytes + PEM-encoded NIST P-256 private signing key. + + Returns + ------- + int + Identifier that can be used with :meth:`sign`, :meth:`get_public_key`, + and :meth:`export_signing_key`. 
+ """ + key: ecdsa.keys.SigningKey = ecdsa.SigningKey.from_pem(key_pem) + identifier = len(self.keys) + self.keys[identifier] = key + return identifier diff --git a/src/flexstack/security/security_asn1.py b/src/flexstack/security/security_asn1.py index c894467..9238765 100644 --- a/src/flexstack/security/security_asn1.py +++ b/src/flexstack/security/security_asn1.py @@ -1,2759 +1,12 @@ # pylint: skip-file -SECURITY_ASN1_DESCRIPTIONS = """--***************************************************************************-- --- IEEE Std 1609.2: Base Data Types -- ---***************************************************************************-- - -/** - * @brief NOTE: Section references in this file are to clauses in IEEE Std - * 1609.2 unless indicated otherwise. Full forms of acronyms and - * abbreviations used in this file are specified in 3.2. - */ - -Ieee1609Dot2BaseTypes {iso(1) identified-organization(3) ieee(111) - standards-association-numbered-series-standards(2) wave-stds(1609) dot2(2) - base(1) base-types(2) major-version-2(2) minor-version-3(3)} - -DEFINITIONS AUTOMATIC TAGS ::= BEGIN - -EXPORTS ALL; - ---***************************************************************************-- --- Integer Types -- ---***************************************************************************-- - -/** - * @class Uint3 - * - * @brief This atomic type is used in the definition of other data structures. - * It is for non-negative integers up to 7, i.e., (hex)07. - */ - Uint3 ::= INTEGER (0..7) - -/** - * @class Uint8 - * - * @brief This atomic type is used in the definition of other data structures. - * It is for non-negative integers up to 255, i.e., (hex)ff. - */ - Uint8 ::= INTEGER (0..255) - -/** - * @class Uint16 - * - * @brief This atomic type is used in the definition of other data structures. - * It is for non-negative integers up to 65,535, i.e., (hex)ff ff. 
- */ - Uint16 ::= INTEGER (0..65535) - -/** - * @class Uint32 - * - * @brief This atomic type is used in the definition of other data structures. - * It is for non-negative integers up to 4,294,967,295, i.e., - * (hex)ff ff ff ff. - */ - Uint32 ::= INTEGER (0..4294967295) - -/** - * @class Uint64 - * - * @brief This atomic type is used in the definition of other data structures. - * It is for non-negative integers up to 18,446,744,073,709,551,615, i.e., - * (hex)ff ff ff ff ff ff ff ff. - */ - Uint64 ::= INTEGER (0..18446744073709551615) - -/** - * @class SequenceOfUint8 - * - * @brief This type is used for clarity of definitions. - */ - SequenceOfUint8 ::= SEQUENCE OF Uint8 - -/** - * @class SequenceOfUint16 - * - * @brief This type is used for clarity of definitions. - */ - SequenceOfUint16 ::= SEQUENCE OF Uint16 - - ---***************************************************************************-- --- OCTET STRING Types -- ---***************************************************************************-- - -/** - * @class Opaque - * - * @brief This is a synonym for ASN.1 OCTET STRING, and is used in the - * definition of other data structures. - */ - Opaque ::= OCTET STRING - -/** - * @class HashedId3 - * - * @brief This type contains the truncated hash of another data structure. - * The HashedId3 for a given data structure is calculated by calculating the - * hash of the encoded data structure and taking the low-order three bytes of - * the hash output. If the data structure is subject to canonicalization it - * is canonicalized before hashing. The low-order three bytes are the last - * three bytes of the hash when represented in network byte order. See - * Example below. - * - *

Example: Consider the SHA-256 hash of the empty string: - * - *
SHA-256("") = - * e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 - * - *

The HashedId3 derived from this hash corresponds to the following: - * - *
HashedId3 = 52b855. - */ - HashedId3 ::= OCTET STRING (SIZE(3)) - -/** - * @class SequenceOfHashedId3 - * - * @brief This type is used for clarity of definitions. - */ - SequenceOfHashedId3 ::= SEQUENCE OF HashedId3 - -/** - * @class HashedId8 - * - * @brief This type contains the truncated hash of another data structure. - * The HashedId8 for a given data structure is calculated by calculating the - * hash of the encoded data structure and taking the low-order eight bytes of - * the hash output. If the data structure is subject to canonicalization it - * is canonicalized before hashing. The low-order eight bytes are the last - * eight bytes of the hash when represented in network byte order. See - * Example below. - * - *

The hash algorithm to be used to calculate a HashedId8 within a - * structure depends on the context. In this standard, for each structure - * that includes a HashedId8 field, the corresponding text indicates how the - * hash algorithm is determined. - * - *

Example: Consider the SHA-256 hash of the empty string: - * - *
SHA-256("") = - * e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 - * - *

The HashedId8 derived from this hash corresponds to the following: - * - *
HashedId8 = a495991b7852b855. - */ - HashedId8 ::= OCTET STRING (SIZE(8)) - -/** - * @class HashedId10 - * - * @brief This type contains the truncated hash of another data structure. - * The HashedId10 for a given data structure is calculated by calculating the - * hash of the encoded data structure and taking the low-order ten bytes of - * the hash output. If the data structure is subject to canonicalization it - * is canonicalized before hashing. The low-order ten bytes are the last ten - * bytes of the hash when represented in network byte order. See Example below. - * - *

The hash algorithm to be used to calculate a HashedId10 within a - * structure depends on the context. In this standard, for each structure - * that includes a HashedId10 field, the corresponding text indicates how the - * hash algorithm is determined. - * - *

Example: Consider the SHA-256 hash of the empty string: - * - *
SHA-256("") = - * e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 - * - *

The HashedId10 derived from this hash corresponds to the following: - * - *
HashedId10 = 934ca495991b7852b855. - */ - HashedId10 ::= OCTET STRING (SIZE(10)) - -/** - * @class HashedId32 - * - * @brief This type contains the truncated hash of another data structure. - * The HashedId32 for a given data structure is calculated by calculating the - * hash of the encoded data structure and taking the low-order thirty two - * bytes of the hash output. If the data structure is subject to - * canonicalization it is canonicalized before hashing. The low-order thirty - * two bytes are the last thirty two bytes of the hash when represented in - * network byte order. See Example below. - * - *

The hash algorithm to be used to calculate a HashedId32 within a - * structure depends on the context. In this standard, for each structure - * that includes a HashedId32 field, the corresponding text indicates how the - * hash algorithm is determined. - * - *

Example: Consider the SHA-256 hash of the empty string: - * - *
SHA-256("") = - * e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 - * - *

The HashedId32 derived from this hash corresponds to the following: - * - *
HashedId32 = - * e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855. - */ - HashedId32 ::= OCTET STRING (SIZE(32)) - ---***************************************************************************-- --- Time Structures -- ---***************************************************************************-- - -/** - * @class Time32 - * - * @brief This type gives the number of (TAI) seconds since 00:00:00 UTC, 1 - * January, 2004. - */ - Time32 ::= Uint32 - -/** - * @class Time64 - * - * @brief This type gives the number of (TAI) microseconds since 00:00:00 - * UTC, 1 January, 2004. - */ - Time64 ::= Uint64 - -/** - * @class ValidityPeriod - * - * @brief This structure gives the validity period of a certificate. The - * start of the validity period is given by start and the end is given by - * start + duration. - * - * @param start contains the starting time of the validity period. - * - * @param duration contains the duration of the validity period. - */ - ValidityPeriod ::= SEQUENCE { - start Time32, - duration Duration - } - -/** - * @class Duration - * - * @brief This structure represents the duration of validity of a - * certificate. The Uint16 value is the duration, given in the units denoted - * by the indicated choice. A year is considered to be 31556952 seconds, - * which is the average number of seconds in a year; if it is desired to map - * years more closely to wall-clock days, this can be done using the hours - * choice for up to seven years and the sixtyHours choice for up to 448. In - * this structure: - * - * @param microseconds contains the duration in microseconds. - * - * @param milliseconds contains the duration in milliseconds. - * - * @param seconds contains the duration in seconds. - * - * @param minutes contains the duration in minutes. - * - * @param hours contains the duration in hours. - * - * @param sixtyHours contains the duration in sixty-hour periods. - * - * @param years contains the duration in years. 
- */ - Duration ::= CHOICE { - microseconds Uint16, - milliseconds Uint16, - seconds Uint16, - minutes Uint16, - hours Uint16, - sixtyHours Uint16, - years Uint16 - } - - ---***************************************************************************-- --- Location Structures -- ---***************************************************************************-- - -/** - * @class GeographicRegion - * - * @brief This structure represents a geographic region of a specified form. - * A certificate is not valid if any part of the region indicated in its - * scope field lies outside the region indicated in the scope of its issuer. - * - *

Critical information fields: - *
    - *
  • If present, this is a critical information field as defined in 5.2.6. - * An implementation that does not recognize the indicated CHOICE when - * verifying a signed SPDU shall indicate that the signed SPDU is invalid.
  • - * - *
  • If selected, rectangularRegion is a critical information field as - * defined in 5.2.6. An implementation that does not support the number of - * RectangularRegion in rectangularRegions when verifying a signed SPDU shall - * indicate that the signed SPDU is invalid. A compliant implementation shall - * support rectangularRegions fields containing at least eight entries.
  • - * - *
  • If selected, identifiedRegion is a critical information field as - * defined in 5.2.6. An implementation that does not support the number of - * IdentifiedRegion in identifiedRegion shall reject the signed SPDU as - * invalid. A compliant implementation shall support identifiedRegion fields - * containing at least eight entries.
  • - *
- * - * Parameters: - * - * @param circularRegion contains a single instance of the CircularRegion - * structure. - * - * @param rectangularRegion is an array of RectangularRegion structures - * containing at least one entry. This field is interpreted as a series of - * rectangles, which may overlap or be disjoint. The permitted region is any - * point within any of the rectangles. - * - * @param polygonalRegion contains a single instance of the PolygonalRegion - * structure. - * - * @param identifiedRegion is an array of IdentifiedRegion structures - * containing at least one entry. The permitted region is any point within - * any of the identified regions. - */ - GeographicRegion ::= CHOICE { - circularRegion CircularRegion, - rectangularRegion SequenceOfRectangularRegion, - polygonalRegion PolygonalRegion, - identifiedRegion SequenceOfIdentifiedRegion, - ... - } - -/** - * @class CircularRegion - * - * @brief This structure specifies a circle with its center at center, its - * radius given in meters, and located tangential to the reference ellipsoid. - * The indicated region is all the points on the surface of the reference - * ellipsoid whose distance to the center point over the reference ellipsoid - * is less than or equal to the radius. A point which contains an elevation - * component is considered to be within the circular region if its horizontal - * projection onto the reference ellipsoid lies within the region. - */ - CircularRegion ::= SEQUENCE { - center TwoDLocation, - radius Uint16 - } - -/** - * @class RectangularRegion - * - * @brief This structure specifies a rectangle formed by connecting in - * sequence: (northWest.latitude, northWest.longitude), (southEast.latitude, - * northWest.longitude), (southEast.latitude, southEast.longitude), and - * (northWest.latitude, southEast.longitude). The points are connected by - * lines of constant latitude or longitude. 
A point which contains an - * elevation component is considered to be within the rectangular region if - * its horizontal projection onto the reference ellipsoid lies within the - * region. A RectangularRegion is valid only if the northWest value is north - * and west of the southEast value, i.e., the two points cannot have equal - * latitude or equal longitude. - */ - RectangularRegion ::= SEQUENCE { - northWest TwoDLocation, - southEast TwoDLocation - } - -/** - * @class SequenceOfRectangularRegion - * - * @brief This type is used for clarity of definitions. - */ - SequenceOfRectangularRegion ::= SEQUENCE OF RectangularRegion - -/** - * @class PolygonalRegion - * - * @brief This structure defines a region using a series of distinct - * geographic points, defined on the surface of the reference ellipsoid. The - * region is specified by connecting the points in the order they appear, - * with each pair of points connected by the geodesic on the reference - * ellipsoid. The polygon is completed by connecting the final point to the - * first point. The allowed region is the interior of the polygon and its - * boundary. - * - *

A point which contains an elevation component is considered to be - * within the polygonal region if its horizontal projection onto the - * reference ellipsoid lies within the region. - * - *

A valid PolygonalRegion contains at least three points. In a valid - * PolygonalRegion, the implied lines that make up the sides of the polygon - * do not intersect. - * - *

Critical information fields: - *
    - *
  • If present, this is a critical information field as defined in 5.2.6. - * An implementation that does not support the number of TwoDLocation in the - * PolygonalRegion when verifying a signed SPDU shall indicate that the signed - * SPDU is invalid. A compliant implementation shall support PolygonalRegions - * containing at least eight TwoDLocation entries.
  • - *
- */ - PolygonalRegion ::= SEQUENCE SIZE (3..MAX) OF TwoDLocation - -/** - * @class TwoDLocation - * - * @brief This structure is used to define validity regions for use in - * certificates. The latitude and longitude fields contain the latitude and - * longitude as defined above. - * - *

NOTE: This data structure is consistent with the location encoding - * used in SAE J2735, except that values 900 000 001 for latitude (used to - * indicate that the latitude was not available) and 1 800 000 001 for - * longitude (used to indicate that the longitude was not available) are not - * valid. - */ - TwoDLocation ::= SEQUENCE { - latitude Latitude, - longitude Longitude - } - -/** - * @class IdentifiedRegion - * - * @brief This structure indicates the region of validity of a certificate - * using region identifiers. - * - *

Critical information fields: - *
    - *
  • If present, this is a critical information field as defined in 5.2.6. - * An implementation that does not recognize the indicated CHOICE when - * verifying a signed SPDU shall indicate that the signed SPDU is invalid.
  • - *
- */ - IdentifiedRegion ::= CHOICE { - countryOnly CountryOnly, - countryAndRegions CountryAndRegions, - countryAndSubregions CountryAndSubregions, - ... - } - -/** - * @class SequenceOfIdentifiedRegion - * - * @brief This type is used for clarity of definitions. - */ - SequenceOfIdentifiedRegion ::= SEQUENCE OF IdentifiedRegion - -/** - * @class CountryOnly - * - * @brief This is the integer representation of the country or area - * identifier as defined by the United Nations Statistics Division in October - * 2013 (see normative references in Clause 2). - */ - CountryOnly ::= Uint16 - -/** - * @class CountryAndRegions - * - * @brief In this structure: - * - * @param countryOnly is a CountryOnly as defined above. - * - * @param region identifies one or more regions within the country. If - * countryOnly indicates the United States of America, the values in this - * field identify the state or statistically equivalent entity using the - * integer version of the 2010 FIPS codes as provided by the U.S. Census - * Bureau (see normative references in Clause 2). For other values of - * countryOnly, the meaning of region is not defined in this version of this - * standard. - */ - CountryAndRegions ::= SEQUENCE { - countryOnly CountryOnly, - regions SequenceOfUint8 - } - -/** - * @class CountryAndSubregions - * - * @brief In this structure: - *

Critical information fields: - *
    - *
  • If present, this is a critical information field as defined in 5.2.6. - * An implementation that does not recognize RegionAndSubregions or - * CountryAndSubregions values when verifying a signed SPDU shall indicate - * that the signed SPDU is invalid. A compliant implementation shall support - * CountryAndSubregions containing at least eight RegionAndSubregions - * entries.
  • - *
- * - * Parameters: - * - * @param country is a CountryOnly as defined above. - * - * @param regionAndSubregions identifies one or more subregions within - * country. If country indicates the United States of America, the values in - * this field identify the county or county equivalent entity using the - * integer version of the 2010 FIPS codes as provided by the U.S. Census - * Bureau (see normative references in Clause 2). For other values of - * country, the meaning of regionAndSubregions is not defined in this version - * of this standard. - */ - CountryAndSubregions ::= SEQUENCE { - country CountryOnly, - regionAndSubregions SequenceOfRegionAndSubregions - } - -/** - * @class RegionAndSubregions - * - * @brief In this structure: - *

Critical information fields: - *
    - *
  • RegionAndSubregions is a critical information field as defined in - * 5.2.5. An implementation that does not detect or recognize the the region - * or subregions values when verifying a signed SPDU shall indicate that the - * signed SPDU is invalid.
  • - *
- * - * Parameters: - * - * @param region identifies a region within a country as specified under - * CountryAndRegions. - * - * @param subregions identifies one or more subregions as specified under - * CountryAndSubregions. - */ - RegionAndSubregions ::= SEQUENCE { - region Uint8, - subregions SequenceOfUint16 - } - -/** - * @class SequenceOfRegionAndSubregions - * - * @brief This type is used for clarity of definitions. - */ - SequenceOfRegionAndSubregions ::= SEQUENCE OF RegionAndSubregions - -/** - * @class ThreeDLocation - * - * @brief This structure contains an estimate of 3D location. The details of - * the structure are given in the definitions of the individual fields below. - * - *

NOTE: The units used in this data structure are consistent with the - * location data structures used in SAE J2735, though the encoding is - * incompatible. - */ - ThreeDLocation ::= SEQUENCE { - latitude Latitude, - longitude Longitude, - elevation Elevation - } - -/** - * @class Latitude - * - * @brief This type contains an INTEGER encoding an estimate of the latitude - * with precision 1/10th microdegree relative to the World Geodetic System - * (WGS)-84 datum as defined in NIMA Technical Report TR8350.2. - */ - Latitude ::= NinetyDegreeInt - -/** - * @class Longitude - * - * @brief This type contains an INTEGER encoding an estimate of the longitude - * with precision 1/10th microdegree relative to the World Geodetic System - * (WGS)-84 datum as defined in NIMA Technical Report TR8350.2. - */ - Longitude ::= OneEightyDegreeInt - -/** - * @class Elevation - * - * @brief This structure contains an estimate of the geodetic altitude above - * or below the WGS84 ellipsoid. The 16-bit value is interpreted as an - * integer number of decimeters representing the height above a minimum - * height of −409.5 m, with the maximum height being 6143.9 m. - */ - Elevation ::= Uint16 - -/** - * @class NinetyDegreeInt - * - * @brief The integer in the latitude field is no more than 900,000,000 and - * no less than −900,000,000, except that the value 900,000,001 is used to - * indicate the latitude was not available to the sender. - */ - NinetyDegreeInt ::= INTEGER { - min (-900000000), - max (900000000), - unknown (900000001) - } (-900000000..900000001) - -/** - * @class KnownLatitude - * - * @brief The known latitudes are from -900,000,000 to +900,000,000 in 0.1 - * microdegree intervals. - */ - KnownLatitude ::= NinetyDegreeInt (min..max) - -/** - * @class UnknownLatitude - * - * @brief The value 900,000,001 indicates that the latitude was not - * available to the sender. 
- */ - UnknownLatitude ::= NinetyDegreeInt (unknown) - -/** - * @class OneEightyDegreeInt - * - * @brief The integer in the longitude field is no more than 1,800,000,000 - * and no less than −1,799,999,999, except that the value 1,800,000,001 is - * used to indicate that the longitude was not available to the sender. - */ - OneEightyDegreeInt ::= INTEGER { - min (-1799999999), - max (1800000000), - unknown (1800000001) - } (-1799999999..1800000001) - -/** - * @class KnownLongitude - * - * @brief The known longitudes are from -1,799,999,999 to +1,800,000,000 in - * 0.1 microdegree intervals. - */ - KnownLongitude ::= OneEightyDegreeInt (min..max) - -/** - * @class UnknownLongitude - * - * @brief The value 1,800,000,001 indicates that the longitude was not - * available to the sender. - */ - UnknownLongitude ::= OneEightyDegreeInt (unknown) - - ---***************************************************************************-- --- Crypto Structures -- ---***************************************************************************-- - -/** - * @class Signature - * - * @brief This structure represents a signature for a supported public key - * algorithm. It may be contained within SignedData or Certificate. - * - *

Critical information fields: If present, this is a critical - * information field as defined in 5.2.5. An implementation that does not - * recognize the indicated CHOICE for this type when verifying a signed SPDU - * shall indicate that the signed SPDU is invalid. - */ - Signature ::= CHOICE { - ecdsaNistP256Signature EcdsaP256Signature, - ecdsaBrainpoolP256r1Signature EcdsaP256Signature, - ..., - ecdsaBrainpoolP384r1Signature EcdsaP384Signature, - ecdsaNistP384Signature EcdsaP384Signature - } - -/** - * @class EcdsaP256Signature - * - * @brief This structure represents an ECDSA signature. The signature is - * generated as specified in 5.3.1. - * - *

If the signature process followed the specification of FIPS 186-4 - * and output the integer r, r is represented as an EccP256CurvePoint - * indicating the selection x-only. - * - *

If the signature process followed the specification of SEC 1 and - * output the elliptic curve point R to allow for fast verification, R is - * represented as an EccP256CurvePoint indicating the choice compressed-y-0, - * compressed-y-1, or uncompressed at the sender’s discretion. - * - *

Encoding considerations: If this structure is encoded for hashing, - * the EccP256CurvePoint in rSig shall be taken to be of form x-only. - * - *

NOTE: When the signature is of form x-only, the x-value in rSig is - * an integer mod n, the order of the group; when the signature is of form - * compressed-y-*, the x-value in rSig is an integer mod p, the underlying - * prime defining the finite field. In principle this means that to convert a - * signature from form compressed-y-* to form x-only, the x-value should be - * checked to see if it lies between n and p and reduced mod n if so. In - * practice this check is unnecessary: Haase’s Theorem states that difference - * between n and p is always less than 2*square-root(p), and so the chance - * that an integer lies between n and p, for a 256-bit curve, is bounded - * above by approximately square-root(p)/p or 2^(−128). For the 256-bit - * curves in this standard, the exact values of n and p in hexadecimal are: - * - *

NISTp256: - *
    - *
  • p = FFFFFFFF00000001000000000000000000000000FFFFFFFFFFFFFFFFFFFFFFFF - *
  • - *
  • n = FFFFFFFF00000000FFFFFFFFFFFFFFFFBCE6FAADA7179E84F3B9CAC2FC632551 - *
  • - *
- * - * Brainpoolp256: - *
    - *
  • p = A9FB57DBA1EEA9BC3E660A909D838D726E3BF623D52620282013481D1F6E5377 - *
  • - *
  • n = A9FB57DBA1EEA9BC3E660A909D838D718C397AA3B561A6F7901E0E82974856A7 - *
  • - *
- */ - EcdsaP256Signature ::= SEQUENCE { - rSig EccP256CurvePoint, - sSig OCTET STRING (SIZE (32)) - } - -/** - * @class EcdsaP384Signature - * - * @brief This structure represents an ECDSA signature. The signature is - * generated as specified in 5.3.1. - * - *

If the signature process followed the specification of FIPS 186-4 - * and output the integer r, r is represented as an EccP384CurvePoint - * indicating the selection x-only. - * - *

If the signature process followed the specification of SEC 1 and - * output the elliptic curve point R to allow for fast verification, R is - * represented as an EccP384CurvePoint indicating the choice compressed-y-0, - * compressed-y-1, or uncompressed at the sender’s discretion. - * - *

Encoding considerations: If this structure is encoded for hashing, - * the EccP256CurvePoint in rSig shall be taken to be of form x-only. - * - *

NOTE: When the signature is of form x-only, the x-value in rSig is - * an integer mod n, the order of the group; when the signature is of form - * compressed-y-*, the x-value in rSig is an integer mod p, the underlying - * prime defining the finite field. In principle this means that to convert a - * signature from form compressed-y-* to form x-only, the x-value should be - * checked to see if it lies between n and p and reduced mod n if so. In - * practice this check is unnecessary: Haase’s Theorem states that difference - * between n and p is always less than 2*square-root(p), and so the chance - * that an integer lies between n and p, for a 384-bit curve, is bounded - * above by approximately square-root(p)/p or 2^(−192). For the 384-bit curve - * in this standard, the exact values of n and p in hexadecimal are: - *
    - *
  • p = 8CB91E82A3386D280F5D6F7E50E641DF152F7109ED5456B412B1DA197FB71123 - * ACD3A729901D1A71874700133107EC53
  • - * - *
  • n = 8CB91E82A3386D280F5D6F7E50E641DF152F7109ED5456B31F166E6CAC0425A7 - * CF3AB6AF6B7FC3103B883202E9046565
  • - *
- */ - EcdsaP384Signature ::= SEQUENCE { - rSig EccP384CurvePoint, - sSig OCTET STRING (SIZE (48)) - } - -/** - * @class EccP256CurvePoint - * - * @brief This structure specifies a point on an elliptic curve in - * Weierstrass form defined over a 256-bit prime number. This encompasses - * both NIST p256 as defined in FIPS 186-4 and Brainpool p256r1 as defined in - * RFC 5639. The fields in this structure are OCTET STRINGS produced with the - * elliptic curve point encoding and decoding methods defined in subclause - * 5.5.6 of IEEE Std 1363-2000. The x-coordinate is encoded as an unsigned - * integer of length 32 octets in network byte order for all values of the - * CHOICE; the encoding of the y-coordinate y depends on whether the point is - * x-only, compressed, or uncompressed. If the point is x-only, y is omitted. - * If the point is compressed, the value of type depends on the least - * significant bit of y: if the least significant bit of y is 0, type takes - * the value compressed-y-0, and if the least significant bit of y is 1, type - * takes the value compressed-y-1. If the point is uncompressed, y is encoded - * explicitly as an unsigned integer of length 32 octets in network byte order. - */ - EccP256CurvePoint ::= CHOICE { - x-only OCTET STRING (SIZE (32)), - fill NULL, - compressed-y-0 OCTET STRING (SIZE (32)), - compressed-y-1 OCTET STRING (SIZE (32)), - uncompressedP256 SEQUENCE { - x OCTET STRING (SIZE (32)), - y OCTET STRING (SIZE (32)) - } - } - -/** - * @class EccP384CurvePoint - * - * @brief This structure specifies a point on an elliptic curve in - * Weierstrass form defined over a 384-bit prime number. The only supported - * such curve in this standard is Brainpool p384r1 as defined in RFC 5639. - * The fields in this structure are OCTET STRINGS produced with the elliptic - * curve point encoding and decoding methods defined in subclause 5.5.6 of - * IEEE Std 1363-2000. 
The x-coordinate is encoded as an unsigned integer of - * length 48 octets in network byte order for all values of the CHOICE; the - * encoding of the y-coordinate y depends on whether the point is x-only, - * compressed, or uncompressed. If the point is x-only, y is omitted. If the - * point is compressed, the value of type depends on the least significant - * bit of y: if the least significant bit of y is 0, type takes the value - * compressed-y-0, and if the least significant bit of y is 1, type takes the - * value compressed-y-1. If the point is uncompressed, y is encoded - * explicitly as an unsigned integer of length 48 octets in network byte order. - */ - EccP384CurvePoint ::= CHOICE { - x-only OCTET STRING (SIZE (48)), - fill NULL, - compressed-y-0 OCTET STRING (SIZE (48)), - compressed-y-1 OCTET STRING (SIZE (48)), - uncompressedP384 SEQUENCE { - x OCTET STRING (SIZE (48)), - y OCTET STRING (SIZE (48)) - } - } - -/** - * @class SymmAlgorithm - * - * @brief This enumerated value indicates supported symmetric algorithms. The - * only symmetric algorithm supported in this version of this standard is - * AES-CCM as specified in 5.3.7. - */ - SymmAlgorithm ::= ENUMERATED { - aes128Ccm, - ... - } - -/** - * @class HashAlgorithm - * - * @brief This structure identifies a hash algorithm. The value is sha256, - * indicates SHA-256 as specified in 5.3.3. The value sha384 indicates - * SHA-384 as specified in 5.3.3. - * - *

Critical information fields: This is a critical information - * field as defined in 5.2.6. An implementation that does not recognize the - * enumerated value of this type in a signed SPDU when verifying a signed - * SPDU shall indicate that the signed SPDU is invalid. - */ - HashAlgorithm ::= ENUMERATED { - sha256, - ..., - sha384 - } - -/** - * @class EciesP256EncryptedKey - * - * @brief This data structure is used to transfer a 16-byte symmetric key - * encrypted using ECIES as specified in IEEE Std 1363a-2004. - * - *

Encryption and decryption are carried out as specified in 5.3.4. - * - *

Parameters: - * - * @param v is the sender’s ephemeral public key, which is the output V from - * encryption as specified in 5.3.4. - * - * @param c is the encrypted symmetric key, which is the output C from - * encryption as specified in 5.3.4. The algorithm for the symmetric key is - * identified by the CHOICE indicated in the following SymmetricCiphertext. - * - * @param t is the authentication tag, which is the output tag from - * encryption as specified in 5.3.4. - */ - EciesP256EncryptedKey ::= SEQUENCE { - v EccP256CurvePoint, - c OCTET STRING (SIZE (16)), - t OCTET STRING (SIZE (16)) - } - -/** - * @class EncryptionKey - * - * @brief This structure contains an encryption key, which may be a public or - * a symmetric key. - */ - EncryptionKey ::= CHOICE { - public PublicEncryptionKey, - symmetric SymmetricEncryptionKey - } - -/** - * @class PublicEncryptionKey - * - * @brief This structure specifies a public encryption key and the associated - * symmetric algorithm which is used for bulk data encryption when encrypting - * for that public key. - */ - PublicEncryptionKey ::= SEQUENCE { - supportedSymmAlg SymmAlgorithm, - publicKey BasePublicEncryptionKey - } - -/** - * @class BasePublicEncryptionKey - * - * @brief This structure specifies the bytes of a public encryption key for a - * particular algorithm. The only algorithm supported is ECIES over either - * the NIST P256 or the Brainpool P256r1 curve as specified in 5.3.4. - */ - BasePublicEncryptionKey ::= CHOICE { - eciesNistP256 EccP256CurvePoint, - eciesBrainpoolP256r1 EccP256CurvePoint, - ... - } - -/** - * @class PublicVerificationKey - * - * @brief This structure represents a public key and states with what - * algorithm the public key is to be used. Cryptographic mechanisms are - * defined in 5.3. - * - *

An EccP256CurvePoint or EccP384CurvePoint within a - * PublicVerificationKey structure is invalid if it indicates the choice - * x-only. - * - *

Critical information fields: If present, this is a critical - * information field as defined in 5.2.6. An implementation that does not - * recognize the indicated CHOICE when verifying a signed SPDU shall indicate - * that the signed SPDU is invalid. - */ - PublicVerificationKey ::= CHOICE { - ecdsaNistP256 EccP256CurvePoint, - ecdsaBrainpoolP256r1 EccP256CurvePoint, - ..., - ecdsaBrainpoolP384r1 EccP384CurvePoint, - ecdsaNistP384 EccP384CurvePoint - } - -/** - * @class SymmetricEncryptionKey - * - * @brief This structure provides the key bytes for use with an identified - * symmetric algorithm. The only supported symmetric algorithm is AES-128 in - * CCM mode as specified in 5.3.7. - */ - SymmetricEncryptionKey ::= CHOICE { - aes128Ccm OCTET STRING(SIZE(16)), - ... - } - - ---***************************************************************************-- --- PSID / ITS-AID -- ---***************************************************************************-- - -/** - * @class PsidSsp - * - * @brief This structure represents the permissions that the certificate - * holder has with respect to data for a single application area, identified - * by a Psid. If the ServiceSpecificPermissions field is omitted, it - * indicates that the certificate holder has the default permissions - * associated with that Psid. - * - *

Consistency with signed SPDU. As noted in 5.1.1, - * consistency between the SSP and the signed SPDU is defined by rules - * specific to the given PSID and is out of scope for this standard. - * - *

Consistency with issuing certificate. - * - *

If a certificate has an appPermissions entry A for which the ssp - * field is omitted, A is consistent with the issuing certificate if the - * issuing certificate contains a PsidSspRange P for which the following holds: - *
    - *
  • The psid field in P is equal to the psid field in A and one of the - * following is true:
  • - *
      - *
    • The sspRange field in P indicates all.
    • - * - *
    • The sspRange field in P indicates opaque and one of the entries in - * opaque is an OCTET STRING of length 0.
    • - *
    - *
- * - * For consistency rules for other forms of the ssp field, see the - * following subclauses. - */ - PsidSsp ::= SEQUENCE { - psid Psid, - ssp ServiceSpecificPermissions OPTIONAL - } - -/** - * @class SequenceOfPsidSsp - * - * @brief This type is used for clarity of definitions. - */ - SequenceOfPsidSsp ::= SEQUENCE OF PsidSsp - -/** - * @class Psid - * - * @brief This type represents the PSID defined in IEEE Std 1609.12. - */ - Psid ::= INTEGER (0..MAX) - -/** - * @class SequenceOfPsid - * - * @brief This type is used for clarity of definitions. - */ - SequenceOfPsid ::= SEQUENCE OF Psid - -/** - * @class ServiceSpecificPermissions - * - * @brief This structure represents the Service Specific Permissions (SSP) - * relevant to a given entry in a PsidSsp. The meaning of the SSP is specific - * to the associated Psid. SSPs may be PSID-specific octet strings or - * bitmap-based. See Annex C for further discussion of how application - * specifiers may choose which SSP form to use. - * - *

Consistency with issuing certificate. - * - *

If a certificate has an appPermissions entry A for which the ssp - * field is opaque, A is consistent with the issuing certificate if the - * issuing certificate contains one of the following: - *
    - *
  • (OPTION 1) A SubjectPermissions field indicating the choice all and - * no PsidSspRange field containing the psid field in A;
  • - * - *
  • (OPTION 2) A PsidSspRange P for which the following holds:
  • - *
      - *
    • The psid field in P is equal to the psid field in A and one of the - * following is true:
    • - *
        - *
      • The sspRange field in P indicates all.
      • - * - *
      • The sspRange field in P indicates opaque and one of the entries in - * the opaque field in P is an OCTET STRING identical to the opaque field in - * A.
      • - *
      - *
    - *
- * - * For consistency rules for other types of ServiceSpecificPermissions, - * see the following subclauses. - */ - ServiceSpecificPermissions ::= CHOICE { - opaque OCTET STRING (SIZE(0..MAX)), - ..., - bitmapSsp BitmapSsp - } - -/** - * @class BitmapSsp - * - * @brief This structure represents a bitmap representation of a SSP. The - * mapping of the bits of the bitmap to constraints on the signed SPDU is - * PSID-specific. - * - *

Consistency with issuing certificate. - * - *

If a certificate has an appPermissions entry A for which the ssp - * field is bitmapSsp, A is consistent with the issuing certificate if the - * issuing certificate contains one of the following: - *
    - *
  • (OPTION 1) A SubjectPermissions field indicating the choice all and - * no PsidSspRange field containing the psid field in A;
  • - * - *
  • (OPTION 2) A PsidSspRange P for which the following holds:
  • - *
      - *
    • The psid field in P is equal to the psid field in A and one of the - * following is true:
    • - *
        - *
      • EITHER The sspRange field in P indicates all
      • - * - *
      • OR The sspRange field in P indicates bitmapSspRange and for every - * bit set to 1 in the sspBitmask in P, the bit in the identical position in - * the sspValue in A is set equal to the bit in that position in the - * sspValue in P.
      • - *
      - *
    - *
- * - * NOTE: A BitmapSsp B is consistent with a BitmapSspRange R if for every - * bit set to 1 in the sspBitmask in R, the bit in the identical position in - * B is set equal to the bit in that position in the sspValue in R. For each - * bit set to 0 in the sspBitmask in R, the corresponding bit in the - * identical position in B may be freely set to 0 or 1, i.e., if a bit is - * set to 0 in the sspBitmask in R, the value of corresponding bit in the - * identical position in B has no bearing on whether B and R are consistent. - */ - BitmapSsp ::= OCTET STRING (SIZE(0..31)) - -/** - * @class PsidSspRange - * - * @brief This structure represents the certificate issuing or requesting - * permissions of the certificate holder with respect to one particular set - * of application permissions. - * - * @param psid identifies the application area. - * - * @param sspRange identifies the SSPs associated with that PSID for which - * the holder may issue or request certificates. If sspRange is omitted, the - * holder may issue or request certificates for any SSP for that PSID. - */ - PsidSspRange ::= SEQUENCE { - psid Psid, - sspRange SspRange OPTIONAL - } - -/** - * @class SequenceOfPsidSspRange - * - * @brief This type is used for clarity of definitions. - */ - SequenceOfPsidSspRange ::= SEQUENCE OF PsidSspRange - -/** - * @class SspRange - * - * @brief This structure identifies the SSPs associated with a PSID for - * which the holder may issue or request certificates. - * - *

Consistency with issuing certificate. - * - *

If a certificate has a PsidSspRange A for which the ssp field is - * opaque, A is consistent with the issuing certificate if the issuing - * certificate contains one of the following: - *
    - *
  • (OPTION 1) A SubjectPermissions field indicating the choice all and - * no PsidSspRange field containing the psid field in A;
  • - * - *
  • (OPTION 2) a PsidSspRange P for which the following holds:
  • - *
      - *
    • The psid field in P is equal to the psid field in A and one of the - * following is true:
    • - *
        - *
      • The sspRange field in P indicates all.
      • - * - *
      • The sspRange field in P indicates opaque, and the sspRange field in - * A indicates opaque, and every OCTET STRING within the opaque in A is a - * duplicate of an OCTET STRING within the opaque in P.
      • - *
      - *
    - *
- * - * If a certificate has a PsidSspRange A for which the ssp field is all, - * A is consistent with the issuing certificate if the issuing certificate - * contains a PsidSspRange P for which the following holds: - *
    - *
  • (OPTION 1) A SubjectPermissions field indicating the choice all and - * no PsidSspRange field containing the psid field in A;
  • - * - *
  • (OPTION 2) A PsidSspRange P for which the psid field in P is equal to - * the psid field in A and the sspRange field in P indicates all.
  • - *
- * - * For consistency rules for other types of SspRange, see the following - * subclauses. - * - *

NOTE: The choice "all" may also be indicated by omitting the - * SspRange in the enclosing PsidSspRange structure. Omitting the SspRange is - * preferred to explicitly indicating "all". - */ - SspRange ::= CHOICE { - opaque SequenceOfOctetString, - all NULL, - ... , - bitmapSspRange BitmapSspRange - } - -/** - * @class BitmapSspRange - * - * @brief This structure represents a bitmap representation of a SSP. The - * sspValue indicates permissions. The sspBitmask contains an octet string - * used to permit or constrain sspValue fields in issued certificates. The - * sspValue and sspBitmask fields shall be of the same length. - * - *

Consistency with issuing certificate. - * - *

If a certificate has an PsidSspRange value P for which the - * sspRange field is bitmapSspRange, P is consistent with the issuing - * certificate if the issuing certificate contains one of the following: - *
    - *
  • (OPTION 1) A SubjectPermissions field indicating the choice all and - * no PsidSspRange field containing the psid field in P;
  • - * - *
  • (OPTION 2) A PsidSspRange R for which the following holds:
  • - *
      - *
    • The psid field in R is equal to the psid field in P and one of the - * following is true:
    • - *
        - *
      • EITHER The sspRange field in R indicates all
      • - * - *
      • OR The sspRange field in R indicates bitmapSspRange and for every - * bit set to 1 in the sspBitmask in R:
      • - *
          - *
        • The bit in the identical position in the sspBitmask in P is set - * equal to 1, AND
        • - * - *
        • The bit in the identical position in the sspValue in P is set equal - * to the bit in that position in the sspValue in R.
        • - *
        - *
      - *
    - *
- * - *
Reference ETSI TS 103 097 [B7] for more information on bitmask SSPs. - */ - BitmapSspRange ::= SEQUENCE { - sspValue OCTET STRING (SIZE(1..32)), - sspBitmask OCTET STRING (SIZE(1..32)) - } - -/** - * @class SequenceOfOctetString - * - * @brief This type is used for clarity of definitions. - */ - SequenceOfOctetString ::= - SEQUENCE (SIZE (0..MAX)) OF OCTET STRING (SIZE(0..MAX)) - - ---***************************************************************************-- --- Certificate Components -- ---***************************************************************************-- - -/** - * @class SubjectAssurance - * - * @brief This field contains the certificate holder’s assurance level, which - * indicates the security of both the platform and storage of secret keys as - * well as the confidence in this assessment. - * - *

This field is encoded as defined in Table 1, where "A" denotes bit - * fields specifying an assurance level, "R" reserved bit fields, and "C" bit - * fields specifying the confidence. - * - *

Table 1: Bitwise encoding of subject assurance - * - * - * - * - * - * - * - * - * - * - *
Bit number 7 6 5 43 2 1 0
Interpretation A A A RR R C C
- * - * In Table 1, bit number 0 denotes the least significant bit. Bit 7 - * to bit 5 denote the device's assurance levels, bit 4 to bit 2 are reserved - * for future use, and bit 1 and bit 0 denote the confidence. - * - *

The specification of these assurance levels as well as the - * encoding of the confidence levels is outside the scope of the present - * document. It can be assumed that a higher assurance value indicates that - * the holder is more trusted than the holder of a certificate with lower - * assurance value and the same confidence value. - * - *

NOTE: This field was originally specified in ETSI TS 103 097 [B7] - * and future uses of this field are anticipated to be consistent with future - * versions of that document. - */ - SubjectAssurance ::= OCTET STRING (SIZE(1)) - -/** - * @class CrlSeries - * - * @brief This integer identifies a series of CRLs issued under the authority - * of a particular CRACA. - */ - CrlSeries ::= Uint16 - - ---***************************************************************************-- --- Pseudonym Linkage -- ---***************************************************************************-- - -/** - * @class IValue - * - * @brief This atomic type is used in the definition of other data structures. - */ - IValue ::= Uint16 - -/** - * @class Hostname - * - * @brief This is a UTF-8 string as defined in IETF RFC 3629. The contents - * are determined by policy. - */ - Hostname ::= UTF8String (SIZE(0..255)) - -/** - * @class LinkageValue - * - * @brief This is the individual linkage value. See 5.1.3 and 7.3 for details - * of use. - */ - LinkageValue ::= OCTET STRING (SIZE(9)) - -/** - * @class GroupLinkageValue - * - * @brief This is the group linkage value. See 5.1.3 and 7.3 for details of - * use. - */ - GroupLinkageValue ::= SEQUENCE { - jValue OCTET STRING (SIZE(4)), - value OCTET STRING (SIZE(9)) - } - -/** - * @class LaId - * - * @brief This structure contains a LA Identifier for use in the algorithms - * specified in 5.1.3.4. - */ - LaId ::= OCTET STRING (SIZE(2)) - -/** - * @class LinkageSeed - * - * @brief This structure contains a linkage seed value for use in the - * algorithms specified in 5.1.3.4. - */ - LinkageSeed ::= OCTET STRING (SIZE(16)) - -END - ---***************************************************************************-- --- IEEE Std 1609.2: Data Types -- ---***************************************************************************-- - -/** - * @brief NOTE: Section references in this file are to clauses in IEEE Std - * 1609.2 unless indicated otherwise. 
Full forms of acronyms and - * abbreviations used in this file are specified in 3.2. - */ - -Ieee1609Dot2 {iso(1) identified-organization(3) ieee(111) - standards-association-numbered-series-standards(2) wave-stds(1609) - dot2(2) base (1) schema (1) major-version-2(2) minor-version-4(4)} - -DEFINITIONS AUTOMATIC TAGS ::= BEGIN - -EXPORTS ALL; - -IMPORTS - CrlSeries, - EccP256CurvePoint, - EciesP256EncryptedKey, - EncryptionKey, - GeographicRegion, - GroupLinkageValue, - HashAlgorithm, - HashedId3, - HashedId8, - Hostname, - IValue, - LinkageValue, - Opaque, - Psid, - PsidSsp, - PsidSspRange, - PublicEncryptionKey, - PublicVerificationKey, - SequenceOfHashedId3, - SequenceOfPsidSsp, - SequenceOfPsidSspRange, - ServiceSpecificPermissions, - Signature, - SubjectAssurance, - SymmetricEncryptionKey, - ThreeDLocation, - Time64, - Uint3, - Uint8, - Uint16, - Uint32, - ValidityPeriod -FROM Ieee1609Dot2BaseTypes {iso(1) identified-organization(3) ieee(111) - standards-association-numbered-series-standards(2) wave-stds(1609) dot2(2) - base(1) base-types(2) major-version-2(2) minor-version-3(3)} -/* WITH Successors */ - -EtsiOriginatingHeaderInfoExtension -FROM EtsiTs103097ExtensionModule {itu-t(0) identified-organization(4) - etsi(0) itsDomain(5) wg5(5) secHeaders(103097) extension(2) - version-1(1) minor-version-1(1)} -/* WITH Successors */ -; - ---***************************************************************************-- --- Secured Data -- ---***************************************************************************-- - -/** - * @class Ieee1609Dot2Data - * - * @brief This data type is used to contain the other data types in this - * clause. The fields in the Ieee1609Dot2Data have the following meanings: - * - * @param protocolVersion contains the current version of the protocol. The - * version specified in this document is version 3, represented by the - * integer 3. There are no major or minor version numbers. 
- * - * @param content contains the content in the form of an Ieee1609Dot2Content. - */ - Ieee1609Dot2Data ::= SEQUENCE { - protocolVersion Uint8(3), - content Ieee1609Dot2Content - } - -/** - * @class Ieee1609Dot2Content - * - * @brief In this structure: - * - * @param unsecuredData indicates that the content is an OCTET STRING to be - * consumed outside the SDS. - * - * @param signedData indicates that the content has been signed according to - * this standard. - * - * @param encryptedData indicates that the content has been encrypted - * according to this standard. - * - * @param signedCertificateRequest indicates that the content is a - * certificate request. Further specification of certificate requests is not - * provided in this version of this standard. - */ - Ieee1609Dot2Content ::= CHOICE { - unsecuredData Opaque, - signedData SignedData, - encryptedData EncryptedData, - signedCertificateRequest Opaque, - ..., - signedX509CertificateRequest Opaque - } - -/** - * @class SignedData - * - * @brief In this structure: - * - * @param hashId indicates the hash algorithm to be used to generate the hash - * of the message for signing and verification. - * - * @param tbsData contains the data that is hashed as input to the signature. - * - * @param signer determines the keying material and hash algorithm used to - * sign the data. - * - * @param signature contains the digital signature itself, calculated as - * specified in 5.3.1. - *
    - *
  • If signer indicates the choice self, then the signature calculation - * is parameterized as follows:
  • - *
      - *
    • Data input is equal to the COER encoding of the tbsData field - * canonicalized according to the encoding considerations given in 6.3.6.
    • - * - *
    • Verification type is equal to self.
    • - * - *
    • Signer identifier input is equal to the empty string.
    • - *
    - * - *
  • If signer indicates certificate or digest, then the signature - * calculation is parameterized as follows:
  • - *
      - *
    • Data input is equal to the COER encoding of the tbsData field - * canonicalized according to the encoding considerations given in 6.3.6.
    • - * - *
    • Verification type is equal to certificate.
    • - * - *
    • Signer identifier input equal to the COER-encoding of the - * Certificate that is to be used to verify the SPDU, canonicalized according - * to the encoding considerations given in 6.4.3.
    • - *
    - *
- */ - SignedData ::= SEQUENCE { - hashId HashAlgorithm, - tbsData ToBeSignedData, - signer SignerIdentifier, - signature Signature - } - -/** - * @class ToBeSignedData - * - * @brief This structure contains the data to be hashed when generating or - * verifying a signature. See 6.3.4 for the specification of the input to the - * hash. - * - *

Encoding considerations: For encoding considerations - * associated with the headerInfo field, see 6.3.9. - * - *

Parameters: - * - * @param payload contains data that is provided by the entity that invokes - * the SDS. - * - * @param headerInfo contains additional data that is inserted by the SDS. - */ - ToBeSignedData ::= SEQUENCE { - payload SignedDataPayload, - headerInfo HeaderInfo - } - -/** - * @class SignedDataPayload - * - * @brief This structure contains the data payload of a ToBeSignedData. This - * structure contains at least one of data and extDataHash, and may contain - * both. - * - * @param data contains data that is explicitly transported within the - * structure. - * - * @param extDataHash contains the hash of data that is not explicitly - * transported within the structure, and which the creator of the structure - * wishes to cryptographically bind to the signature. For example, if a - * creator wanted to indicate that some large message was still valid, they - * could use the extDataHash field to send a Signed¬Data containing the hash - * of that large message without having to resend the message itself. Whether - * or not extDataHash is used, and how it is used, is SDEE-specific. - */ - SignedDataPayload ::= SEQUENCE { - data Ieee1609Dot2Data OPTIONAL, - extDataHash HashedData OPTIONAL, - ... - } (WITH COMPONENTS {..., data PRESENT} | - WITH COMPONENTS {..., extDataHash PRESENT}) - -/** - * @class HashedData - * - * @brief This structure contains the hash of some data with a specified hash - * algorithm. The hash algorithms supported in this version of this - * standard are SHA-256 (in the root) and SHA-384 (in the first extension). - * The reserved extension is for future use. - * - *

Critical information fields: If present, this is a critical - * information field as defined in 5.2.6. An implementation that does not - * recognize the indicated CHOICE for this type when verifying a signed SPDU - * shall indicate that the signed SPDU is invalid. - */ - HashedData::= CHOICE { - sha256HashedData OCTET STRING (SIZE(32)), - ..., - sha384HashedData OCTET STRING (SIZE(48)), - reserved OCTET STRING (SIZE(32)) - } - -/** - * @class HeaderInfo - * - * @brief This structure contains information that is used to establish - * validity by the criteria of 5.2. - * - *

Encoding considerations: When the structure is encoded in - * order to be digested to generate or check a signature, if encryptionKey is - * present, and indicates the choice public, and contains a - * BasePublicEncryptionKey that is an elliptic curve point (i.e., of - * typeEccP256CurvePoint or EccP384CurvePoint), then the elliptic curve point - * is encoded in compressed form, i.e., such that the choice indicated within - * the Ecc*CurvePoint is compressed-y-0 or compressed-y-1. - * - *

Parameters: - * - * @param psid indicates the application area with which the sender is - * claiming the payload should be associated. - * - * @param generationTime indicates the time at which the structure was - * generated. See 5.2.5.2.2 and 5.2.5.2.3 for discussion of the use of this - * field. - * - * @param expiryTime, if present, contains the time after which the data - * should no longer be considered relevant. If both generationTime and - * expiryTime are present, the signed SPDU is invalid if generationTime is - * not strictly earlier than expiryTime. - * - * @param generationLocation, if present, contains the location at which the - * signature was generated. - * - * @param p2pcdLearningRequest, if present, is used by the SDS to request - * certificates for which it has seen identifiers but does not know the - * entire certificate. A specification of this peer-to-peer certificate - * distribution (P2PCD) mechanism is given in Clause 8. This field is used - * for the out-of-band flavor of P2PCD and shall only be present if - * inlineP2pcdRequest is not present. The HashedId3 is calculated with the - * whole-certificate hash algorithm, determined as described in 6.4.3. - * - * @param missingCrlIdentifier, if present, is used by the SDS to request - * CRLs which it knows to have been issued but have not received. This is - * provided for future use and the associated mechanism is not defined in - * this version of this standard. - * - * @param encryptionKey, if present, is used to indicate that a further - * communication should be encrypted with the indicated key. One possible use - * of this key to encrypt a response is specified in 6.3.35, 6.3.37, and - * 6.3.34. An encryptionKey field of type symmetric should only be used if - * the Signed¬Data containing this field is securely encrypted by some means. 
- * - * @param inlineP2pcdRequest, if present, is used by the SDS to request - * unknown certificates per the inline peer-to-peer certificate distribution - * mechanism is given in Clause 8. This field shall only be present if - * p2pcdLearningRequest is not present. The HashedId3 is calculated with the - * whole-certificate hash algorithm, determined as described in 6.4.3. - * - * @param requestedCertificate, if present, is used by the SDS to provide - * certificates per the “inlineâ€? version of the peer-to-peer certificate - * distribution mechanism given in Clause 8. - * - * @param pduFunctionalType, if present, is used to indicate that the SPDU is - * to be consumed by a process other than an application process as defined - * in ISO 21177 [B14a]. See 6.3.23b for more details. - * - * @param contributedExtensions, if present, is used to provide extension blocks - * defined by identified contributing organizations. - */ - HeaderInfo ::= SEQUENCE { - psid Psid, - generationTime Time64 OPTIONAL, - expiryTime Time64 OPTIONAL, - generationLocation ThreeDLocation OPTIONAL, - p2pcdLearningRequest HashedId3 OPTIONAL, - missingCrlIdentifier MissingCrlIdentifier OPTIONAL, - encryptionKey EncryptionKey OPTIONAL, - ..., - inlineP2pcdRequest SequenceOfHashedId3 OPTIONAL, - requestedCertificate Certificate OPTIONAL, - pduFunctionalType PduFunctionalType OPTIONAL, - contributedExtensions ContributedExtensionBlocks OPTIONAL - } - -/** - * @class MissingCrlIdentifier - * - * @brief This structure may be used to request a CRL that the SSME knows to - * have been issued but has not yet received. It is provided for future use - * and its use is not defined in this version of this standard. - * - * @param cracaId is the HashedId3 of the CRACA, as defined in 5.1.3. The - * HashedId3 is calculated with the whole-certificate hash algorithm, - * determined as described in 6.4.3. - * - * @param crlSeries is the requested CRL Series value. See 5.1.3 for more - * information. 
- */ - MissingCrlIdentifier ::= SEQUENCE { - cracaId HashedId3, - crlSeries CrlSeries, - ... - } - -/** - * @class PduFunctionalType - * - * @brief This data structure identifies the functional entity that is - * intended to consume an SPDU, for the case where that functional entity is - * not an application process but security support services for an - * application process. Further details and the intended use of this field - * are defined in ISO 21177 [B14a]. - * - *

An SPDU in which the pduFunctionalType field is present conforms - * to the security profile for that PduFunctionalType value (given in ISO - * 21177 [B14a]), not to the security profile for Application SPDUs for the - * PSID. - * - * @param tlsHandshake indicates that the Signed SPDU is not to be directly - * consumed as an application PDU but is to be used to provide information - * about the holder’s permissions to a Transport Layer Security (TLS) (IETF - * 5246 [B13], IETF 8446 [B13a]) handshake process operating to secure - * communications to an application process. See IETF [B13b] and ISO 21177 - * [B14a] for further information. - * - * @param iso21177ExtendedAuth indicates that the Signed SPDU is not to be - * directly consumed as an application PDU but is to be used to provide - * additional information about the holder’s permissions to the ISO 21177 - * Security Subsystem for an application process. See ISO 21177 [B14a] for - * further information. - */ - PduFunctionalType ::= INTEGER (0..255) - tlsHandshake PduFunctionalType ::= 1 - iso21177ExtendedAuth PduFunctionalType ::= 2 - -/** - * @class ContributedExtensionBlocks - * - * @brief This data structure defines a list of ContributedExtensionBlock - */ - ContributedExtensionBlocks ::= SEQUENCE (SIZE(1..MAX)) OF ContributedExtensionBlock - -/** - * @class ContributedExtensionBlock - * - * @brief This data structure defines the format of an extension block - * provided by an identified contributor by using the temnplate provided - * in the class IEEE1609DOT2-HEADERINFO-CONTRIBUTED-EXTENSION constraint - * to the objects in the set Ieee1609Dot2HeaderInfoContributedExtensions. - * - * @param contributorId uniquely identifies the contributor - * - * @param extns contains a list of extensions from that contributor. - */ - /* ContributedExtensionBlock ::= SEQUENCE { - contributorId IEEE1609DOT2-HEADERINFO-CONTRIBUTED-EXTENSION. 
- &id({Ieee1609Dot2HeaderInfoContributedExtensions}), - extns SEQUENCE (SIZE(1..MAX)) OF IEEE1609DOT2-HEADERINFO-CONTRIBUTED-EXTENSION. - &Extn({Ieee1609Dot2HeaderInfoContributedExtensions}{@.contributorId}) -}*/ -ContributedExtensionBlock ::= SEQUENCE { - contributorId NULL -} - -/** - * @class IEEE1609DOT2-HEADERINFO-CONTRIBUTED-EXTENSION - * - * @brief This data structure defines the information object class that - * provides a "template" for defining extension blocks. - */ - /* IEEE1609DOT2-HEADERINFO-CONTRIBUTED-EXTENSION ::= CLASS { - &id HeaderInfoContributorId UNIQUE, - &Extn - } WITH SYNTAX {&Extn IDENTIFIED BY &id} */ - - IEEE1609DOT2-HEADERINFO-CONTRIBUTED-EXTENSION ::= SEQUENCE { - id NULL - } - -/** - * @class Ieee1609Dot2HeaderInfoContributedExtensions - * - * @brief This data structure defines the set of ContributedExtensionBlock - * Objects. - * - * @param In this version of the standard, only the type - * EtsiOriginatingHeaderInfoExtension contributed by ETSI is supported. - * The information object EtsiOriginatingHeaderInfoExtension is imported - * from the EtsiTs103097ExtensionModule - */ - Ieee1609Dot2HeaderInfoContributedExtensions - IEEE1609DOT2-HEADERINFO-CONTRIBUTED-EXTENSION ::= { - {EtsiOriginatingHeaderInfoExtension IDENTIFIED BY etsiHeaderInfoContributorId}, - ... - } - -/** - * @class HeaderInfoContributorId - * - * @brief This data structure defines the header info contributor id type - * and its values. - * - * @param In this version of the standard, value 2 is assigned to ETSI. - */ - HeaderInfoContributorId ::= INTEGER (0..255) - etsiHeaderInfoContributorId HeaderInfoContributorId ::= 2 - -/** - * @class SignerIdentifier - * - * @brief This structure allows the recipient of data to determine which - * keying material to use to authenticate the data. It also indicates the - * verification type to be used to generate the hash for verification, as - * specified in 5.3.1. - *
    - *
  • If the choice indicated is digest:
  • - *
      - *
    • The structure contains the HashedId8 of the relevant certificate. The - * HashedId8 is calculated with the whole-certificate hash algorithm, - * determined as described in 6.4.3.
    • - * - *
    • The verification type is certificate and the certificate data - * passed to the hash function as specified in 5.3.1 is the authorization - * certificate.
    • - *
    - * - *
  • If the choice indicated is certificate:
  • - *
      - *
    • The structure contains one or more Certificate structures, in order - * such that the first certificate is the authorization certificate and each - * subsequent certificate is the issuer of the one before it.
    • - * - *
    • The verification type is certificate and the certificate data - * passed to the hash function as specified in 5.3.1 is the authorization - * certificate.
    • - *
    - * - *
  • If the choice indicated is self:
  • - *
      - *
    • The structure does not contain any data beyond the indication that - * the choice value is self.
    • - * - *
    • The verification type is self-signed.
    • - *
    - *
- * - * Critical information fields: - *
    - *
  1. If present, this is a critical information field as defined in 5.2.6. - * An implementation that does not recognize the CHOICE value for this type - * when verifying a signed SPDU shall indicate that the signed SPDU is invalid. - *
  2. - * - *
  3. If present, certificate is a critical information field as defined in - * 5.2.6. An implementation that does not support the number of certificates - * in certificate when verifying a signed SPDU shall indicate that the signed - * SPDU is invalid. A compliant implementation shall support certificate - * fields containing at least one certificate.
  4. - *
- */ - SignerIdentifier ::= CHOICE { - digest HashedId8, - certificate SequenceOfCertificate, - self NULL, - ... - } - ---***************************************************************************-- --- Encrypted Data -- ---***************************************************************************-- - -/** - * @class EncryptedData - * - * @brief This data structure encodes data that has been encrypted to one or - * more recipients using the recipients’ public or symmetric keys as - * specified in 1.1.1. - * - *

Critical information fields: - *
    - *
  • If present, recipients is a critical information field as defined in - * 5.2.6. An implementation that does not support the number of RecipientInfo - * in recipients when decrypted shall indicate that the encrypted SPDU could - * not be decrypted due to unsupported critical information fields. A - * compliant implementation shall support recipients fields containing at - * least eight entries.
  • - *
- * - * Parameters: - * - * @param recipients contains one or more RecipientInfos. These entries may - * be more than one RecipientInfo, and more than one type of RecipientInfo, - * as long as they are all indicating or containing the same data encryption - * key. - * - * @param ciphertext contains the encrypted data. This is the encryption of - * an encoded Ieee1609Dot2Data structure as specified in 5.3.4.2. - */ - EncryptedData ::= SEQUENCE { - recipients SequenceOfRecipientInfo, - ciphertext SymmetricCiphertext - } - -/** - * @class RecipientInfo - * - * @brief This data structure is used to transfer the data encryption key to - * an individual recipient of an EncryptedData. The option pskRecipInfo is - * selected if the EncryptedData was encrypted using the static encryption - * key approach specified in 1.1.1.1. The other options are selected if the - * EncryptedData was encrypted using the ephemeral encryption key approach - * specified in 1.1.1.1. The meanings of the choices are: - * - *

See Annex C.7 for guidance on when it may be appropriate to use - * each of these approaches. - * - * @param pskRecipInfo: The ciphertext was encrypted directly using a - * symmetric key. - * - * @param symmRecipInfo: The data encryption key was encrypted using a - * symmetric key. - * - * @param certRecipInfo: The data encryption key was encrypted using a public - * key encryption scheme, where the public encryption key was obtained from a - * certificate. In this case, the parameter P1 to ECIES as defined in 5.3.4 - * is the hash of the certificate. - * - * @param signedDataRecipInfo: The data encryption key was encrypted using a - * public encryption key, where the encryption key was obtained as the public - * response encryption key from a Signed-Data. In this case, the parameter P1 - * to ECIES as defined in 5.3.4 is the SHA-256 hash of the Ieee1609Dot2Data - * containing the response encryption key. - * - * @param rekRecipInfo: The data encryption key was encrypted using a public - * key that was not obtained from a Signed¬Data. In this case, the parameter - * P1 to ECIES as defined in 5.3.4 is the hash of the empty string. - */ - RecipientInfo ::= CHOICE { - pskRecipInfo PreSharedKeyRecipientInfo, - symmRecipInfo SymmRecipientInfo, - certRecipInfo PKRecipientInfo, - signedDataRecipInfo PKRecipientInfo, - rekRecipInfo PKRecipientInfo - } - -/** - * @class SequenceOfRecipientInfo - * - * @brief This type is used for clarity of definitions. - */ - SequenceOfRecipientInfo ::= SEQUENCE OF RecipientInfo - -/** - * @class PreSharedKeyRecipientInfo - * - * @brief This data structure is used to indicate a symmetric key that may be - * used directly to decrypt a SymmetricCiphertext. It consists of the - * low-order 8 bytes of the SHA-256 hash of the COER encoding of a - * SymmetricEncryptionKey structure containing the symmetric key in question. - * The symmetric key may be established by any appropriate means agreed by - * the two parties to the exchange. 
- */ - PreSharedKeyRecipientInfo ::= HashedId8 - -/** - * @class SymmRecipientInfo - * - * @brief This data structure contains the following fields: - * - * @param recipientId contains the hash of the symmetric key encryption key - * that may be used to decrypt the data encryption key. It consists of the - * low-order 8 bytes of the SHA-256 hash of the COER encoding of a - * SymmetricEncryptionKey structure containing the symmetric key in question. - * The symmetric key may be established by any appropriate means agreed by - * the two parties to the exchange. - * - * @param encKey contains the encrypted data encryption key within an AES-CCM - * ciphertext. - */ - SymmRecipientInfo ::= SEQUENCE { - recipientId HashedId8, - encKey SymmetricCiphertext - } - -/** - * @class PKRecipientInfo - * - * @brief This data structure contains the following fields: - * - * @param recipientId contains the hash of the container for the encryption - * public key as specified in the definition of RecipientInfo. Specifically, - * depending on the choice indicated by the containing RecipientInfo structure: - *
    - *
  • If the containing RecipientInfo structure indicates certRecipInfo, - * this field contains the HashedId8 of the certificate. The HashedId8 is - * calculated with the whole-certificate hash algorithm, determined as - * described in 6.4.3.
  • - * - *
  • If the containing RecipientInfo structure indicates - * signedDataRecipInfo, this field contains the HashedId8 of the - * Ieee1609Dot2Data of type signed that contained the encryption key, with - * that Ieee1609Dot2Data canonicalized per 6.3.4. The HashedId8 is calculated - * with SHA-256.
  • - * - *
  • If the containing RecipientInfo structure indicates rekRecipInfo, - * this field contains the HashedId8 of the COER encoding of a - * PublicEncryptionKey structure containing the response encryption key. The - * HashedId8 is calculated with SHA-256.
  • - *
- * - * @param encKey contains the encrypted key. - */ - PKRecipientInfo ::= SEQUENCE { - recipientId HashedId8, - encKey EncryptedDataEncryptionKey - } - -/** - * @class EncryptedDataEncryptionKey - * - * @brief This data structure contains an encrypted data encryption key. - * - *

Critical information fields: If present and applicable to - * the receiving SDEE, this is a critical information field as defined in - * 5.2.6. If an implementation receives an encrypted SPDU and determines that - * one or more RecipientInfo fields are relevant to it, and if all of those - * RecipientInfos contain an EncryptedDataEncryptionKey such that the - * implementation does not recognize the indicated CHOICE, the implementation - * shall indicate that the encrypted SPDU is not decryptable. - */ - EncryptedDataEncryptionKey ::= CHOICE { - eciesNistP256 EciesP256EncryptedKey, - eciesBrainpoolP256r1 EciesP256EncryptedKey, - ... - } - -/** - * @class SymmetricCiphertext - * - * @brief This data structure encapsulates a ciphertext generated with an - * approved symmetric algorithm. - * - *

Critical information fields: If present, this is a critical - * information field as defined in 5.2.6. An implementation that does not - * recognize the indicated CHOICE value for this type in an encrypted SPDU - * shall reject the SPDU as invalid. - */ - SymmetricCiphertext ::= CHOICE { - aes128ccm AesCcmCiphertext, - ... - } - -/** - * @class AesCcmCiphertext - * - * @brief This data structure encapsulates an encrypted ciphertext for the - * AES-CCM symmetric algorithm. It contains the following fields: - * - *

The ciphertext is 16 bytes longer than the corresponding plaintext. - * - *

The plaintext resulting from a correct decryption of the - * ciphertext is a COER-encoded Ieee1609Dot2Data structure. - * - * @param nonce contains the nonce N as specified in 5.3.7. - * - * @param ccmCiphertext contains the ciphertext C as specified in 5.3.7. - */ - AesCcmCiphertext ::= SEQUENCE { - nonce OCTET STRING (SIZE (12)), - ccmCiphertext Opaque - } - -/** - * @class Countersignature - * - * @brief This data structure is used to perform a countersignature over an - * already-signed SPDU. This is the profile of an Ieee1609Dot2Data containing - * a signedData. The tbsData within content is composed of a payload - * containing the hash (extDataHash) of the externally generated, pre-signed - * SPDU over which the countersignature is performed. - */ - Countersignature ::= Ieee1609Dot2Data (WITH COMPONENTS {..., - content (WITH COMPONENTS {..., - signedData (WITH COMPONENTS {..., - tbsData (WITH COMPONENTS {..., - payload (WITH COMPONENTS {..., - data ABSENT, - extDataHash PRESENT - }), - headerInfo(WITH COMPONENTS {..., - generationTime PRESENT, - expiryTime ABSENT, - generationLocation ABSENT, - p2pcdLearningRequest ABSENT, - missingCrlIdentifier ABSENT, - encryptionKey ABSENT - }) - }) - }) - }) - }) - ---***************************************************************************-- --- Certificates and other Security Management -- ---***************************************************************************-- - -/** - * @class Certificate - * - * @brief This structure is a profile of the structure CertificateBase which - * specifies the valid combinations of fields to transmit implicit and - * explicit certificates. - */ - Certificate ::= CertificateBase (ImplicitCertificate | ExplicitCertificate) - -/** - * @class SequenceOfCertificate - * - * @brief This type is used for clarity of definitions. 
- */ - SequenceOfCertificate ::= SEQUENCE OF Certificate - -/** - * @class CertificateBase - * - * @brief The fields in this structure have the following meaning: - * - *

Encoding considerations: When a certificate is encoded for - * hashing, for example to generate its HashedId8, or when it is to be used - * as the signer identifier information for verification, it is - * canonicalized as follows: - *
    - *
  • The encoding of toBeSigned uses the compressed form for all elliptic - * curve points: that is, those points indicate a choice of compressed-y-0 or - * compressed-y-1.
  • - * - *
  • The encoding of the signature, if present and if an ECDSA signature, - * takes the r value to be an EccP256CurvePoint or EccP384CurvePoint - * indicating the choice x-only.
  • - *
- * - *

Whole-certificate hash: If the entirety of a certificate is - * hashed to calculate a HashedId3, HashedId8, or HashedId10, the algorithm - * used for this purpose is known as the whole-certificate hash. - *
    - *
  • The whole-certificate hash is SHA-256 if the certificate is an - * implicit certificate.
  • - * - *
  • The whole-certificate hash is SHA-256 if the certificate is an - * explicit certificate and toBeSigned.verifyKeyIndicator.verificationKey is - * an EccP256CurvePoint.
  • - * - *
  • The whole-certificate hash is SHA-384 if the certificate is an - * explicit certificate and toBeSigned.verifyKeyIndicator.verificationKey is - * an EccP384CurvePoint.
  • - *
- * - * Parameters: - * - * @param version contains the version of the certificate format. In this - * version of the data structures, this field is set to 3. - * - * @param type states whether the certificate is implicit or explicit. This - * field is set to explicit for explicit certificates and to implicit for - * implicit certificates. See ExplicitCertificate and ImplicitCertificate for - * more details. - * - * @param issuer identifies the issuer of the certificate. - * - * @param toBeSigned is the certificate contents. This field is an input to - * the hash when generating or verifying signatures for an explicit - * certificate, or generating or verifying the public key from the - * reconstruction value for an implicit certificate. The details of how this - * field are encoded are given in the description of the - * ToBeSignedCertificate type. - * - * @param signature is included in an ExplicitCertificate. It is the - * signature, calculated by the signer identified in the issuer field, over - * the hash of toBeSigned. The hash is calculated as specified in 5.3.1, where: - *
    - *
  • Data input is the encoding of toBeSigned following the COER.
  • - * - *
  • Signer identifier input depends on the verification type, which in - * turn depends on the choice indicated by issuer. If the choice indicated by - * issuer is self, the verification type is self-signed and the signer - * identifier input is the empty string. If the choice indicated by issuer is - * not self, the verification type is certificate and the signer identifier - * input is the canonicalized COER encoding of the certificate indicated by - * issuer. The canonicalization is carried out as specified in the Encoding - * considerations section of this subclause.
  • - *
- */ - CertificateBase ::= SEQUENCE { - version Uint8(3), - type CertificateType, - issuer IssuerIdentifier, - toBeSigned ToBeSignedCertificate, - signature Signature OPTIONAL - } - -/** - * @class CertificateType - * - * @brief This enumerated type indicates whether a certificate is explicit or - * implicit. - * - *

Critical information fields: If present, this is a critical - * information field as defined in 5.2.5. An implementation that does not - * recognize the indicated CHOICE for this type when verifying a signed SPDU - * shall indicate that the signed SPDU is invalid. - */ - CertificateType ::= ENUMERATED { - explicit, - implicit, - ... - } - -/** - * @class ImplicitCertificate - * - * @brief This is a profile of the CertificateBase structure providing all - * the fields necessary for an implicit certificate, and no others. - */ - ImplicitCertificate ::= CertificateBase (WITH COMPONENTS {..., - type(implicit), - toBeSigned(WITH COMPONENTS {..., - verifyKeyIndicator(WITH COMPONENTS {reconstructionValue}) - }), - signature ABSENT - }) - -/** - * @class ExplicitCertificate - * - * @brief This is a profile of the CertificateBase structure providing all - * the fields necessary for an explicit certificate, and no others. - */ - ExplicitCertificate ::= CertificateBase (WITH COMPONENTS {..., - type(explicit), - toBeSigned(WITH COMPONENTS {..., - verifyKeyIndicator(WITH COMPONENTS {verificationKey}) - }), - signature PRESENT - }) - -/** - * @class IssuerIdentifier - * - * @brief This structure allows the recipient of a certificate to determine - * which keying material to use to authenticate the certificate. - * - *

If the choice indicated is sha256AndDigest or sha384AndDigest: - *
    - *
  • The structure contains the HashedId8 of the issuing certificate, - * where the certificate is canonicalized as specified in 6.4.3 before - * hashing and the HashedId8 is calculated with the whole-certificate hash - * algorithm, determined as described in 6.4.3.
  • - * - *
  • The hash algorithm to be used to generate the hash of the certificate - * for verification is SHA-256 (in the case of sha256AndDigest) or SHA-384 - * (in the case of sha384AndDigest).
  • - * - *
  • The certificate is to be verified with the public key of the - * indicated issuing certificate.
  • - *
- * - * If the choice indicated is self: - *
    - *
  • The structure indicates what hash algorithm is to be used to generate - * the hash of the certificate for verification.
  • - * - *
  • The certificate is to be verified with the public key indicated by - * the verifyKeyIndicator field in theToBeSignedCertificate.
  • - *
- * - *

Critical information fields: If present, this is a critical - * information field as defined in 5.2.5. An implementation that does not - * recognize the indicated CHOICE for this type when verifying a signed SPDU - * shall indicate that the signed SPDU is invalid. - */ - IssuerIdentifier ::= CHOICE { - sha256AndDigest HashedId8, - self HashAlgorithm, - ..., - sha384AndDigest HashedId8 - } - -/** - * @class ToBeSignedCertificate - * - * @brief The fields in the ToBeSignedCertificate structure have the - * following meaning: - * - *

Encoding considerations: The encoding of toBeSigned which - * is input to the hash uses the compressed form for all public keys and - * reconstruction values that are elliptic curve points: that is, those - * points indicate a choice of compressed-y-0 or compressed-y-1. The encoding - * of the issuing certificate uses the compressed form for all public key and - * reconstruction values and takes the r value of an ECDSA signature, which - * in this standard is an ECC curve point, to be of type x-only. - * - *

For both implicit and explicit certificates, when the certificate - * is hashed to create or recover the public key (in the case of an implicit - * certificate) or to generate or verify the signature (in the case of an - * explicit certificate), the hash is Hash (Data input) || Hash ( - * Signer identifier input), where: - *
    - *
  • Data input is the COER encoding of toBeSigned, canonicalized - * as described above.
  • - * - *
  • Signer identifier input depends on the verification type, - * which in turn depends on the choice indicated by issuer. If the choice - * indicated by issuer is self, the verification type is self-signed and the - * signer identifier input is the empty string. If the choice indicated by - * issuer is not self, the verification type is certificate and the signer - * identifier input is the COER encoding of the canonicalization per 6.4.3 of - * the certificate indicated by issuer.
  • - *
- * - * In other words, for implicit certificates, the value H (CertU) in SEC 4, - * section 3, is for purposes of this standard taken to be H [H - * (canonicalized ToBeSignedCertificate from the subordinate certificate) || - * H (entirety of issuer Certificate)]. See 5.3.2 for further discussion, - * including material differences between this standard and SEC 4 regarding - * how the hash function output is converted from a bit string to an integer. - * - *

NOTE: This encoding of the implicit certificate for hashing has - * been changed from the encoding specified in IEEE Std 1609.2-2013 for - * consistency with the encoding of the explicit certificates. This - * definition of the encoding results in implicit and explicit certificates - * both being hashed as specified in 5.3.1. - * - *

Critical information fields: - *
    - *
  • If present, appPermissions is a critical information field as defined - * in 5.2.6. An implementation that does not support the number of PsidSsp in - * appPermissions shall reject the signed SPDU as invalid. A compliant - * implementation shall support appPermissions fields containing at least - * eight entries.
  • - * - *
  • If present, certIssuePermissions is a critical information field as - * defined in 5.2.6. An implementation that does not support the number of - * PsidGroupPermissions in certIssuePermissions shall reject the signed SPDU - * as invalid. A compliant implementation shall support certIssuePermissions - * fields containing at least eight entries.
  • - * - *
  • If present, certRequestPermissions is a critical information field as - * defined in 5.2.6. An implementation that does not support the number of - * PsidGroupPermissions in certRequestPermissions shall reject the signed - * SPDU as invalid. A compliant implementation shall support - * certRequestPermissions fields containing at least eight entries.
  • - *
- * - * Parameters: - * - * @param id contains information that is used to identify the certificate - * holder if necessary. - * - * @param cracaId identifies the Certificate Revocation Authorization CA - * (CRACA) responsible for certificate revocation lists (CRLs) on which this - * certificate might appear. Use of the cracaId is specified in 5.1.3. The - * HashedId3 is calculated with the whole-certificate hash algorithm, - * determined as described in 6.4.12. - * - * @param crlSeries represents the CRL series relevant to a particular - * Certificate Revocation Authorization CA (CRACA) on which the certificate - * might appear. Use of this field is specified in 5.1.3. - * - * @param validityPeriod contains the validity period of the certificate. - * - * @param region, if present, indicates the validity region of the - * certificate. If it is omitted the validity region is indicated as follows: - *
    - *
  • If enclosing certificate is self-signed, i.e., the choice indicated - * by the issuer field in the enclosing certificate structure is self, the - * certificate is valid worldwide.
  • - * - *
  • Otherwise, the certificate has the same validity region as the - * certificate that issued it.
  • - *
- * - * @param assuranceLevel indicates the assurance level of the certificate - * holder. - * - * @param appPermissions indicates the permissions that the certificate - * holder has to sign application data with this certificate. A valid - * instance of appPermissions contains any particular Psid value in at most - * one entry. - * - * @param certIssuePermissions indicates the permissions that the certificate - * holder has to sign certificates with this certificate. A valid instance of - * this array contains no more than one entry whose psidSspRange field - * indicates all. If the array has multiple entries and one entry has its - * psidSspRange field indicate all, then the entry indicating all specifies - * the permissions for all PSIDs other than the ones explicitly specified in - * the other entries. See the description of PsidGroupPermissions for further - * discussion. - * - * @param certRequestPermissions indicates the permissions that the - * certificate holder has to sign certificate requests with this certificate. - * A valid instance of this array contains no more than one entry whose - * psidSspRange field indicates all. If the array has multiple entries and - * one entry has its psidSspRange field indicate all, then the entry - * indicating all specifies the permissions for all PSIDs other than the ones - * explicitly specified in the other entries. See the description of - * PsidGroupPermissions for further discussion. - * - * @param canRequestRollover indicates that the certificate may be used to - * sign a request for another certificate with the same permissions. This - * field is provided for future use and its use is not defined in this - * version of this standard. - * - * @param encryptionKey contains a public key for encryption for which the - * certificate holder holds the corresponding private key. 
- * - * @param verifyKeyIndicator contains material that may be used to recover - * the public key that may be used to verify data signed by this certificate. - */ - ToBeSignedCertificate ::= SEQUENCE { - id CertificateId, - cracaId HashedId3, - crlSeries CrlSeries, - validityPeriod ValidityPeriod, - region GeographicRegion OPTIONAL, - assuranceLevel SubjectAssurance OPTIONAL, - appPermissions SequenceOfPsidSsp OPTIONAL, - certIssuePermissions SequenceOfPsidGroupPermissions OPTIONAL, - certRequestPermissions SequenceOfPsidGroupPermissions OPTIONAL, - canRequestRollover NULL OPTIONAL, - encryptionKey PublicEncryptionKey OPTIONAL, - verifyKeyIndicator VerificationKeyIndicator, - ..., - flags BIT STRING {cubk (0)} (SIZE (8)) OPTIONAL - } - (WITH COMPONENTS { ..., appPermissions PRESENT} | - WITH COMPONENTS { ..., certIssuePermissions PRESENT} | - WITH COMPONENTS { ..., certRequestPermissions PRESENT}) - -/** - * @class CertificateId - * - * @brief This structure contains information that is used to identify the - * certificate holder if necessary. - * - *

Critical information fields: - *
    - *
  • If present, this is a critical information field as defined in 5.2.6. - * An implementation that does not recognize the choice indicated in this - * field shall reject a signed SPDU as invalid.
  • - *
- * - * Parameters: - * - * @param linkageData is used to identify the certificate for revocation - * purposes in the case of certificates that appear on linked certificate - * CRLs. See 5.1.3 and 7.3 for further discussion. - * - * @param name is used to identify the certificate holder in the case of - * non-anonymous certificates. The contents of this field are a matter of - * policy and should be human-readable. - * - * @param binaryId supports identifiers that are not human-readable. - * - * @param none indicates that the certificate does not include an identifier. - */ - CertificateId ::= CHOICE { - linkageData LinkageData, - name Hostname, - binaryId OCTET STRING(SIZE(1..64)), - none NULL, - ... - } - -/** - * @class LinkageData - * - * @brief This structure contains information that is matched against - * information obtained from a linkage ID-based CRL to determine whether the - * containing certificate has been revoked. See 5.1.3.4 and 7.3 for details - * of use. - */ - LinkageData ::= SEQUENCE { - iCert IValue, - linkage-value LinkageValue, - group-linkage-value GroupLinkageValue OPTIONAL - } - -/** - * @class EndEntityType - * - * @brief This type indicates which type of permissions may appear in - * end-entity certificates the chain of whose permissions passes through the - * PsidGroupPermissions field containing this value. If app is indicated, the - * end-entity certificate may contain an appPermissions field. If enroll is - * indicated, the end-entity certificate may contain a certRequestPermissions - * field. - */ - EndEntityType ::= BIT STRING { - app (0), - enroll (1) - } (SIZE (8)) (ALL EXCEPT {}) - -/** - * @class PsidGroupPermissions - * - * @brief This structure states the permissions that a certificate holder has - * with respect to issuing and requesting certificates for a particular set - * of PSIDs. In this structure: - * - *

For examples, see D.5.3 and D.5.4. - * - * @param subjectPermissions indicates PSIDs and SSP Ranges covered by this - * field. - * - * @param minChainLength and chainLengthRange indicate how long the - * certificate chain from this certificate to the end-entity certificate is - * permitted to be. As specified in 5.1.2.1, the length of the certificate - * chain is the number of certificates "below" this certificate in the chain, - * down to and including the end-entity certificate. The length is permitted - * to be (a) greater than or equal to minChainLength certificates and (b) - * less than or equal to minChainLength + chainLengthRange certificates. A - * value of 0 for minChainLength is not permitted when this type appears in - * the certIssuePermissions field of a ToBeSignedCertificate; a certificate - * that has a value of 0 for this field is invalid. The value −1 for - * chainLengthRange is a special case: if the value of chainLengthRange is −1 - * it indicates that the certificate chain may be any length equal to or - * greater than minChainLength. See the examples below for further discussion. - * - * @param eeType takes one or more of the values app and enroll and indicates - * the type of certificates or requests that this instance of - * PsidGroupPermissions in the certificate is entitled to authorize. If this - * field indicates app, the chain is allowed to end in an authorization - * certificate, i.e., a certficate in which these permissions appear in an - * appPermissions field (in other words, if the field does not indicate app - * but the chain ends in an authorization certificate, the chain shall be - * considered invalid). 
If this field indicates enroll, the chain is allowed - * to end in an enrollment certificate, i.e., a certificate in which these - * permissions appear in a certReqPermissions permissions field), or both (in - * other words, if the field does not indicate app but the chain ends in an - * authorization certificate, the chain shall be considered invalid). - * Different instances of PsidGroupPermissions within a ToBeSignedCertificate - * may have different values for eeType. - */ - PsidGroupPermissions ::= SEQUENCE { - subjectPermissions SubjectPermissions, - minChainLength INTEGER DEFAULT 1, - chainLengthRange INTEGER DEFAULT 0, - eeType EndEntityType DEFAULT {app} - } - -/** - * @class SequenceOfPsidGroupPermissions - * - * @brief This type is used for clarity of definitions. - */ - SequenceOfPsidGroupPermissions ::= SEQUENCE OF PsidGroupPermissions - -/** - * @class SubjectPermissions - * - * @brief This indicates the PSIDs and associated SSPs for which certificate - * issuance or request permissions are granted by a PsidGroupPermissions - * structure. If this takes the value explicit, the enclosing - * PsidGroupPermissions structure grants certificate issuance or request - * permissions for the indicated PSIDs and SSP Ranges. If this takes the - * value all, the enclosing PsidGroupPermissions structure grants certificate - * issuance or request permissions for all PSIDs not indicated by other - * PsidGroupPermissions in the same certIssuePermissions or - * certRequestPermissions field. - * - *

Critical information fields: - *
    - *
  • If present, this is a critical information field as defined in 5.2.6. - * An implementation that does not recognize the indicated CHOICE when - * verifying a signed SPDU shall indicate that the signed SPDU is - * invalid.
  • - * - *
  • If present, explicit is a critical information field as defined in - * 5.2.6. An implementation that does not support the number of PsidSspRange - * in explicit when verifying a signed SPDU shall indicate that the signed - * SPDU is invalid. A compliant implementation shall support explicit fields - * containing at least eight entries.
  • - *
- */ - SubjectPermissions ::= CHOICE { - explicit SequenceOfPsidSspRange, - all NULL, - ... - } - -/** - * @class VerificationKeyIndicator - * - * @brief The contents of this field depend on whether the certificate is an - * implicit or an explicit certificate. - * - *

Critical information fields: If present, this is a critical - * information field as defined in 5.2.5. An implementation that does not - * recognize the indicated CHOICE for this type when verifying a signed SPDU - * shall indicate that the signed SPDU is invalid. - * - *

Parameters: - * - * @param verificationKey is included in explicit certificates. It contains - * the public key to be used to verify signatures generated by the holder of - * the Certificate. - * - * @param reconstructionValue is included in implicit certificates. It - * contains the reconstruction value, which is used to recover the public key - * as specified in SEC 4 and 5.3.2. - */ - VerificationKeyIndicator ::= CHOICE { - verificationKey PublicVerificationKey, - reconstructionValue EccP256CurvePoint, - ... - } - -END - -EtsiTs103097ExtensionModule -{itu-t(0) identified-organization(4) etsi(0) itsDomain(5) wg5(5) secHeaders(103097) extension(2) major-version-1(1) minor-version-1(1)} -DEFINITIONS AUTOMATIC TAGS ::= BEGIN - -IMPORTS - HashedId8, - Time32 -FROM Ieee1609Dot2BaseTypes {iso(1) identified-organization(3) ieee(111) - standards-association-numbered-series-standards(2) wave-stds(1609) - dot2(2) base(1) base-types(2) major-version-2 (2) minor-version-3 (3)} -/* WITH Successors */ -; - -ExtensionModuleVersion::= INTEGER(1) - -/* Extension {EXT-TYPE : ExtensionTypes} ::= SEQUENCE { - id EXT-TYPE.&extId({ExtensionTypes}), - content EXT-TYPE.&ExtContent({ExtensionTypes}{@.id}) -} */ - -Extension {EXT-TYPE : ExtensionTypes} ::= SEQUENCE { - id INTEGER, - content INTEGER -} - -/*EXT-TYPE ::= CLASS { - &extId ExtId, - &ExtContent -} WITH SYNTAX {&ExtContent IDENTIFIED BY &extId} */ - -EXT-TYPE ::= SEQUENCE { - extId ExtId -} - -ExtId ::= INTEGER(0..255) - -EtsiOriginatingHeaderInfoExtension ::= Extension{{EtsiTs103097HeaderInfoExtensions}} - -EtsiTs103097HeaderInfoExtensionId ::= ExtId - etsiTs102941CrlRequestId EtsiTs103097HeaderInfoExtensionId ::= 1 --'01'H - etsiTs102941DeltaCtlRequestId EtsiTs103097HeaderInfoExtensionId ::= 2 --'02'H - -EtsiTs103097HeaderInfoExtensions EXT-TYPE ::= { - { EtsiTs102941CrlRequest IDENTIFIED BY etsiTs102941CrlRequestId } | - { EtsiTs102941DeltaCtlRequest IDENTIFIED BY etsiTs102941DeltaCtlRequestId }, - ... 
-} - -EtsiTs102941CrlRequest::= SEQUENCE { - issuerId HashedId8, - lastKnownUpdate Time32 OPTIONAL -} - -EtsiTs102941CtlRequest::= SEQUENCE { - issuerId HashedId8, - lastKnownCtlSequence INTEGER (0..255) OPTIONAL -} - -EtsiTs102941DeltaCtlRequest::= EtsiTs102941CtlRequest - -END - -EtsiTs103097Module -{itu-t(0) identified-organization(4) etsi(0) itsDomain(5) wg5(5) secHeaders(103097) core(1) major-version-3(3) minor-version-1(1)} - -DEFINITIONS AUTOMATIC TAGS ::= BEGIN - -IMPORTS - -Ieee1609Dot2Data, Certificate -FROM Ieee1609Dot2 {iso(1) identified-organization(3) ieee(111) - standards-association-numbered-series-standards(2) wave-stds(1609) - dot2(2) base(1) schema(1) major-version-2(2) minor-version-4(4)} -/* WITH Successors */ - -ExtensionModuleVersion -FROM EtsiTs103097ExtensionModule {itu-t(0) identified-organization(4) - etsi(0) itsDomain(5) wg5(5) secHeaders(103097) extension(2) major-version-1(1) minor-version-1(1)} -; - -EtsiTs103097Certificate::= Certificate (WITH COMPONENTS{..., - toBeSigned (WITH COMPONENTS{..., - id (WITH COMPONENTS{..., - linkageData ABSENT, - binaryId ABSENT - }), - certRequestPermissions ABSENT, - canRequestRollover ABSENT - }) -}) - -EtsiTs103097Data::=Ieee1609Dot2Data (WITH COMPONENTS {..., - content (WITH COMPONENTS {..., - signedData (WITH COMPONENTS {..., -- constraints on signed data headers - tbsData (WITH COMPONENTS { - headerInfo (WITH COMPONENTS {..., - generationTime PRESENT, - p2pcdLearningRequest ABSENT, - missingCrlIdentifier ABSENT - }) - }), - signer (WITH COMPONENTS {..., --constraints on the certificate - certificate ((WITH COMPONENT (EtsiTs103097Certificate))^(SIZE(1))) - }) - }), - encryptedData (WITH COMPONENTS {..., -- constraints on encrypted data headers - recipients (WITH COMPONENT ( - (WITH COMPONENTS {..., - pskRecipInfo ABSENT, - symmRecipInfo ABSENT, - rekRecipInfo ABSENT - }) - )) - }), - signedCertificateRequest ABSENT - }) -}) - -EtsiTs103097Data-Unsecured {ToBeSentDataContent} ::= EtsiTs103097Data 
(WITH COMPONENTS {..., - content (WITH COMPONENTS { - unsecuredData (CONTAINING ToBeSentDataContent) - }) -}) - -EtsiTs103097Data-Signed {ToBeSignedDataContent} ::= EtsiTs103097Data (WITH COMPONENTS {..., - content (WITH COMPONENTS { - signedData (WITH COMPONENTS {..., - tbsData (WITH COMPONENTS { - payload (WITH COMPONENTS { - data (WITH COMPONENTS {..., - content (WITH COMPONENTS { - unsecuredData (CONTAINING ToBeSignedDataContent) - }) - }) PRESENT - }) - }) - }) - }) -}) - -EtsiTs103097Data-SignedExternalPayload ::= EtsiTs103097Data (WITH COMPONENTS {..., - content (WITH COMPONENTS { - signedData (WITH COMPONENTS {..., - tbsData (WITH COMPONENTS { - payload (WITH COMPONENTS { - extDataHash (WITH COMPONENTS { - sha256HashedData PRESENT - }) PRESENT - }) - }) - }) - }) -}) - -EtsiTs103097Data-Encrypted {ToBeEncryptedDataContent} ::= EtsiTs103097Data (WITH COMPONENTS {..., - content (WITH COMPONENTS { - encryptedData (WITH COMPONENTS {..., - ciphertext (WITH COMPONENTS {..., - aes128ccm (WITH COMPONENTS {..., - ccmCiphertext (CONSTRAINED BY {-- ccm encryption of -- ToBeEncryptedDataContent}) - }) - }) - }) - }) -}) - -EtsiTs103097Data-SignedAndEncrypted {ToBesignedAndEncryptedDataContent} ::= EtsiTs103097Data-Encrypted {EtsiTs103097Data-Signed {ToBesignedAndEncryptedDataContent}} - -EtsiTs103097Data-Encrypted-Unicast {ToBeEncryptedDataContent} ::= EtsiTs103097Data-Encrypted { EtsiTs103097Data-Unsecured{ToBeEncryptedDataContent}} (WITH COMPONENTS {..., - content (WITH COMPONENTS { - encryptedData (WITH COMPONENTS {..., - recipients (SIZE(1)) - }) - }) -}) - -EtsiTs103097Data-SignedAndEncrypted-Unicast {ToBesignedAndEncryptedDataContent} ::= EtsiTs103097Data-Encrypted {EtsiTs103097Data-Signed {ToBesignedAndEncryptedDataContent}} (WITH COMPONENTS {..., - content (WITH COMPONENTS { - encryptedData (WITH COMPONENTS {..., - recipients (SIZE(1)) - }) - }) -}) - -END - -""" \ No newline at end of file +from .asn1.ieee1609dot2.Ieee1609Dot2CrlBaseTypes import 
IEEE_1609_DOT_2_CRL_BASE_TYPES_ASN1_DESCRIPTIONS +from .asn1.ieee1609dot2.Ieee1609Dot2Crl import IEEE_1609_DOT_2_CRL_ASN1_DESCRIPTIONS +from .asn1.ieee1609dot2.Ieee1609Dot2BaseTypes import IEEE_1609_DOT_2_BASE_TYPES_ASN1_DESCRIPTIONS +from .asn1.ieee1609dot2.Ieee1609Dot2 import IEEE_1609_DOT_2_MODULE_ASN1_DESCRIPTIONS +from .asn1.EtsiTs103097ExtensionModule import ETSI_TS_103_097_EXTENSION_MODULE_ASN1_DESCRIPTIONS +from .asn1.EtsiTs103097Module import ETSI_TS_103_097_MODULE_ASN1_DESCRIPTIONS + +SECURITY_ASN1_DESCRIPTIONS = IEEE_1609_DOT_2_CRL_BASE_TYPES_ASN1_DESCRIPTIONS + IEEE_1609_DOT_2_CRL_ASN1_DESCRIPTIONS + IEEE_1609_DOT_2_BASE_TYPES_ASN1_DESCRIPTIONS + \ + IEEE_1609_DOT_2_MODULE_ASN1_DESCRIPTIONS + \ + ETSI_TS_103_097_EXTENSION_MODULE_ASN1_DESCRIPTIONS + \ + ETSI_TS_103_097_MODULE_ASN1_DESCRIPTIONS diff --git a/src/flexstack/security/security_profiles.py b/src/flexstack/security/security_profiles.py index e1e2320..c0cfd58 100644 --- a/src/flexstack/security/security_profiles.py +++ b/src/flexstack/security/security_profiles.py @@ -8,3 +8,4 @@ class SecurityProfile(Enum): NO_SECURITY = 0 COOPERATIVE_AWARENESS_MESSAGE = 1 DECENTRALIZED_ENVIRONMENTAL_NOTIFICATION_MESSAGE = 2 + VRU_AWARENESS_MESSAGE = 3 diff --git a/src/flexstack/security/sign_service.py b/src/flexstack/security/sign_service.py index 2c5a5f7..2413c61 100644 --- a/src/flexstack/security/sign_service.py +++ b/src/flexstack/security/sign_service.py @@ -1,6 +1,7 @@ from __future__ import annotations from .sn_sap import SNSIGNRequest, SNSIGNConfirm -from .certificate import OwnCertificate, SECURITY_CODER +from .certificate import Certificate, OwnCertificate, SECURITY_CODER +from .certificate_library import CertificateLibrary from .ecdsa_backend import ECDSABackend from ..utils.time_service import TimeService @@ -54,7 +55,8 @@ def set_up_signer(self, certificate: OwnCertificate) -> tuple: or self.requested_own_certificate ): self.last_signer_full_certificate_time = current_time - signer = 
("certificate", certificate.encode()) + self.requested_own_certificate = False + signer = ("certificate", [certificate.certificate]) return signer @@ -66,65 +68,167 @@ class SignService: ---------- ecdsa_backend : ECDSABackend ECDSA backend to use. - unknown_ats : List[bytes] + certificate_library : CertificateLibrary + Certificate library holding own certificates, known authorization tickets, + authorization authorities and root certificates used during signing. + unknown_ats : list[bytes] List of unknown ATs. Each AT is represented by its certificate hashedId3. - knwon_ats : dict[bytes, Certificate] - Dictionary of known ATs, indexed by the AT's certificate hashId8. Each AT is represented by a dictionary with the following keys: 'verifying_key', 'verified', 'certificate', 'backend_id'. - requested_ats : List[bytes] + requested_ats : list[bytes] List of requested ATs. Each AT is represented by its certificate hashedId3. - known_aas : Dict[bytes, Certificate] - Dictionary of AA. Indexed by the AA HashedId8. Each AA is represented by a Certificate object. - root_ca : dict[bytes, Certificate] - Dictionary of Root CA. Indexed by the Root CA HashedId8. Each Root CA is represented by a dictionary with the following keys: 'verifying_key', 'verified', 'certificate'. - present_ats : dict[int, OwnCertificate] - The present ats stores the ATs to use for signing. Indexed by the AT's backend id. Each AT is stored as a Certificate. """ - def __init__(self, backend: ECDSABackend) -> None: + def __init__(self, backend: ECDSABackend, certificate_library: CertificateLibrary) -> None: """ Initialize the Sign Service. + + Parameters + ---------- + backend : ECDSABackend + ECDSA backend to use for cryptographic operations. + certificate_library : CertificateLibrary + Certificate library holding own certificates and trusted chain certificates. 
""" self.ecdsa_backend: ECDSABackend = backend - self.knwon_ats = {} - self.unknown_ats = {} - self.requested_ats = {} - self.knwon_aas = {} - self.root_ca = {} - self.present_ats = {} + self.certificate_library: CertificateLibrary = certificate_library + self.unknown_ats: list = [] + self.requested_ats: list = [] + self.cam_handler: CooperativeAwarenessMessageSecurityHandler = CooperativeAwarenessMessageSecurityHandler(backend) def sign_request(self, request: SNSIGNRequest) -> SNSIGNConfirm: """ Sign a SNSIGNRequest. + + Routing: + - its_aid == 36: CAM-related PKI signing — not implemented (CAMs are signed + directly via sign_cam()). + - its_aid == 37: DENM — delegates to sign_denm() (§7.1.2). + - any other its_aid: generic signed message — delegates to sign_other() (§7.1.3). """ if request.its_aid == 36: raise NotImplementedError("CA signing is not implemented") elif request.its_aid == 37: - raise NotImplementedError("DEN signing is not implemented") - elif request.its_aid == 137: - raise NotImplementedError( - "TLM signing is not implemented (SPATEM)") - elif request.its_aid == 138: - raise NotImplementedError("RLT signing is not implemented (MAPEM)") - elif request.its_aid == 139: - raise NotImplementedError("IVI signing is not implemented (IVIM)") - elif request.its_aid == 141: - raise NotImplementedError( - "GeoNetworking Management Communications (GN-MGMT) signing is not implemented (SPATEM)" - ) - elif request.its_aid == 540 or request.its_aid == 801: - raise NotImplementedError("SA service signing is not implemented") - elif request.its_aid == 639: - raise NotImplementedError("CP signing is not implemented") - elif request.its_aid == 638: - raise NotImplementedError("VRU signing is not implemented") + return self.sign_denm(request) else: - raise NotImplementedError( - "Security profile for the specified message not implemented" + return self.sign_other(request) + + def sign_other(self, request: SNSIGNRequest) -> SNSIGNConfirm: + """ + Sign a message 
according to ETSI TS 103 097 V2.2.1 §5.2 and §7.1.3. + + §7.1.3 generic profile for signed messages other than CAM and DENM: + - tbsData.headerInfo SHALL contain psid (set to request.its_aid) and + generationTime (§5.2). No other headerInfo fields are added or + required by this profile. + - Signer is set to choice 'digest' (hashedId8 of the signing AT). + """ + signed_data_dict = { + "protocolVersion": 3, + "content": ( + "signedData", + { + "hashId": "sha256", + "tbsData": { + "payload": { + "data": { + "protocolVersion": 3, + "content": ("unsecuredData", request.tbs_message), + } + }, + "headerInfo": { + "psid": request.its_aid, + "generationTime": TimeService.timestamp_its() * 1000, + }, + }, + "signer": ("digest", b"\x00\x00\x00\x00\x00\x00\x00\x00"), + "signature": ( + "ecdsaNistP256Signature", + { + "rSig": ("fill", None), + "sSig": (0xA495991B7852B855).to_bytes(32, byteorder="big"), + }, + ), + }, + ), + } + tobesigned: bytes = SECURITY_CODER.encode_to_be_signed_data( + signed_data_dict["content"][1]["tbsData"] + ) + at_item: OwnCertificate | None = self.get_present_at_for_signging(request.its_aid) + if at_item is None: + raise RuntimeError("No present AT for signing message") + signed_data_dict["content"][1]["signer"] = ("digest", at_item.as_hashedid8()) + signed_data_dict["content"][1]["signature"] = at_item.sign_message( + self.ecdsa_backend, tobesigned + ) + sec_message = SECURITY_CODER.encode_etsi_ts_103097_data_signed(signed_data_dict) + return SNSIGNConfirm( + sec_message=sec_message, sec_message_length=len(sec_message) + ) + + def sign_denm(self, request: SNSIGNRequest) -> SNSIGNConfirm: + """ + Sign a DENM according to ETSI TS 103 097 V2.2.1 §5.2 and §7.1.2. + + §7.1.2 constraints applied here: + - signer shall always be choice 'certificate' (never 'digest'). + - tbsData.headerInfo shall contain psid (37), generationTime (§5.2), + and generationLocation (mandatory). 
No other optional fields are + included (expiryTime, inlineP2pcdRequest, requestedCertificate, etc. + shall all be absent). + """ + if request.generation_location is None: + raise ValueError( + "sign_denm requires a generation_location (§7.1.2: generationLocation SHALL be present)" ) + signed_data_dict = { + "protocolVersion": 3, + "content": ( + "signedData", + { + "hashId": "sha256", + "tbsData": { + "payload": { + "data": { + "protocolVersion": 3, + "content": ("unsecuredData", request.tbs_message), + } + }, + "headerInfo": { + "psid": request.its_aid, + "generationTime": TimeService.timestamp_its() * 1000, + "generationLocation": request.generation_location, + }, + }, + "signer": ("digest", b"\x00\x00\x00\x00\x00\x00\x00\x00"), + "signature": ( + "ecdsaNistP256Signature", + { + "rSig": ("fill", None), + "sSig": (0xA495991B7852B855).to_bytes(32, byteorder="big"), + }, + ), + }, + ), + } + tobesigned: bytes = SECURITY_CODER.encode_to_be_signed_data( + signed_data_dict["content"][1]["tbsData"] + ) + at_item: OwnCertificate | None = self.get_present_at_for_signging(request.its_aid) + if at_item is None: + raise RuntimeError("No present AT for signing DENM") + # §7.1.2: signer SHALL always be 'certificate' + signed_data_dict["content"][1]["signer"] = ("certificate", [at_item.certificate]) + signed_data_dict["content"][1]["signature"] = at_item.sign_message( + self.ecdsa_backend, tobesigned + ) + sec_message = SECURITY_CODER.encode_etsi_ts_103097_data_signed(signed_data_dict) + return SNSIGNConfirm( + sec_message=sec_message, sec_message_length=len(sec_message) + ) def sign_cam(self, request: SNSIGNRequest) -> SNSIGNConfirm: """ - Sign a CAM according the standard ETSI TS 103 097 V2.1.1 (2021-10) 7.1.1 + Sign a CAM according the standard ETSI TS 103 097 V2.2.1 §5.2 and §7.1.1 """ sigend_data_dict = { "protocolVersion": 3, @@ -141,8 +245,7 @@ def sign_cam(self, request: SNSIGNRequest) -> SNSIGNConfirm: }, "headerInfo": { "psid": request.its_aid, - # "generationTime": 
0, - # "expireTime": 0 + "generationTime": TimeService.timestamp_its() * 1000, }, }, "signer": ("digest", b"\x00\x00\x00\x00\x00\x00\x00\x00"), @@ -172,8 +275,7 @@ def sign_cam(self, request: SNSIGNRequest) -> SNSIGNConfirm: request.its_aid) if at_item is None: raise RuntimeError("No present AT for signing CAM") - sigend_data_dict["content"][1]["signer"] = ( - "digest", at_item.as_hashedid8()) + sigend_data_dict["content"][1]["signer"] = self.cam_handler.set_up_signer(at_item) sigend_data_dict["content"][1]["signature"] = at_item.sign_message( self.ecdsa_backend, tobesigned) @@ -188,16 +290,84 @@ def sign_cam(self, request: SNSIGNRequest) -> SNSIGNConfirm: def get_present_at_for_signging(self, its_aid: int) -> OwnCertificate | None: """ Get the present AT for a given ITS-AID. + + Parameters + ---------- + its_aid : int + ITS AID to look up. + + Returns + ------- + OwnCertificate | None + The OwnCertificate that covers the given ITS-AID, or None if not found. """ - for cert in self.present_ats.items(): - if its_aid in cert[1].get_list_of_its_aid(): - return cert[1] + for cert in self.certificate_library.own_certificates.values(): + if its_aid in cert.get_list_of_its_aid(): + return cert return None + def add_own_certificate(self, cert: OwnCertificate) -> None: + """ + Add an own certificate to the certificate library. + + Delegates to :meth:`CertificateLibrary.add_own_certificate` so that the + certificate is available for signing and can be verified against the + trusted chain stored in the library. + + Parameters + ---------- + cert : OwnCertificate + The own certificate to add. + """ + self.certificate_library.add_own_certificate(cert) + + def notify_unknown_at(self, hashedid8: bytes) -> None: + """ + §7.1.1: An unknown AT cert was seen. Record its HashedId3 for the next + inlineP2pcdRequest and force own certificate inclusion in the next CAM. 
+ """ + hashedid3 = hashedid8[-3:] + if hashedid3 not in self.unknown_ats: + self.unknown_ats.append(hashedid3) + self.cam_handler.requested_own_certificate = True + + def notify_inline_p2pcd_request(self, request_list: list) -> None: + """ + §7.1.1: Process a received inlineP2pcdRequest field. + + - If our own AT's HashedId3 is present, include certificate in next CAM. + - If a CA cert we hold is requested, schedule it as requestedCertificate. + """ + for own_cert in self.certificate_library.own_certificates.values(): + own_hashedid3 = own_cert.as_hashedid8()[-3:] + if own_hashedid3 in request_list: + self.cam_handler.requested_own_certificate = True + for hashedid3 in request_list: + ca_cert = self.certificate_library.get_ca_certificate_by_hashedid3(hashedid3) + if ca_cert is not None and hashedid3 not in self.requested_ats: + self.requested_ats.append(hashedid3) + + def notify_received_ca_certificate(self, cert_dict: dict) -> None: + """ + §7.1.1: A peer sent the CA certificate we requested. Discard the + pending request and add the certificate to the library. + """ + cert = Certificate.from_dict(cert_dict) + hashedid3 = cert.as_hashedid8()[-3:] + if hashedid3 in self.requested_ats: + self.requested_ats.remove(hashedid3) + if hashedid3 in self.unknown_ats: + self.unknown_ats.remove(hashedid3) + self.certificate_library.add_authorization_authority(cert) + def get_known_at_for_request(self, hashedid3: bytes) -> dict: """ - Get the known AT for a given hashedId3. + §7.1.1: Return the CA certificate dict for the given HashedId3 so it + can be embedded in the requestedCertificate header field. """ - raise NotImplementedError( - "Getting a known AT from HashedId3 is not implemented yet!" 
- ) + ca_cert = self.certificate_library.get_ca_certificate_by_hashedid3(hashedid3) + if ca_cert is None: + raise RuntimeError( + f"No CA certificate found for HashedId3 {hashedid3.hex()}" + ) + return ca_cert.certificate diff --git a/src/flexstack/security/sn_sap.py b/src/flexstack/security/sn_sap.py index 4606069..15cbe99 100644 --- a/src/flexstack/security/sn_sap.py +++ b/src/flexstack/security/sn_sap.py @@ -35,6 +35,10 @@ class SNSIGNRequest: Context information which could be used in selecting properties of the underlying security protocol for various purposes. key_handle : int (optional) An indicator for the security entity to decide which key to use + generation_location : dict | None (optional) + 3D location to embed in the signed message headerInfo as generationLocation. + Required for DENM (§7.1.2). Expected keys: 'latitude' (int), 'longitude' (int), + 'elevation' (int, Uint16 in 0.2 m units; 0xF000 = unavailable). """ tbs_message_length: int tbs_message: bytes @@ -43,6 +47,7 @@ class SNSIGNRequest: permissions: bytes context_information: bytes | None = None key_handle: int | None = None + generation_location: dict | None = None def __repr__(self): return ( @@ -174,25 +179,29 @@ class SNVERIFYConfirm: ITS AID permissions : bytes Permissions of the signer (Max length 31 octets) + plain_message : bytes + The verified plain-text payload extracted from the signed message. + Empty bytes when verification does not succeed. 
""" report: ReportVerify certificate_id: bytes its_aid_length: int its_aid: bytes permissions: bytes + plain_message: bytes = b"" def __repr__(self): return ( f"SNVERIFYConfirm(report={self.report}, certificate_id={self.certificate_id}, " f"its_aid_length={self.its_aid_length}, its_aid={self.its_aid}, " - f"permissions={self.permissions})" + f"permissions={self.permissions}, plain_message={self.plain_message})" ) def __str__(self): return ( f"SNVERIFYConfirm(report={self.report}, certificate_id={self.certificate_id}, " f"its_aid_length={self.its_aid_length}, its_aid={self.its_aid}, " - f"permissions={self.permissions})" + f"permissions={self.permissions}, plain_message={self.plain_message})" ) diff --git a/src/flexstack/security/verify_service.py b/src/flexstack/security/verify_service.py index 37ca5b7..8c9fe6f 100644 --- a/src/flexstack/security/verify_service.py +++ b/src/flexstack/security/verify_service.py @@ -1,5 +1,7 @@ +from __future__ import annotations + from .sn_sap import ReportVerify, SNVERIFYRequest, SNVERIFYConfirm -from .sign_service import ECDSABackend +from .sign_service import ECDSABackend, SignService from .certificate import SECURITY_CODER from .certificate_library import CertificateLibrary @@ -13,33 +15,79 @@ def __init__( self, backend: ECDSABackend, certificate_library: CertificateLibrary, + sign_service: SignService | None = None, ): """ Constructor + Parameters + ---------- + backend : ECDSABackend + ECDSA backend to use for the verification of the signature. + certificate_library : CertificateLibrary + Certificate library to use for the verification of the signature. + sign_service : SignService | None, optional + Sign service to notify about P2PCD events (unknown certs, requests). 
""" self.backend: ECDSABackend = backend self.certificate_library: CertificateLibrary = certificate_library + self.sign_service: SignService | None = sign_service def verify(self, request: SNVERIFYRequest) -> SNVERIFYConfirm: """ Verify the signature of a message + + Parameters + ---------- + request : SNVERIFYRequest + Request to verify the signature of a message. + + Returns + ------- + SNVERIFYConfirm + Confirmation of the verification of the signature of a message. """ sec_header_decoded = SECURITY_CODER.decode_etsi_ts_103097_data_signed( request.message ) - data = SECURITY_CODER.encode_to_be_signed_data( - sec_header_decoded["toBeSigned"] - ) - signer = sec_header_decoded["content"][1]["signer"] + signed_data = sec_header_decoded["content"][1] + data = SECURITY_CODER.encode_to_be_signed_data(signed_data["tbsData"]) + signer = signed_data["signer"] + # Determine the message profile early so per-profile signer constraints can be + # checked before attempting certificate lookup. + _header_info_early = signed_data["tbsData"].get("headerInfo", {}) + _psid_early: int = _header_info_early.get("psid", 0) + # §7.1.2: DENMs (psid 37) SHALL use signer choice 'certificate', never 'digest'. 
+ if _psid_early == 37 and signer[0] != "certificate": + return SNVERIFYConfirm( + report=ReportVerify.UNSUPPORTED_SIGNER_IDENTIFIER_TYPE, + certificate_id=b'', + its_aid=b'', + its_aid_length=0, + permissions=b'', + ) authorization_ticket = None if signer[0] == "certificate": + # §5.2: certificate choice constrained to exactly one entry + if len(signer[1]) != 1: + return SNVERIFYConfirm( + report=ReportVerify.UNSUPPORTED_SIGNER_IDENTIFIER_TYPE, + certificate_id=b'', + its_aid=b'', + its_aid_length=0, + permissions=b'', + ) authorization_ticket = ( self.certificate_library.verify_sequence_of_certificates( signer[1], self.backend ) ) if not authorization_ticket: + if self.sign_service is not None: + cert_dict = signer[1][0] + issuer = cert_dict.get("issuer", ("self", None)) + if issuer[0] in ("sha256AndDigest", "sha384AndDigest") and issuer[1] is not None: + self.sign_service.notify_unknown_at(issuer[1]) return SNVERIFYConfirm( report=ReportVerify.INCONSISTENT_CHAIN, certificate_id=b'', @@ -54,8 +102,10 @@ def verify(self, request: SNVERIFYRequest) -> SNVERIFYConfirm: ) ) if not authorization_ticket: + if self.sign_service is not None: + self.sign_service.notify_unknown_at(signer[1]) return SNVERIFYConfirm( - report=ReportVerify.INVALID_CERTIFICATE, + report=ReportVerify.SIGNER_CERTIFICATE_NOT_FOUND, certificate_id=b'', its_aid=b'', its_aid_length=0, @@ -66,31 +116,90 @@ def verify(self, request: SNVERIFYRequest) -> SNVERIFYConfirm: if ( authorization_ticket is not None and authorization_ticket.certificate is not None and authorization_ticket.verify(self.backend) + and authorization_ticket.is_authorization_ticket() and authorization_ticket.certificate["toBeSigned"]["verifyKeyIndicator"][0] == "verificationKey" ): + header_info = signed_data["tbsData"].get("headerInfo", {}) + # §5.2: generationTime SHALL always be present + if "generationTime" not in header_info: + return SNVERIFYConfirm( + report=ReportVerify.INVALID_TIMESTAMP, + 
certificate_id=authorization_ticket.as_hashedid8(), + its_aid=b'', + its_aid_length=0, + permissions=b'', + ) + # §5.2: p2pcdLearningRequest and missingCrlIdentifier SHALL always be absent + if "p2pcdLearningRequest" in header_info or "missingCrlIdentifier" in header_info: + return SNVERIFYConfirm( + report=ReportVerify.INCOMPATIBLE_PROTOCOL, + certificate_id=authorization_ticket.as_hashedid8(), + its_aid=b'', + its_aid_length=0, + permissions=b'', + ) + psid: int = header_info.get("psid", 0) + # §7.1.2: DENM-specific headerInfo constraints + if psid == 37: + # generationLocation SHALL be present + if "generationLocation" not in header_info: + return SNVERIFYConfirm( + report=ReportVerify.INCOMPATIBLE_PROTOCOL, + certificate_id=authorization_ticket.as_hashedid8(), + its_aid=b'', + its_aid_length=0, + permissions=b'', + ) + # All other optional headerInfo fields SHALL be absent + _denm_forbidden = { + "expiryTime", "encryptionKey", + "inlineP2pcdRequest", "requestedCertificate", + } + for _field in _denm_forbidden: + if _field in header_info: + return SNVERIFYConfirm( + report=ReportVerify.INCOMPATIBLE_PROTOCOL, + certificate_id=authorization_ticket.as_hashedid8(), + its_aid=b'', + its_aid_length=0, + permissions=b'', + ) + its_aid_bytes = psid.to_bytes( + (psid.bit_length() + 7) // 8 or 1, "big") verification_key = authorization_ticket.certificate["toBeSigned"]["verifyKeyIndicator"][ 1 ] verify = self.backend.verify_with_pk( data=data, - signature=sec_header_decoded["signature"], + signature=signed_data["signature"], pk=verification_key, ) if verify: + plain_message = signed_data["tbsData"]["payload"]["data"]["content"][1] + if self.sign_service is not None: + if "inlineP2pcdRequest" in header_info: + self.sign_service.notify_inline_p2pcd_request( + header_info["inlineP2pcdRequest"] + ) + if "requestedCertificate" in header_info: + self.sign_service.notify_received_ca_certificate( + header_info["requestedCertificate"] + ) return SNVERIFYConfirm( 
report=ReportVerify.SUCCESS, certificate_id=authorization_ticket.as_hashedid8(), - its_aid=b'', - its_aid_length=0, + its_aid=its_aid_bytes, + its_aid_length=len(its_aid_bytes), permissions=b'', + plain_message=plain_message, ) else: return SNVERIFYConfirm( report=ReportVerify.FALSE_SIGNATURE, certificate_id=authorization_ticket.as_hashedid8(), - its_aid=b'', - its_aid_length=0, + its_aid=its_aid_bytes, + its_aid_length=len(its_aid_bytes), permissions=b'', ) return SNVERIFYConfirm( diff --git a/tests/flexstack/btp/test_service_access_point.py b/tests/flexstack/btp/test_service_access_point.py index 5ec9cf9..07f092d 100644 --- a/tests/flexstack/btp/test_service_access_point.py +++ b/tests/flexstack/btp/test_service_access_point.py @@ -3,6 +3,7 @@ from flexstack.geonet.gn_address import GNAddress from flexstack.btp.service_access_point import BTPDataIndication, BTPDataRequest from flexstack.geonet.service_access_point import TrafficClass +from flexstack.security.security_profiles import SecurityProfile class TestBTPDataRequest(TestCase): @@ -22,6 +23,22 @@ def test__init__(self): ) self.assertEqual(btp_data_request.length, 0) self.assertEqual(btp_data_request.data, b"") + self.assertEqual(btp_data_request.security_profile, SecurityProfile.NO_SECURITY) + self.assertEqual(btp_data_request.its_aid, 0) + self.assertEqual(btp_data_request.security_permissions, b"\x00") + + def test_security_profile_can_be_set(self): + btp_data_request = BTPDataRequest( + security_profile=SecurityProfile.COOPERATIVE_AWARENESS_MESSAGE, + its_aid=36, + security_permissions=b"\x01", + ) + self.assertEqual( + btp_data_request.security_profile, + SecurityProfile.COOPERATIVE_AWARENESS_MESSAGE, + ) + self.assertEqual(btp_data_request.its_aid, 36) + self.assertEqual(btp_data_request.security_permissions, b"\x01") def test_to_dict(self): btp_data_request = BTPDataRequest() @@ -35,6 +52,9 @@ def test_to_dict(self): "gn_packet_transport_type": {"header_type": 5, "header_subtype": 0}, 
"gn_destination_address": "AAAAAAAAAAA=", "gn_area": {"latitude": 0, "longitude": 0, "a": 0, "b": 0, "angle": 0}, + "gn_max_packet_lifetime": None, + "gn_repetition_interval": None, + "gn_max_repetition_time": None, "communication_profile": 0, "traffic_class": "AA==", "length": 0, @@ -101,7 +121,11 @@ def test_to_dict(self): "gn_packet_transport_type": {"header_type": 5, "header_subtype": 0}, "gn_destination_address": "AAAAAAAAAAA=", "gn_source_position_vector": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", + "gn_security_report": None, + "gn_certificate_id": None, + "gn_permissions": None, "gn_traffic_class": "AA==", + "gn_remaining_packet_lifetime": None, "length": 0, "data": "", }, diff --git a/tests/flexstack/facilities/ca_basic_service/test_ca_basic_service.py b/tests/flexstack/facilities/ca_basic_service/test_ca_basic_service.py index 4c8f271..fd926df 100644 --- a/tests/flexstack/facilities/ca_basic_service/test_ca_basic_service.py +++ b/tests/flexstack/facilities/ca_basic_service/test_ca_basic_service.py @@ -1,47 +1,56 @@ -# import unittest -# from unittest.mock import MagicMock, patch -# from flexstack.facilities.ca_basic_service.ca_basic_service import ( -# CooperativeAwarenessBasicService, -# ) - - -# class TestCooperativeAwarenessBasicService(unittest.TestCase): -# @patch("flexstack.facilities.ca_basic_service.cam_coder.CAMCoder") -# @patch( -# "flexstack.facilities.ca_basic_service.cam_reception_management.CAMReceptionManagement" -# ) -# @patch( -# "flexstack.facilities.ca_basic_service.cam_transmission_management.CAMTransmissionManagement" -# ) -# def test__init__(self, mock_transmission, mock_reception, mock_coder): -# # Arrange -# btp_router = MagicMock() -# vehicle_data = MagicMock() -# ldm = MagicMock() -# mock_coder.return_value = MagicMock() -# mock_transmission.return_value = MagicMock() -# mock_reception.return_value = MagicMock() -# # Act -# ca_service = CooperativeAwarenessBasicService(btp_router, vehicle_data, ldm) -# # Assert -# 
self.assertEqual(ca_service.btp_router, btp_router) -# self.assertEqual(ca_service.vehicle_data, vehicle_data) -# mock_coder.assert_called_once() -# print(ca_service.cam_coder.__module__) -# print(ca_service.cam_coder.__class__) - -# self.assertEqual(ca_service.cam_coder, mock_coder.return_value) -# mock_transmission.assert_called_once_with( -# btp_router=btp_router, -# cam_coder=mock_coder.return_value, -# vehicle_data=vehicle_data, -# ) -# self.assertEqual( -# ca_service.cam_transmission_management, mock_transmission.return_value -# ) -# mock_reception.assert_called_once_with( -# cam_coder=mock_coder.return_value, btp_router=btp_router, ldm=ldm -# ) -# self.assertEqual( -# ca_service.cam_reception_management, mock_reception.return_value -# ) +import unittest +from unittest.mock import MagicMock +from flexstack.facilities.ca_basic_service.ca_basic_service import ( + CooperativeAwarenessBasicService, +) +from flexstack.facilities.ca_basic_service.cam_transmission_management import VehicleData + + +def _make_vehicle_data(): + return VehicleData( + station_id=1, + station_type=5, + drive_direction="forward", + vehicle_length={ + "vehicleLengthValue": 50, + "vehicleLengthConfidenceIndication": "unavailable", + }, + vehicle_width=30, + ) + + +class TestCooperativeAwarenessBasicService(unittest.TestCase): + + def test__init__(self): + btp_router = MagicMock() + vehicle_data = _make_vehicle_data() + service = CooperativeAwarenessBasicService(btp_router, vehicle_data) + self.assertIsNotNone(service.cam_transmission_management) + self.assertIsNotNone(service.cam_reception_management) + self.assertIsNotNone(service.cam_coder) + + def test_start_delegates_to_transmission_management(self): + btp_router = MagicMock() + service = CooperativeAwarenessBasicService(btp_router, _make_vehicle_data()) + service.cam_transmission_management = MagicMock() + service.start() + service.cam_transmission_management.start.assert_called_once() + + def 
test_stop_delegates_to_transmission_management(self): + btp_router = MagicMock() + service = CooperativeAwarenessBasicService(btp_router, _make_vehicle_data()) + service.cam_transmission_management = MagicMock() + service.stop() + service.cam_transmission_management.stop.assert_called_once() + + def test_location_service_callback_available(self): + btp_router = MagicMock() + service = CooperativeAwarenessBasicService(btp_router, _make_vehicle_data()) + # The callback must exist and accept a TPV dict without raising + tpv = {"lat": 41.0, "lon": 2.0, "track": 0.0, "speed": 1.0} + service.cam_transmission_management.location_service_callback(tpv) + self.assertEqual(service.cam_transmission_management._current_tpv, tpv) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/flexstack/facilities/ca_basic_service/test_cam_coder.py b/tests/flexstack/facilities/ca_basic_service/test_cam_coder.py index 8fc2a23..7bd322f 100644 --- a/tests/flexstack/facilities/ca_basic_service/test_cam_coder.py +++ b/tests/flexstack/facilities/ca_basic_service/test_cam_coder.py @@ -17,7 +17,6 @@ def test_encode(self, asn1tools_compile_string_mock): asn_coder.encode = MagicMock(return_value="encoded_cam") asn1tools_compile_string_mock.return_value = asn_coder cam_coder = CAMCoder() - # cam_coder.asn_coder.encode.return_value = "encoded_cam" cam = {"camField": "value"} encoded_cam = cam_coder.encode(cam) cam_coder.asn_coder.encode.assert_called_once_with("CAM", cam) @@ -29,8 +28,203 @@ def test_decode(self, asn1tools_compile_string_mock): asn_coder.decode = MagicMock(return_value="decoded_cam") asn1tools_compile_string_mock.return_value = asn_coder cam_coder = CAMCoder() - # cam_coder.asn_coder.decode.return_value = "decoded_cam" encoded_cam = b'\x30\x0a\x02\x01\x01\x16\x05value' decoded_cam = cam_coder.decode(encoded_cam) cam_coder.asn_coder.decode.assert_called_once_with("CAM", encoded_cam) self.assertEqual(decoded_cam, "decoded_cam") + + +class 
TestCAMCoderExtensionContainers(unittest.TestCase): + """ + Tests for extension-container encode/decode helpers. + + These tests also verify wire-format interoperability: the bytes + produced by encode_extension_container must be valid UPER encodings + of the named container types (i.e. identical to what an unaltered + ASN.1 encoder would produce), and must round-trip through + decode_extension_container unchanged. + """ + + @classmethod + def setUpClass(cls): + # Use the real coder – these are integration / wire-compat tests. + cls.coder = CAMCoder() + + # ------------------------------------------------------------------ + # Known container-id / type-name mapping + # ------------------------------------------------------------------ + + def test_encode_extension_container_unknown_id_raises(self): + with self.assertRaises(ValueError): + self.coder.encode_extension_container(99, {}) + + def test_decode_extension_container_unknown_id_raises(self): + with self.assertRaises(ValueError): + self.coder.decode_extension_container(99, b'\x00') + + # ------------------------------------------------------------------ + # VeryLowFrequencyContainer (containerId = 3) + # ------------------------------------------------------------------ + + def test_encode_decode_vlf_container_empty(self): + """Empty VLF container round-trips correctly.""" + vlf_in = {} + data = self.coder.encode_extension_container(3, vlf_in) + # UPER of empty extensible SEQUENCE with all-optional fields = 0x00 + self.assertEqual(data, b'\x00') + vlf_out = self.coder.decode_extension_container(3, data) + self.assertEqual(vlf_out, vlf_in) + + def test_vlf_container_bytes_survive_cam_round_trip(self): + """ + Encode a CAM with a VLF extensionContainer; after decode the + containerData bytes must be identical to the standalone-encoded + VLF bytes, proving wire interoperability. 
+ """ + vlf_bytes = self.coder.encode_extension_container(3, {}) + cam = _build_minimal_cam(extension_containers=[ + {'containerId': 3, 'containerData': vlf_bytes} + ]) + encoded = self.coder.encode(cam) + decoded = self.coder.decode(encoded) + ext = decoded['cam']['camParameters']['extensionContainers'][0] + self.assertEqual(ext['containerId'], 3) + # bytes must survive the CAM UPER encode/decode unchanged + self.assertEqual(ext['containerData'], vlf_bytes) + + # ------------------------------------------------------------------ + # TwoWheelerContainer (containerId = 1) + # ------------------------------------------------------------------ + + def test_encode_decode_two_wheeler_container_empty(self): + """Empty TwoWheelerContainer round-trips correctly.""" + tw_in = {} + data = self.coder.encode_extension_container(1, tw_in) + self.assertEqual(data, b'\x00') + tw_out = self.coder.decode_extension_container(1, data) + self.assertEqual(tw_out, tw_in) + + def test_two_wheeler_container_bytes_survive_cam_round_trip(self): + tw_bytes = self.coder.encode_extension_container(1, {}) + cam = _build_minimal_cam(extension_containers=[ + {'containerId': 1, 'containerData': tw_bytes} + ]) + encoded = self.coder.encode(cam) + decoded = self.coder.decode(encoded) + ext = decoded['cam']['camParameters']['extensionContainers'][0] + self.assertEqual(ext['containerId'], 1) + self.assertEqual(ext['containerData'], tw_bytes) + + # ------------------------------------------------------------------ + # PathPredictionContainer (containerId = 4) + # ------------------------------------------------------------------ + + def test_encode_decode_path_prediction_container(self): + pp_in = {'pathPredictedList': []} + data = self.coder.encode_extension_container(4, pp_in) + pp_out = self.coder.decode_extension_container(4, data) + self.assertEqual(pp_out, pp_in) + + # ------------------------------------------------------------------ + # Multiple extension containers in one CAM + # 
------------------------------------------------------------------ + + def test_multiple_extension_containers_in_cam(self): + vlf_bytes = self.coder.encode_extension_container(3, {}) + tw_bytes = self.coder.encode_extension_container(1, {}) + cam = _build_minimal_cam(extension_containers=[ + {'containerId': 3, 'containerData': vlf_bytes}, + {'containerId': 1, 'containerData': tw_bytes}, + ]) + encoded = self.coder.encode(cam) + decoded = self.coder.decode(encoded) + exts = decoded['cam']['camParameters']['extensionContainers'] + self.assertEqual(len(exts), 2) + self.assertEqual(exts[0]['containerId'], 3) + self.assertEqual(exts[0]['containerData'], vlf_bytes) + self.assertEqual(exts[1]['containerId'], 1) + self.assertEqual(exts[1]['containerData'], tw_bytes) + + # ------------------------------------------------------------------ + # ASN.1 compilation smoke test (WITH SUCCESSORS must not appear) + # ------------------------------------------------------------------ + + def test_asn1_compiles_without_with_successors(self): + """ + The cam_asn1 module must compile cleanly (i.e. WITH SUCCESSORS + was removed from the IMPORTS block). + """ + import asn1tools + from flexstack.facilities.ca_basic_service.cam_asn1 import ( + CAM_ASN1_DESCRIPTIONS, + ) + # The problematic syntax: + # FROM {OID} WITH SUCCESSORS + # must not appear in the final ASN.1 string. 
+ import re + self.assertIsNone( + re.search(r'FROM\s+\S+\s*\{[^}]*\}\s+WITH\s+SUCCESSORS', + CAM_ASN1_DESCRIPTIONS), + "WITH SUCCESSORS found after a module reference in IMPORTS – " + "this breaks asn1tools.", + ) + # Should compile without raising + coder = asn1tools.compile_string(CAM_ASN1_DESCRIPTIONS, codec="uper") + self.assertIsNotNone(coder) + + +# --------------------------------------------------------------------------- +# Helpers +# --------------------------------------------------------------------------- + +def _build_minimal_cam(extension_containers=None): + cam = { + 'header': {'protocolVersion': 2, 'messageId': 2, 'stationId': 1}, + 'cam': { + 'generationDeltaTime': 1000, + 'camParameters': { + 'basicContainer': { + 'stationType': 5, + 'referencePosition': { + 'latitude': 414536062, + 'longitude': 20737073, + 'positionConfidenceEllipse': { + 'semiMajorAxisLength': 875, + 'semiMinorAxisLength': 1059, + 'semiMajorAxisOrientation': 0, + }, + 'altitude': { + 'altitudeValue': 16350, + 'altitudeConfidence': 'unavailable', + }, + }, + }, + 'highFrequencyContainer': ( + 'basicVehicleContainerHighFrequency', + { + 'heading': {'headingValue': 3601, 'headingConfidence': 127}, + 'speed': {'speedValue': 16383, 'speedConfidence': 127}, + 'driveDirection': 'unavailable', + 'vehicleLength': { + 'vehicleLengthValue': 1023, + 'vehicleLengthConfidenceIndication': 'unavailable', + }, + 'vehicleWidth': 62, + 'longitudinalAcceleration': {'value': 161, 'confidence': 102}, + 'curvature': { + 'curvatureValue': 1023, + 'curvatureConfidence': 'unavailable', + }, + 'curvatureCalculationMode': 'unavailable', + 'yawRate': { + 'yawRateValue': 32767, + 'yawRateConfidence': 'unavailable', + }, + }, + ), + }, + }, + } + if extension_containers is not None: + cam['cam']['camParameters']['extensionContainers'] = extension_containers + return cam diff --git a/tests/flexstack/facilities/ca_basic_service/test_cam_reception_management.py 
b/tests/flexstack/facilities/ca_basic_service/test_cam_reception_management.py index 3839103..02d06a8 100644 --- a/tests/flexstack/facilities/ca_basic_service/test_cam_reception_management.py +++ b/tests/flexstack/facilities/ca_basic_service/test_cam_reception_management.py @@ -5,41 +5,134 @@ ) -class TestCamReceptionManagement(unittest.TestCase): - def test__init__(self): - # Arrange - cam_coder = MagicMock() - btp_router = MagicMock() - btp_router.register_indication_callback_btp = MagicMock() - ldm_facility = MagicMock() - ca_basic_service_ldm = MagicMock() - # Act - cam_reception_management = CAMReceptionManagement( - cam_coder, btp_router, ldm_facility - ) - # Assert - cam_reception_management.cam_coder = cam_coder - cam_reception_management.btp_router = btp_router - cam_reception_management.ca_basic_service_ldm = ca_basic_service_ldm - btp_router.register_indication_callback_btp.assert_called_once() - - def test_reception_callback(self): - # Arrange - cam_coder = MagicMock() - cam = {"cam": {"generationDeltaTime": 24856}, - "header": {"stationId": 1}} - cam_coder.decode = MagicMock(return_value=cam) - btp_router = MagicMock() - btp_router.register_indication_callback_btp = MagicMock() - ca_basic_service_ldm = MagicMock() - ca_basic_service_ldm.add_provider_data_to_ldm = MagicMock() - # Act - cam_reception_management = CAMReceptionManagement( - cam_coder, btp_router, ca_basic_service_ldm +def _make_crm(with_ldm=False): + cam_coder = MagicMock() + btp_router = MagicMock() + ldm = MagicMock() if with_ldm else None + crm = CAMReceptionManagement(cam_coder, btp_router, ldm) + return crm, cam_coder, btp_router, ldm + + +def _make_indication(data=None): + ind = MagicMock() + ind.data = data or MagicMock() + return ind + + +class TestCamReceptionManagementInit(unittest.TestCase): + + def test_registers_btp_callback_on_port_2001(self): + _, _, btp_router, _ = _make_crm() + btp_router.register_indication_callback_btp.assert_called_once_with( + port=2001, 
callback=btp_router.register_indication_callback_btp.call_args[1]["callback"] ) - data = MagicMock() - data.data = MagicMock() - cam_reception_management.reception_callback(data) - # Assert - ca_basic_service_ldm.add_provider_data_to_ldm.assert_called_once() + + def test_cam_coder_stored(self): + crm, cam_coder, _, _ = _make_crm() + self.assertIs(crm.cam_coder, cam_coder) + + def test_ldm_stored(self): + crm, _, _, ldm = _make_crm(with_ldm=True) + self.assertIs(crm.ca_basic_service_ldm, ldm) + + def test_no_application_callbacks_initially(self): + crm, _, _, _ = _make_crm() + self.assertEqual(crm._application_callbacks, []) + + +class TestReceptionCallback(unittest.TestCase): + + def test_reception_callback_decodes_cam(self): + crm, cam_coder, _, _ = _make_crm() + cam_coder.decode.return_value = { + "cam": {"generationDeltaTime": 24856}, + "header": {"stationId": 1}, + } + crm.reception_callback(_make_indication()) cam_coder.decode.assert_called_once() + + def test_reception_callback_updates_ldm(self): + crm, cam_coder, _, ldm = _make_crm(with_ldm=True) + cam_coder.decode.return_value = { + "cam": {"generationDeltaTime": 24856}, + "header": {"stationId": 1}, + } + crm.reception_callback(_make_indication()) + ldm.add_provider_data_to_ldm.assert_called_once() + + def test_reception_callback_no_ldm_no_error(self): + crm, cam_coder, _, _ = _make_crm(with_ldm=False) + cam_coder.decode.return_value = { + "cam": {"generationDeltaTime": 24856}, + "header": {"stationId": 1}, + } + crm.reception_callback(_make_indication()) # Should not raise + + def test_utc_timestamp_added_to_cam(self): + crm, cam_coder, _, _ = _make_crm() + cam_dict = {"cam": {"generationDeltaTime": 24856}, "header": {"stationId": 1}} + cam_coder.decode.return_value = cam_dict + callbacks_received = [] + crm.add_application_callback(lambda c: callbacks_received.append(c)) + crm.reception_callback(_make_indication()) + self.assertIn("utc_timestamp", callbacks_received[0]) + + +class 
TestAnnexB331DecodeException(unittest.TestCase): + + def test_decode_exception_does_not_update_ldm(self): + """Annex B.3.3.1: decoding failure must not update the LDM.""" + crm, cam_coder, _, ldm = _make_crm(with_ldm=True) + cam_coder.decode.side_effect = ValueError("bad packet") + crm.reception_callback(_make_indication()) + ldm.add_provider_data_to_ldm.assert_not_called() + + def test_decode_exception_does_not_raise(self): + """Annex B.3.3.1: decoding failure must not propagate.""" + crm, cam_coder, _, _ = _make_crm() + cam_coder.decode.side_effect = ValueError("bad packet") + crm.reception_callback(_make_indication()) # Should not raise + + def test_decode_exception_skips_application_callbacks(self): + crm, cam_coder, _, _ = _make_crm() + cam_coder.decode.side_effect = ValueError("bad packet") + received = [] + crm.add_application_callback(lambda c: received.append(c)) + crm.reception_callback(_make_indication()) + self.assertEqual(received, []) + + +class TestApplicationCallbacks(unittest.TestCase): + + def test_add_application_callback_called_on_valid_cam(self): + crm, cam_coder, _, _ = _make_crm() + cam_coder.decode.return_value = { + "cam": {"generationDeltaTime": 1000}, + "header": {"stationId": 42}, + } + received = [] + crm.add_application_callback(lambda c: received.append(c)) + crm.reception_callback(_make_indication()) + self.assertEqual(len(received), 1) + self.assertEqual(received[0]["header"]["stationId"], 42) + + def test_multiple_callbacks_all_called(self): + crm, cam_coder, _, _ = _make_crm() + cam_coder.decode.return_value = { + "cam": {"generationDeltaTime": 1000}, + "header": {"stationId": 7}, + } + count = [] + crm.add_application_callback(lambda c: count.append(1)) + crm.add_application_callback(lambda c: count.append(2)) + crm.reception_callback(_make_indication()) + self.assertEqual(count, [1, 2]) + + def test_faulty_application_callback_does_not_propagate(self): + crm, cam_coder, _, _ = _make_crm() + cam_coder.decode.return_value = { + 
"cam": {"generationDeltaTime": 1000}, + "header": {"stationId": 7}, + } + crm.add_application_callback(lambda c: (_ for _ in ()).throw(RuntimeError("oops"))) + crm.reception_callback(_make_indication()) # Should not raise diff --git a/tests/flexstack/facilities/ca_basic_service/test_cam_transmission_management.py b/tests/flexstack/facilities/ca_basic_service/test_cam_transmission_management.py index 97db392..2b48d6c 100644 --- a/tests/flexstack/facilities/ca_basic_service/test_cam_transmission_management.py +++ b/tests/flexstack/facilities/ca_basic_service/test_cam_transmission_management.py @@ -2,73 +2,123 @@ from unittest.mock import MagicMock, patch -from flexstack.facilities.ca_basic_service.cam_transmission_management import GenerationDeltaTime, CAMTransmissionManagement, CooperativeAwarenessMessage, VehicleData +from flexstack.facilities.ca_basic_service.cam_transmission_management import ( + GenerationDeltaTime, + CAMTransmissionManagement, + CooperativeAwarenessMessage, + VehicleData, + T_GEN_CAM_MIN, + T_GEN_CAM_MAX, + N_GEN_CAM_DEFAULT, + _haversine_m, +) from flexstack.facilities.ca_basic_service.cam_coder import CAMCoder +# --------------------------------------------------------------------------- +# Helpers shared across test classes +# --------------------------------------------------------------------------- + +def _make_vehicle_data(**kwargs): + defaults = dict( + station_id=1, + station_type=5, + drive_direction="forward", + vehicle_length={ + "vehicleLengthValue": 50, + "vehicleLengthConfidenceIndication": "unavailable", + }, + vehicle_width=30, + ) + defaults.update(kwargs) + return VehicleData(**defaults) + + +def _make_tpv(lat=41.0, lon=2.0, track=90.0, speed=5.0): + return { + "lat": lat, + "lon": lon, + "track": track, + "speed": speed, + "time": "2020-01-01T00:00:00Z", + } + + +def _make_ctm(vehicle_data=None, ldm=None): + btp_router = MagicMock() + cam_coder = MagicMock() + cam_coder.encode.return_value = b"\x00" * 10 + 
cam_coder.encode_extension_container.return_value = b"\x00" + if vehicle_data is None: + vehicle_data = _make_vehicle_data() + ctm = CAMTransmissionManagement(btp_router, cam_coder, vehicle_data, ldm) + return ctm, btp_router, cam_coder, vehicle_data + + +# --------------------------------------------------------------------------- +# TestGenerationDeltaTime — unchanged +# --------------------------------------------------------------------------- + class TestGenerationDeltaTime(unittest.TestCase): def test_set_in_normal_timestamp(self): timestamp = 1675871599 generation_delta_time = GenerationDeltaTime.from_timestamp(timestamp) self.assertEqual(generation_delta_time.msec, - (((timestamp*1000)-1072915200000+5000) % 65536)) + (((timestamp * 1000) - 1072915200000 + 5000) % 65536)) def test_as_timestamp_in_certain_point(self): timestamp = 1755763553.722 - reception_timestamp_millis = (timestamp+0.3)*1000 + reception_timestamp_millis = (timestamp + 0.3) * 1000 generation_delta_time = GenerationDeltaTime.from_timestamp(timestamp) - self.assertEqual(generation_delta_time.as_timestamp_in_certain_point( - int(reception_timestamp_millis)), timestamp*1000) + self.assertEqual( + generation_delta_time.as_timestamp_in_certain_point( + int(reception_timestamp_millis) + ), + timestamp * 1000, + ) def test__gt__(self): - timestamp = 1675871599 - timestamp2 = timestamp + 1 - generation_delta_time = GenerationDeltaTime.from_timestamp(timestamp) - generation_delta_time2 = GenerationDeltaTime.from_timestamp(timestamp2) - self.assertTrue(generation_delta_time2 > generation_delta_time) - self.assertFalse(generation_delta_time > generation_delta_time2) + ts1 = GenerationDeltaTime.from_timestamp(1675871599) + ts2 = GenerationDeltaTime.from_timestamp(1675871600) + self.assertTrue(ts2 > ts1) + self.assertFalse(ts1 > ts2) def test__lt__(self): - timestamp = 1675871599 - timestamp2 = timestamp + 1 - generation_delta_time = GenerationDeltaTime.from_timestamp(timestamp) - generation_delta_time2 
= GenerationDeltaTime.from_timestamp(timestamp2) - self.assertTrue(generation_delta_time < generation_delta_time2) - self.assertFalse(generation_delta_time2 < generation_delta_time) + ts1 = GenerationDeltaTime.from_timestamp(1675871599) + ts2 = GenerationDeltaTime.from_timestamp(1675871600) + self.assertTrue(ts1 < ts2) + self.assertFalse(ts2 < ts1) def test__ge__(self): - timestamp = 1675871599 - timestamp2 = timestamp + 1 - generation_delta_time = GenerationDeltaTime.from_timestamp(timestamp) - generation_delta_time2 = GenerationDeltaTime.from_timestamp(timestamp2) - self.assertTrue(generation_delta_time >= generation_delta_time) - self.assertTrue(generation_delta_time2 >= generation_delta_time) - self.assertFalse(generation_delta_time >= generation_delta_time2) + ts1 = GenerationDeltaTime.from_timestamp(1675871599) + ts2 = GenerationDeltaTime.from_timestamp(1675871600) + self.assertTrue(ts1 >= ts1) + self.assertTrue(ts2 >= ts1) + self.assertFalse(ts1 >= ts2) def test__le__(self): - timestamp = 1675871599 - timestamp2 = timestamp + 1 - generation_delta_time = GenerationDeltaTime.from_timestamp(timestamp) - generation_delta_time2 = GenerationDeltaTime.from_timestamp(timestamp2) - self.assertTrue(generation_delta_time <= generation_delta_time) - self.assertTrue(generation_delta_time <= generation_delta_time2) - self.assertFalse(generation_delta_time2 <= generation_delta_time) + ts1 = GenerationDeltaTime.from_timestamp(1675871599) + ts2 = GenerationDeltaTime.from_timestamp(1675871600) + self.assertTrue(ts1 <= ts1) + self.assertTrue(ts1 <= ts2) + self.assertFalse(ts2 <= ts1) def test__add__(self): - timestamp = 1675871599 - generation_delta_time = GenerationDeltaTime.from_timestamp(timestamp) - generation_delta_time2 = GenerationDeltaTime(msec=30) - sum = generation_delta_time + generation_delta_time2 - self.assertEqual(sum, (generation_delta_time.msec+30) % 65536) + gdt = GenerationDeltaTime.from_timestamp(1675871599) + result = gdt + GenerationDeltaTime(msec=30) + 
self.assertEqual(result, (gdt.msec + 30) % 65536) def test__sub__(self): - generation_delta_time = GenerationDeltaTime(msec=20) - generation_delta_time2 = GenerationDeltaTime(msec=30) - diff = generation_delta_time - generation_delta_time2 - self.assertEqual(diff, -10+65536) + gdt1 = GenerationDeltaTime(msec=20) + gdt2 = GenerationDeltaTime(msec=30) + self.assertEqual(gdt1 - gdt2, -10 + 65536) +# --------------------------------------------------------------------------- +# TestCooperativeAwarenessMessage — unchanged +# --------------------------------------------------------------------------- + class TestCooperativeAwarenessMessage(unittest.TestCase): def __init__(self, methodName: str = ...) -> None: @@ -76,101 +126,749 @@ def __init__(self, methodName: str = ...) -> None: self.coder = CAMCoder() def test__init__(self): - cooperative_awareness_message = CooperativeAwarenessMessage() - encoded_white = self.coder.encode(cooperative_awareness_message.cam) - expected_cam = b'\x02\x02\x00\x00\x00\x00\x00\x00\x00\ri:@:\xd2t\x80?\xff\xff\xfc#\xb7t>\x00\xe1\x1f\xdf\xff\xfe\xbf\xe9\xed\x077\xfe\xeb\xff\xf6\x00' - self.assertEqual(encoded_white, expected_cam) + cam = CooperativeAwarenessMessage() + encoded = self.coder.encode(cam.cam) + expected = ( + b'\x02\x02\x00\x00\x00\x00\x00\x00\x00\ri:@:\xd2t\x80' + b'?\xff\xff\xfc#\xb7t>\x00\xe1\x1f\xdf\xff\xfe\xbf\xe9' + b'\xed\x077\xfe\xeb\xff\xf6\x00' + ) + self.assertEqual(encoded, expected) def test_fullfill_with_vehicle_data(self): - vehicle_data = VehicleData( - station_id=30, - station_type=5, - drive_direction='forward', - vehicle_length={"vehicleLengthValue": 50, - "vehicleLengthConfidenceIndication": "unavailable"}, - vehicle_width=30 + vd = _make_vehicle_data() + cam = CooperativeAwarenessMessage() + cam.fullfill_with_vehicle_data(vd) + params = cam.cam["cam"]["camParameters"] + self.assertEqual(cam.cam["header"]["stationId"], vd.station_id) + self.assertEqual(params["basicContainer"] + ["stationType"], 
vd.station_type) + self.assertEqual( + params["highFrequencyContainer"][1]["driveDirection"], + vd.drive_direction, ) - - cooperative_awareness_message = CooperativeAwarenessMessage() - cooperative_awareness_message.fullfill_with_vehicle_data(vehicle_data) self.assertEqual( - cooperative_awareness_message.cam['header']['stationId'], vehicle_data.station_id) - self.assertEqual(cooperative_awareness_message.cam['cam']['camParameters'] - ['basicContainer']['stationType'], vehicle_data.station_type) - self.assertEqual(cooperative_awareness_message.cam['cam']['camParameters'] - ['highFrequencyContainer'][1]['driveDirection'], vehicle_data.drive_direction) - self.assertEqual(cooperative_awareness_message.cam['cam']['camParameters']['highFrequencyContainer'] - [1]['vehicleLength']['vehicleLengthValue'], vehicle_data.vehicle_length["vehicleLengthValue"]) - self.assertEqual(cooperative_awareness_message.cam['cam']['camParameters'] - ['highFrequencyContainer'][1]['vehicleWidth'], vehicle_data.vehicle_width) + params["highFrequencyContainer"][1]["vehicleLength"]["vehicleLengthValue"], + vd.vehicle_length["vehicleLengthValue"], + ) + self.assertEqual(params["highFrequencyContainer"] + [1]["vehicleWidth"], vd.vehicle_width) def test_fullfill_with_tpv_data(self): - tpv_data = {"class": "TPV", "device": "/dev/ttyACM0", "mode": 3, "time": "2020-03-13T13:01:14.000Z", "ept": 0.005, "lat": 41.453606167, - "lon": 2.073707333, "alt": 163.500, "epx": 8.754, "epy": 10.597, "epv": 31.970, "track": 0.0000, "speed": 0.011, "climb": 0.000, "eps": 0.57} - cooperative_awareness_message = CooperativeAwarenessMessage() - cooperative_awareness_message.fullfill_with_tpv_data(tpv_data) - self.assertEqual( - cooperative_awareness_message.cam['cam']['generationDeltaTime'], 24856) - self.assertEqual(cooperative_awareness_message.cam['cam']['camParameters'] - ['basicContainer']['referencePosition']['latitude'], int(tpv_data['lat']*10000000)) - 
self.assertEqual(cooperative_awareness_message.cam['cam']['camParameters'] - ['basicContainer']['referencePosition']['longitude'], int(tpv_data['lon']*10000000)) + tpv = { + "class": "TPV", "device": "/dev/ttyACM0", "mode": 3, + "time": "2020-03-13T13:01:14.000Z", "ept": 0.005, + "lat": 41.453606167, "lon": 2.073707333, "alt": 163.500, + "epx": 8.754, "epy": 10.597, "epv": 31.970, + "track": 0.0000, "speed": 0.011, "climb": 0.000, "eps": 0.57, + } + cam = CooperativeAwarenessMessage() + cam.fullfill_with_tpv_data(tpv) + self.assertEqual(cam.cam["cam"]["generationDeltaTime"], 24856) + pos = cam.cam["cam"]["camParameters"]["basicContainer"]["referencePosition"] + self.assertEqual(pos["latitude"], int(tpv["lat"] * 10000000)) + self.assertEqual(pos["longitude"], int(tpv["lon"] * 10000000)) -class TestCamTransmissionManagement(unittest.TestCase): +# --------------------------------------------------------------------------- +# TestVehicleData — new fields +# --------------------------------------------------------------------------- - def test_location_service_callback(self): - """ - Tests the location service callback. +class TestVehicleData(unittest.TestCase): - Mocks the call to fullfill_with_tpv_data and checks that the method is called with the correct parameters. - Mocks the call to send_next_cam and checks that the method is called with the correct parameters. 
- """ - btp_router = MagicMock() - cam_coder = MagicMock() - ca_basic_service_ldm = MagicMock() - ca_basic_service_ldm.add_provider_data_to_ldm = MagicMock() - vehicle_data = VehicleData( - station_id=30, - station_type=5, - drive_direction='forward', - vehicle_length={"vehicleLengthValue": 50, - "vehicleLengthConfidenceIndication": "unavailable"}, - vehicle_width=30 - ) - tpv_data = {"class": "TPV", "device": "/dev/ttyACM0", "mode": 3, "time": "2020-03-13T13:01:14.000Z", "ept": 0.005, "lat": 41.453606167, - "lon": 2.073707333, "alt": 163.500, "epx": 8.754, "epy": 10.597, "epv": 31.970, "track": 0.0000, "speed": 0.011, "climb": 0.000, "eps": 0.57} - cam_transmission_management = CAMTransmissionManagement( - btp_router, cam_coder, vehicle_data, ca_basic_service_ldm, ) - - with patch.object(CooperativeAwarenessMessage, 'fullfill_with_tpv_data') as mock_fullfill: - cam_transmission_management._send_cam = MagicMock() - cam_transmission_management.location_service_callback(tpv_data) - mock_fullfill.assert_called_with(tpv_data) - cam_transmission_management._send_cam.assert_called_once() - - def test_send_next_cam(self): - btp_router = MagicMock() - btp_router.btp_data_request = MagicMock() - cam_coder = MagicMock() - cam_coder.encode = MagicMock( - return_value=b'\x02\x02\x00\x00\x00\x00\x00\x00\x00\ri:@:\xd2t\x80?\xff\xff\xfc#\xb7t>\x00\xe1\x1f\xdf\xff\xfe\xbf\xe9\xed\x077\xfe\xeb\xff\xf6\x00') - vehicle_data = VehicleData( - station_id=30, - station_type=5, - drive_direction='forward', - vehicle_length={"vehicleLengthValue": 50, - "vehicleLengthConfidenceIndication": "unavailable"}, - vehicle_width=30 - ) - cam_transmission_management = CAMTransmissionManagement( - btp_router, cam_coder, vehicle_data) + def test_default_new_fields(self): + vd = VehicleData() + self.assertEqual(vd.vehicle_role, 0) + self.assertEqual(vd.exterior_lights, b"\x00") + self.assertIsNone(vd.special_vehicle_data) + + def test_invalid_vehicle_role_raises(self): + with 
self.assertRaises(ValueError): + VehicleData(vehicle_role=16) + with self.assertRaises(ValueError): + VehicleData(vehicle_role=-1) + + def test_invalid_exterior_lights_raises(self): + with self.assertRaises(ValueError): + VehicleData(exterior_lights=b"") + + def test_valid_non_default_role(self): + vd = VehicleData(vehicle_role=6, exterior_lights=b"\x80") + self.assertEqual(vd.vehicle_role, 6) + self.assertEqual(vd.exterior_lights, b"\x80") + + +# --------------------------------------------------------------------------- +# TestHaversine +# --------------------------------------------------------------------------- + +class TestHaversine(unittest.TestCase): + + def test_zero_distance(self): + self.assertAlmostEqual(_haversine_m( + 41.0, 2.0, 41.0, 2.0), 0.0, places=3) + + def test_known_distance_approx(self): + # ~5 m north at latitude 41° + d = _haversine_m(41.0, 2.0, 41.000045, 2.0) + self.assertGreater(d, 4.5) + self.assertLess(d, 5.5) + + def test_direction_symmetry(self): + d1 = _haversine_m(41.0, 2.0, 41.001, 2.001) + d2 = _haversine_m(41.001, 2.001, 41.0, 2.0) + self.assertAlmostEqual(d1, d2, places=6) + + +# --------------------------------------------------------------------------- +# TestCAMTransmissionManagementInit +# --------------------------------------------------------------------------- + +class TestCAMTransmissionManagementInit(unittest.TestCase): + + def test_t_gen_cam_starts_at_max(self): + """§6.1.3: T_GenCam default = T_GenCamMax.""" + ctm, _, _, _ = _make_ctm() + self.assertEqual(ctm.t_gen_cam, T_GEN_CAM_MAX) + + def test_not_active_on_init(self): + ctm, _, _, _ = _make_ctm() + self.assertFalse(ctm._active) + + def test_cam_count_zero_on_init(self): + ctm, _, _, _ = _make_ctm() + self.assertEqual(ctm._cam_count, 0) + + def test_last_cam_time_none_on_init(self): + ctm, _, _, _ = _make_ctm() + self.assertIsNone(ctm._last_cam_time_ms) + + +# --------------------------------------------------------------------------- +# TestStartStop — §6.1.2 
+# --------------------------------------------------------------------------- + +class TestStartStop(unittest.TestCase): + + def test_start_sets_active(self): + ctm, _, _, _ = _make_ctm() + with patch( + "flexstack.facilities.ca_basic_service.cam_transmission_management.threading.Timer" + ) as mock_timer_cls: + mock_timer_cls.return_value = MagicMock() + ctm.start() + self.assertTrue(ctm._active) + + def test_start_schedules_timer(self): + ctm, _, _, _ = _make_ctm() + with patch( + "flexstack.facilities.ca_basic_service.cam_transmission_management.threading.Timer" + ) as mock_timer_cls: + mock_inst = MagicMock() + mock_timer_cls.return_value = mock_inst + ctm.start() + mock_timer_cls.assert_called_once() + mock_inst.start.assert_called_once() + + def test_stop_clears_active(self): + ctm, _, _, _ = _make_ctm() + with patch( + "flexstack.facilities.ca_basic_service.cam_transmission_management.threading.Timer" + ) as mock_timer_cls: + mock_timer_cls.return_value = MagicMock() + ctm.start() + ctm.stop() + self.assertFalse(ctm._active) + + def test_stop_cancels_timer(self): + ctm, _, _, _ = _make_ctm() + mock_timer = MagicMock() + ctm._timer = mock_timer + ctm._active = True + ctm.stop() + mock_timer.cancel.assert_called_once() + self.assertIsNone(ctm._timer) + + def test_start_resets_cam_count(self): + ctm, _, _, _ = _make_ctm() + ctm._cam_count = 5 + with patch( + "flexstack.facilities.ca_basic_service.cam_transmission_management.threading.Timer" + ) as mock_timer_cls: + mock_timer_cls.return_value = MagicMock() + ctm.start() + self.assertEqual(ctm._cam_count, 0) + + def test_double_start_is_idempotent(self): + ctm, _, _, _ = _make_ctm() + with patch( + "flexstack.facilities.ca_basic_service.cam_transmission_management.threading.Timer" + ) as mock_timer_cls: + mock_timer_cls.return_value = MagicMock() + ctm.start() + ctm.start() # second start should be a no-op + self.assertEqual(mock_timer_cls.call_count, 1) + + +# 
--------------------------------------------------------------------------- +# TestLocationServiceCallback +# --------------------------------------------------------------------------- + +class TestLocationServiceCallback(unittest.TestCase): + + def test_callback_updates_current_tpv(self): + """location_service_callback only caches the TPV (§6.1.3).""" + ctm, _, _, _ = _make_ctm() + tpv = _make_tpv() + ctm.location_service_callback(tpv) + self.assertEqual(ctm._current_tpv, tpv) + + def test_callback_does_not_trigger_send(self): + """Callback must NOT directly trigger CAM transmission.""" + ctm, btp_router, _, _ = _make_ctm() + tpv = _make_tpv() + ctm.location_service_callback(tpv) + btp_router.btp_data_request.assert_not_called() + + +# --------------------------------------------------------------------------- +# TestSendCam +# --------------------------------------------------------------------------- + +class TestSendCam(unittest.TestCase): + + def test_send_cam_encodes_and_calls_btp(self): + ctm, btp_router, cam_coder, _ = _make_ctm() cam = CooperativeAwarenessMessage() - cam_transmission_management._send_cam(cam) + ctm._send_cam(cam) + cam_coder.encode.assert_called_with(cam.cam) btp_router.btp_data_request.assert_called_once() - cam_coder.encode.assert_called_with( - cam.cam) + + def test_send_cam_sets_gn_max_packet_lifetime(self): + """§5.3.4.1: GN max packet lifetime shall not exceed 1000 ms.""" + ctm, btp_router, cam_coder, _ = _make_ctm() + cam = CooperativeAwarenessMessage() + ctm._send_cam(cam) + request = btp_router.btp_data_request.call_args[0][0] + self.assertEqual(request.gn_max_packet_lifetime, 1.0) + + def test_send_cam_destination_port_2001(self): + """§5.3.4.1: destination port shall be 2001.""" + ctm, btp_router, _, _ = _make_ctm() + ctm._send_cam(CooperativeAwarenessMessage()) + request = btp_router.btp_data_request.call_args[0][0] + self.assertEqual(request.destination_port, 2001) + + def test_send_cam_updates_ldm_if_present(self): + ldm = 
MagicMock() + ctm, _, _, _ = _make_ctm(ldm=ldm) + ctm._send_cam(CooperativeAwarenessMessage()) + ldm.add_provider_data_to_ldm.assert_called_once() + + +# --------------------------------------------------------------------------- +# TestCheckDynamics — §6.1.3 Condition 1 +# --------------------------------------------------------------------------- + +class TestCheckDynamics(unittest.TestCase): + + def _ctm_with_last_cam(self, heading, lat, lon, speed): + ctm, _, _, _ = _make_ctm() + ctm._last_cam_heading = heading + ctm._last_cam_lat = lat + ctm._last_cam_lon = lon + ctm._last_cam_speed = speed + return ctm + + def test_no_previous_data_returns_true(self): + ctm, _, _, _ = _make_ctm() + # _last_cam_heading is None → True + self.assertTrue(ctm._check_dynamics(_make_tpv())) + + def test_heading_diff_over_4_deg_returns_true(self): + ctm = self._ctm_with_last_cam(90.0, 41.0, 2.0, 5.0) + self.assertTrue(ctm._check_dynamics(_make_tpv(track=95.0))) + + def test_heading_diff_under_4_deg_returns_false(self): + ctm = self._ctm_with_last_cam(90.0, 41.0, 2.0, 5.0) + self.assertFalse(ctm._check_dynamics( + _make_tpv(track=93.0, lat=41.0, lon=2.0, speed=5.0))) + + def test_heading_wrap_around_360(self): + ctm = self._ctm_with_last_cam(358.0, 41.0, 2.0, 5.0) + # 2 - 358 ... 
wraps to 4° difference exactly; 5° should trigger + self.assertTrue(ctm._check_dynamics( + _make_tpv(track=3.0, lat=41.0, lon=2.0, speed=5.0))) + + def test_position_diff_over_4m_returns_true(self): + ctm = self._ctm_with_last_cam(90.0, 41.0, 2.0, 5.0) + # Shift ~5 m north + tpv = _make_tpv(lat=41.000045, lon=2.0, track=90.0, speed=5.0) + self.assertTrue(ctm._check_dynamics(tpv)) + + def test_position_diff_under_4m_no_other_change(self): + ctm = self._ctm_with_last_cam(90.0, 41.0, 2.0, 5.0) + # 1 m shift + tpv = _make_tpv(lat=41.000009, lon=2.0, track=90.0, speed=5.0) + self.assertFalse(ctm._check_dynamics(tpv)) + + def test_speed_diff_over_half_ms_returns_true(self): + ctm = self._ctm_with_last_cam(90.0, 41.0, 2.0, 5.0) + tpv = _make_tpv(speed=5.6, lat=41.0, lon=2.0, track=90.0) + self.assertTrue(ctm._check_dynamics(tpv)) + + def test_speed_diff_under_half_ms_no_other_change(self): + ctm = self._ctm_with_last_cam(90.0, 41.0, 2.0, 5.0) + tpv = _make_tpv(speed=5.4, lat=41.0, lon=2.0, track=90.0) + self.assertFalse(ctm._check_dynamics(tpv)) + + +# --------------------------------------------------------------------------- +# TestContainerInclusion — §6.1.3 optional containers +# --------------------------------------------------------------------------- + +class TestContainerInclusion(unittest.TestCase): + + # --- Low Frequency Container --- + + def test_lf_on_first_cam(self): + ctm, _, _, _ = _make_ctm() + self.assertEqual(ctm._cam_count, 0) + self.assertTrue(ctm._should_include_lf(1000)) + + def test_lf_after_500ms(self): + ctm, _, _, _ = _make_ctm() + ctm._cam_count = 1 + ctm._last_lf_time_ms = 500 + # elapsed = 500 ms ≥ 500 ms + self.assertTrue(ctm._should_include_lf(1000)) + + def test_lf_not_before_500ms(self): + ctm, _, _, _ = _make_ctm() + ctm._cam_count = 1 + ctm._last_lf_time_ms = 600 + # elapsed = 400 ms < 500 ms + self.assertFalse(ctm._should_include_lf(1000)) + + # --- Special Vehicle Container --- + + def 
test_special_not_included_for_default_role(self): + ctm, _, _, _ = _make_ctm( + vehicle_data=_make_vehicle_data(vehicle_role=0)) + self.assertFalse(ctm._should_include_special_vehicle(1000)) + + def test_special_on_first_cam_non_default_role(self): + ctm, _, _, _ = _make_ctm( + vehicle_data=_make_vehicle_data(vehicle_role=6)) + ctm._cam_count = 0 + self.assertTrue(ctm._should_include_special_vehicle(1000)) + + def test_special_after_500ms(self): + ctm, _, _, _ = _make_ctm( + vehicle_data=_make_vehicle_data(vehicle_role=6)) + ctm._cam_count = 1 + ctm._last_special_time_ms = 400 + # 500 ms elapsed + self.assertTrue(ctm._should_include_special_vehicle(900)) + + def test_special_not_before_500ms(self): + ctm, _, _, _ = _make_ctm( + vehicle_data=_make_vehicle_data(vehicle_role=6)) + ctm._cam_count = 1 + ctm._last_special_time_ms = 700 + # 200 ms elapsed + self.assertFalse(ctm._should_include_special_vehicle(900)) + + # --- Very Low Frequency Container --- + + def test_vlf_on_second_cam(self): + ctm, _, _, _ = _make_ctm() + ctm._cam_count = 1 # second CAM about to be sent + self.assertTrue(ctm._should_include_vlf(1000, False, False)) + + def test_vlf_not_on_first_cam(self): + ctm, _, _, _ = _make_ctm() + ctm._cam_count = 0 + self.assertFalse(ctm._should_include_vlf(1000, False, False)) + + def test_vlf_after_10s_no_lf_no_special(self): + ctm, _, _, _ = _make_ctm() + ctm._cam_count = 5 + ctm._last_vlf_time_ms = 0 + self.assertTrue(ctm._should_include_vlf(10001, False, False)) + + def test_vlf_not_if_lf_included(self): + ctm, _, _, _ = _make_ctm() + ctm._cam_count = 5 + ctm._last_vlf_time_ms = 0 + self.assertFalse(ctm._should_include_vlf( + 10001, include_lf=True, include_special=False)) + + def test_vlf_not_if_special_included(self): + ctm, _, _, _ = _make_ctm() + ctm._cam_count = 5 + ctm._last_vlf_time_ms = 0 + self.assertFalse(ctm._should_include_vlf( + 10001, include_lf=False, include_special=True)) + + def test_vlf_not_before_10s(self): + ctm, _, _, _ = _make_ctm() + 
ctm._cam_count = 5 + ctm._last_vlf_time_ms = 0 + self.assertFalse(ctm._should_include_vlf(9999, False, False)) + + # --- Two-Wheeler Container --- + + def test_two_wheeler_for_cyclist(self): + ctm, _, _, _ = _make_ctm( + vehicle_data=_make_vehicle_data(station_type=2)) + self.assertTrue(ctm._should_include_two_wheeler()) + + def test_two_wheeler_for_moped(self): + ctm, _, _, _ = _make_ctm( + vehicle_data=_make_vehicle_data(station_type=3)) + self.assertTrue(ctm._should_include_two_wheeler()) + + def test_two_wheeler_for_motorcycle(self): + ctm, _, _, _ = _make_ctm( + vehicle_data=_make_vehicle_data(station_type=4)) + self.assertTrue(ctm._should_include_two_wheeler()) + + def test_two_wheeler_not_for_passenger_car(self): + ctm, _, _, _ = _make_ctm( + vehicle_data=_make_vehicle_data(station_type=5)) + self.assertFalse(ctm._should_include_two_wheeler()) + + +# --------------------------------------------------------------------------- +# TestGenerateAndSendCam — CAM building, containers, state update +# --------------------------------------------------------------------------- + +class TestGenerateAndSendCam(unittest.TestCase): + + def _ctm_with_send_capture(self, vehicle_data=None): + ctm, btp_router, cam_coder, vd = _make_ctm(vehicle_data=vehicle_data) + sent = [] + + def capture(cam_obj): + sent.append(cam_obj) + ctm._send_cam = capture + return ctm, sent, cam_coder, vd + + @patch("flexstack.facilities.ca_basic_service.cam_transmission_management.TimeService.time", + return_value=1000.0) + def test_first_cam_includes_lf_container(self, _mock_time): + ctm, sent, _, _ = self._ctm_with_send_capture() + tpv = _make_tpv() + ctm._generate_and_send_cam(tpv, 1_000_000, condition=1) + self.assertEqual(len(sent), 1) + params = sent[0].cam["cam"]["camParameters"] + self.assertIn("lowFrequencyContainer", params) + + @patch("flexstack.facilities.ca_basic_service.cam_transmission_management.TimeService.time", + return_value=1000.0) + def test_lf_container_content(self, 
_mock_time): + ctm, sent, _, _ = self._ctm_with_send_capture( + vehicle_data=_make_vehicle_data( + vehicle_role=0, exterior_lights=b"\x80") + ) + tpv = _make_tpv() + ctm._generate_and_send_cam(tpv, 1_000_000, condition=1) + lf_choice, lf_data = sent[0].cam["cam"]["camParameters"]["lowFrequencyContainer"] + self.assertEqual(lf_choice, "basicVehicleContainerLowFrequency") + self.assertEqual(lf_data["vehicleRole"], "default") + self.assertEqual(lf_data["exteriorLights"], (b"\x80", 8)) + + @patch("flexstack.facilities.ca_basic_service.cam_transmission_management.TimeService.time", + return_value=1000.0) + def test_second_cam_includes_vlf_extension_container(self, _mock_time): + ctm, sent, cam_coder, _ = self._ctm_with_send_capture() + ctm._cam_count = 1 # about to send second CAM + ctm._last_cam_time_ms = 999_000 + ctm._last_cam_heading = 90.0 + ctm._last_cam_lat = 41.0 + ctm._last_cam_lon = 2.0 + ctm._last_cam_speed = 5.0 + ctm._last_lf_time_ms = 500_000 # LF not due yet + tpv = _make_tpv() + ctm._generate_and_send_cam(tpv, 1_000_000, condition=2) + params = sent[0].cam["cam"]["camParameters"] + ext = params.get("extensionContainers", []) + container_ids = [e["containerId"] for e in ext] + self.assertIn(3, container_ids) # VLF = container id 3 + + @patch("flexstack.facilities.ca_basic_service.cam_transmission_management.TimeService.time", + return_value=1000.0) + def test_two_wheeler_extension_container_in_every_cam(self, _mock_time): + vd = _make_vehicle_data(station_type=2) # cyclist + ctm, sent, cam_coder, _ = self._ctm_with_send_capture(vehicle_data=vd) + ctm._cam_count = 5 + ctm._last_cam_time_ms = 0 + ctm._last_lf_time_ms = 999_600 # LF recently sent + tpv = _make_tpv() + ctm._generate_and_send_cam(tpv, 1_000_000, condition=2) + ext = sent[0].cam["cam"]["camParameters"].get( + "extensionContainers", []) + container_ids = [e["containerId"] for e in ext] + self.assertIn(1, container_ids) # TwoWheeler = container id 1 + + 
@patch("flexstack.facilities.ca_basic_service.cam_transmission_management.TimeService.time", + return_value=1000.0) + def test_special_vehicle_container_included(self, _mock_time): + vd = _make_vehicle_data( + vehicle_role=6, + special_vehicle_data=("emergencyContainer", { + "lightBarSirenInUse": (b"\x00", 8), + }), + ) + ctm, sent, _, _ = self._ctm_with_send_capture(vehicle_data=vd) + ctm._generate_and_send_cam(_make_tpv(), 1_000_000, condition=1) + params = sent[0].cam["cam"]["camParameters"] + self.assertIn("specialVehicleContainer", params) + + @patch("flexstack.facilities.ca_basic_service.cam_transmission_management.TimeService.time", + return_value=1000.0) + def test_cam_count_increments_after_send(self, _mock_time): + ctm, _, _, _ = _make_ctm() + ctm._send_cam = MagicMock() + self.assertEqual(ctm._cam_count, 0) + ctm._generate_and_send_cam(_make_tpv(), 1_000_000, condition=1) + self.assertEqual(ctm._cam_count, 1) + + @patch("flexstack.facilities.ca_basic_service.cam_transmission_management.TimeService.time", + return_value=1000.0) + def test_last_cam_time_updated_after_send(self, _mock_time): + ctm, _, _, _ = _make_ctm() + ctm._send_cam = MagicMock() + ctm._generate_and_send_cam(_make_tpv(), 1_000_500, condition=1) + self.assertEqual(ctm._last_cam_time_ms, 1_000_500) + + @patch("flexstack.facilities.ca_basic_service.cam_transmission_management.TimeService.time", + return_value=1000.0) + def test_dynamics_state_updated_after_send(self, _mock_time): + ctm, _, _, _ = _make_ctm() + ctm._send_cam = MagicMock() + tpv = _make_tpv(lat=41.5, lon=2.5, track=135.0, speed=10.0) + ctm._generate_and_send_cam(tpv, 1_000_000, condition=1) + self.assertAlmostEqual(ctm._last_cam_heading, 135.0) + self.assertAlmostEqual(ctm._last_cam_lat, 41.5) + self.assertAlmostEqual(ctm._last_cam_lon, 2.5) + self.assertAlmostEqual(ctm._last_cam_speed, 10.0) + + +# --------------------------------------------------------------------------- +# TestAnnexB25ConstructionException +# 
--------------------------------------------------------------------------- + +class TestAnnexB25ConstructionException(unittest.TestCase): + + @patch("flexstack.facilities.ca_basic_service.cam_transmission_management.TimeService.time", + return_value=1000.0) + def test_encode_exception_skips_state_update(self, _mock_time): + """Annex B.2.5: on construction/encoding failure, state must NOT be updated.""" + ctm, btp_router, cam_coder, _ = _make_ctm() + cam_coder.encode.side_effect = ValueError("encode error") + initial_cam_count = ctm._cam_count + + ctm._generate_and_send_cam(_make_tpv(), 1_000_000, condition=1) + + # State must remain unchanged + self.assertEqual(ctm._cam_count, initial_cam_count) + self.assertIsNone(ctm._last_cam_time_ms) + btp_router.btp_data_request.assert_not_called() + + +# --------------------------------------------------------------------------- +# TestTGenCamManagement — §6.1.3 T_GenCam state machine +# --------------------------------------------------------------------------- + +class TestTGenCamManagement(unittest.TestCase): + + def _ctm_with_prior_cam(self, last_cam_time_ms=0): + ctm, _, _, _ = _make_ctm() + ctm._send_cam = MagicMock() + # Simulate one previous CAM + ctm._last_cam_time_ms = last_cam_time_ms + ctm._last_cam_heading = 90.0 + ctm._last_cam_lat = 41.0 + ctm._last_cam_lon = 2.0 + ctm._last_cam_speed = 5.0 + ctm._cam_count = 1 + ctm._last_lf_time_ms = last_cam_time_ms + return ctm + + @patch("flexstack.facilities.ca_basic_service.cam_transmission_management.TimeService.time", + return_value=1.5) + def test_condition1_sets_t_gen_cam_to_elapsed(self, _mock_time): + """§6.1.3: condition-1 CAM sets T_GenCam = elapsed time.""" + ctm = self._ctm_with_prior_cam(last_cam_time_ms=1000) + # elapsed = 1500 - 1000 = 500 ms + ctm._generate_and_send_cam(_make_tpv(), 1500, condition=1) + self.assertEqual(ctm.t_gen_cam, 500) + + @patch("flexstack.facilities.ca_basic_service.cam_transmission_management.TimeService.time", + return_value=2.0) + 
def test_condition2_resets_t_gen_cam_to_max(self, _mock_time): + """§6.1.3: condition-2 CAM resets T_GenCam to T_GenCamMax.""" + ctm = self._ctm_with_prior_cam(last_cam_time_ms=0) + ctm.t_gen_cam = 400 # artificially lowered + ctm._generate_and_send_cam(_make_tpv(), 2000, condition=2) + self.assertEqual(ctm.t_gen_cam, T_GEN_CAM_MAX) + + @patch("flexstack.facilities.ca_basic_service.cam_transmission_management.TimeService.time", + return_value=2.0) + def test_n_gen_cam_resets_after_default_consecutive(self, _mock_time): + """ + §6.1.3: after N_GenCam consecutive condition-1 CAMs, + T_GenCam is reset to T_GenCamMax and the counter is reset. + """ + ctm = self._ctm_with_prior_cam(last_cam_time_ms=0) + ctm._n_gen_cam_counter = N_GEN_CAM_DEFAULT - 1 # one short of reset + + # Elapsed = 2000 ms (clamped to T_GEN_CAM_MAX) + ctm._generate_and_send_cam(_make_tpv(), 2000, condition=1) + + # Counter reached N_GenCam → reset + self.assertEqual(ctm.t_gen_cam, T_GEN_CAM_MAX) + self.assertEqual(ctm._n_gen_cam_counter, 0) + + @patch("flexstack.facilities.ca_basic_service.cam_transmission_management.TimeService.time", + return_value=2.0) + def test_t_gen_cam_clamped_to_min(self, _mock_time): + """T_GenCam must not go below T_GenCamMin.""" + ctm = self._ctm_with_prior_cam(last_cam_time_ms=1950) + # elapsed = 2000 - 1950 = 50 ms < T_GenCamMin + ctm._generate_and_send_cam(_make_tpv(), 2000, condition=1) + self.assertEqual(ctm.t_gen_cam, T_GEN_CAM_MIN) + + +# --------------------------------------------------------------------------- +# TestEvaluateConditions — §6.1.3 conditions integration +# --------------------------------------------------------------------------- + +class TestEvaluateConditions(unittest.TestCase): + + def test_first_cam_sent_when_tpv_available(self): + ctm, _, _, _ = _make_ctm() + ctm._send_cam = MagicMock() + ctm._current_tpv = _make_tpv() + # No previous CAM → sends immediately + with 
patch("flexstack.facilities.ca_basic_service.cam_transmission_management.TimeService.time", + return_value=1000.0): + ctm._evaluate_and_maybe_send() + ctm._send_cam.assert_called_once() + + def test_no_cam_without_tpv(self): + ctm, _, _, _ = _make_ctm() + ctm._send_cam = MagicMock() + ctm._current_tpv = None + with patch("flexstack.facilities.ca_basic_service.cam_transmission_management.TimeService.time", + return_value=1000.0): + ctm._evaluate_and_maybe_send() + ctm._send_cam.assert_not_called() + + def test_condition2_triggers_after_t_gen_cam_elapsed(self): + ctm, _, _, _ = _make_ctm() + ctm._send_cam = MagicMock() + ctm._current_tpv = _make_tpv() + ctm._last_cam_time_ms = 0 + ctm._last_cam_heading = 90.0 + ctm._last_cam_lat = 41.0 + ctm._last_cam_lon = 2.0 + ctm._last_cam_speed = 5.0 + ctm._cam_count = 1 + ctm._last_lf_time_ms = 0 + ctm.t_gen_cam = T_GEN_CAM_MAX + # elapsed = 1001 ms ≥ T_GEN_CAM_MAX (1000) → condition 2 + with patch("flexstack.facilities.ca_basic_service.cam_transmission_management.TimeService.time", + return_value=1.001): + ctm._evaluate_and_maybe_send() + ctm._send_cam.assert_called_once() + + def test_no_cam_before_t_gen_cam_and_no_dynamics(self): + ctm, _, _, _ = _make_ctm() + ctm._send_cam = MagicMock() + ctm._current_tpv = _make_tpv() + ctm._last_cam_time_ms = 999_000 + ctm._last_cam_heading = 90.0 + ctm._last_cam_lat = 41.0 + ctm._last_cam_lon = 2.0 + ctm._last_cam_speed = 5.0 + ctm._cam_count = 1 + ctm.t_gen_cam = T_GEN_CAM_MAX + # elapsed = 100 ms < T_GEN_CAM_DCC → no condition satisfied + with patch("flexstack.facilities.ca_basic_service.cam_transmission_management.TimeService.time", + return_value=999.1): + ctm._evaluate_and_maybe_send() + ctm._send_cam.assert_not_called() + + def test_condition1_triggers_on_heading_change(self): + ctm, _, _, _ = _make_ctm() + ctm._send_cam = MagicMock() + ctm._current_tpv = _make_tpv(track=96.0) # 6° change from last + ctm._last_cam_time_ms = 999_800 + ctm._last_cam_heading = 90.0 + ctm._last_cam_lat 
= 41.0 + ctm._last_cam_lon = 2.0 + ctm._last_cam_speed = 5.0 + ctm._cam_count = 1 + ctm._last_lf_time_ms = 0 + ctm.t_gen_cam = T_GEN_CAM_MAX + # elapsed = 100 ms = T_GEN_CAM_DCC, heading diff = 6° > 4° → condition 1 + with patch("flexstack.facilities.ca_basic_service.cam_transmission_management.TimeService.time", + return_value=999.9): + ctm._evaluate_and_maybe_send() + ctm._send_cam.assert_called_once() + + +# --------------------------------------------------------------------------- +# TestPathHistory +# --------------------------------------------------------------------------- + +class TestPathHistory(unittest.TestCase): + + @patch("flexstack.facilities.ca_basic_service.cam_transmission_management.TimeService.time", + return_value=1000.0) + def test_path_history_added_after_send(self, _mock_time): + ctm, _, _, _ = _make_ctm() + ctm._send_cam = MagicMock() + tpv = _make_tpv(lat=41.5, lon=2.5) + ctm._generate_and_send_cam(tpv, 1_000_000, condition=1) + self.assertEqual(len(ctm._path_history), 1) + self.assertAlmostEqual(ctm._path_history[0][0], 41.5) + self.assertAlmostEqual(ctm._path_history[0][1], 2.5) + + @patch("flexstack.facilities.ca_basic_service.cam_transmission_management.TimeService.time", + return_value=1000.0) + def test_get_path_history_returns_relative_delta(self, _mock_time): + ctm, _, _, _ = _make_ctm() + # Manually add a history point + ctm._path_history = [(41.0001, 2.0, 999_000_000)] # 0.0001° north + tpv = _make_tpv(lat=41.0, lon=2.0) + result = ctm._get_path_history(tpv) + self.assertEqual(len(result), 1) + # delta_lat = round((41.0001 - 41.0) * 10_000_000) = 1000 + self.assertEqual(result[0]["pathPosition"]["deltaLatitude"], 1000) + self.assertEqual(result[0]["pathPosition"]["deltaLongitude"], 0) + + @patch("flexstack.facilities.ca_basic_service.cam_transmission_management.TimeService.time", + return_value=1000.0) + def test_path_history_empty_without_lat_lon(self, _mock_time): + ctm, _, _, _ = _make_ctm() + ctm._path_history = [(41.0, 2.0, 
999_000)] + result = ctm._get_path_history({"track": 90.0}) # no lat/lon + self.assertEqual(result, []) + + @patch("flexstack.facilities.ca_basic_service.cam_transmission_management.TimeService.time", + return_value=1000.0) + def test_path_history_capped_at_23_entries(self, _mock_time): + ctm, _, _, _ = _make_ctm() + ctm._path_history = [ + (41.0 + i * 0.000001, 2.0, 900_000 + i * 1000) for i in range(30)] + tpv = _make_tpv(lat=41.0, lon=2.0) + result = ctm._get_path_history(tpv) + self.assertLessEqual(len(result), 23) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/flexstack/facilities/decentralized_environmental_notification_service/test_denm_transmission_management.py b/tests/flexstack/facilities/decentralized_environmental_notification_service/test_denm_transmission_management.py index 0f4e364..f61557c 100644 --- a/tests/flexstack/facilities/decentralized_environmental_notification_service/test_denm_transmission_management.py +++ b/tests/flexstack/facilities/decentralized_environmental_notification_service/test_denm_transmission_management.py @@ -1,12 +1,14 @@ import unittest from unittest.mock import MagicMock, patch from flexstack.applications.road_hazard_signalling_service.service_access_point import DENRequest +from flexstack.btp.service_access_point import BTPDataRequest from flexstack.facilities.decentralized_environmental_notification_service.denm_coder import DENMCoder from flexstack.facilities.decentralized_environmental_notification_service.\ denm_transmission_management import ( DENMTransmissionManagement, DecentralizedEnvironmentalNotificationMessage ) from flexstack.facilities.ca_basic_service.cam_transmission_management import VehicleData +from flexstack.security.security_profiles import SecurityProfile class TestDecentralizedEnvironmentalNotificationMessage(unittest.TestCase): @@ -260,3 +262,29 @@ def test_transmit_denm(self): 
self.denm_transmission_management.btp_router.btp_data_request.assert_called_once() self.denm_transmission_management.denm_coder.encode.assert_called_once_with( new_denm.denm) + + def test_transmit_denm_uses_denm_security_profile(self): + """§7.1.2: transmit_denm must request DENM security profile with ITS-AID 37.""" + self.denm_transmission_management.btp_router = MagicMock() + self.denm_transmission_management.btp_router.btp_data_request = MagicMock() + self.denm_transmission_management.denm_coder.encode = MagicMock(return_value=b'\x00') + + new_denm = MagicMock() + new_denm.denm = { + "header": {"stationId": 1}, + "denm": { + "management": { + "eventPosition": {"latitude": 0, "longitude": 0}, + "referenceTime": 0, + } + } + } + self.denm_transmission_management.transmit_denm(new_denm) + + call_args = self.denm_transmission_management.btp_router.btp_data_request.call_args + btp_request: BTPDataRequest = call_args[0][0] + self.assertEqual( + btp_request.security_profile, + SecurityProfile.DECENTRALIZED_ENVIRONMENTAL_NOTIFICATION_MESSAGE, + ) + self.assertEqual(btp_request.its_aid, 37) diff --git a/tests/flexstack/facilities/vru_awareness_service/test_vam_transmission_management.py b/tests/flexstack/facilities/vru_awareness_service/test_vam_transmission_management.py index 4ffe0a4..2d624ee 100644 --- a/tests/flexstack/facilities/vru_awareness_service/test_vam_transmission_management.py +++ b/tests/flexstack/facilities/vru_awareness_service/test_vam_transmission_management.py @@ -252,10 +252,12 @@ def _set_previous_vam_state( def test_location_service_callback_first_sending(self) -> None: self.vam_transmission_management.send_next_vam = MagicMock() - self.vam_transmission_management.location_service_callback(self.tpv_data) + self.vam_transmission_management.location_service_callback( + self.tpv_data) self.vam_transmission_management.send_next_vam.assert_called_once() - sent_vam: VAMMessage = self.vam_transmission_management.send_next_vam.call_args.kwargs["vam"] + 
sent_vam: VAMMessage = self.vam_transmission_management.send_next_vam.call_args.kwargs[ + "vam"] self.assertIsInstance(sent_vam, VAMMessage) self.assertEqual( sent_vam.vam["header"]["stationId"], @@ -277,7 +279,8 @@ def test_location_service_callback_generation_time_threshold( self.vam_transmission_management.t_genvam = vam_constants.T_GENVAMMIN self.vam_transmission_management.send_next_vam = MagicMock() - self.vam_transmission_management.location_service_callback(self.tpv_data) + self.vam_transmission_management.location_service_callback( + self.tpv_data) self.vam_transmission_management.send_next_vam.assert_called_once() @@ -291,7 +294,8 @@ def test_location_service_callback_thresholds_not_met( self._set_previous_vam_state() self.vam_transmission_management.send_next_vam = MagicMock() - self.vam_transmission_management.location_service_callback(self.tpv_data) + self.vam_transmission_management.location_service_callback( + self.tpv_data) self.vam_transmission_management.send_next_vam.assert_not_called() @@ -305,7 +309,8 @@ def test_location_service_callback_distance_threshold( self._set_previous_vam_state() self.vam_transmission_management.send_next_vam = MagicMock() - self.vam_transmission_management.location_service_callback(self.tpv_data) + self.vam_transmission_management.location_service_callback( + self.tpv_data) self.vam_transmission_management.send_next_vam.assert_called_once() @@ -319,7 +324,8 @@ def test_location_service_callback_speed_threshold( self._set_previous_vam_state(speed=-1.0) self.vam_transmission_management.send_next_vam = MagicMock() - self.vam_transmission_management.location_service_callback(self.tpv_data) + self.vam_transmission_management.location_service_callback( + self.tpv_data) self.vam_transmission_management.send_next_vam.assert_called_once() @@ -333,7 +339,8 @@ def test_location_service_callback_speed_threshold_not_met( self._set_previous_vam_state(speed=0.2) self.vam_transmission_management.send_next_vam = MagicMock() - 
self.vam_transmission_management.location_service_callback(self.tpv_data) + self.vam_transmission_management.location_service_callback( + self.tpv_data) self.vam_transmission_management.send_next_vam.assert_not_called() @@ -348,7 +355,8 @@ def test_send_next_vam(self) -> None: self.btp_router.btp_data_request.assert_called_once() self.assertEqual( self.vam_transmission_management.last_vam_generation_delta_time, - GenerationDeltaTime(msec=vam_message.vam["vam"]["generationDeltaTime"]), + GenerationDeltaTime( + msec=vam_message.vam["vam"]["generationDeltaTime"]), ) def test_send_next_vam_updates_ldm(self) -> None: @@ -364,7 +372,198 @@ def test_send_next_vam_updates_ldm(self) -> None: manager.send_next_vam(vam_message) - ldm_adapter.add_provider_data_to_ldm.assert_called_once_with(vam_message.vam) + ldm_adapter.add_provider_data_to_ldm.assert_called_once_with( + vam_message.vam) + + # ------------------------------------------------------------------ + # Condition 4: heading-change trigger (clause 6.4.1) + # ------------------------------------------------------------------ + + @patch( + "flexstack.facilities.vru_awareness_service.vam_transmission_management.Utils.euclidian_distance", + return_value=0, + ) + def test_location_service_callback_heading_threshold( + self, _mock_distance: MagicMock + ) -> None: + """A heading change > MINGROUNDVELOCITYORIENTATIONCHANGETHRESHOLD triggers a VAM.""" + self._set_previous_vam_state() + # Set last heading to 0°; new track is well above the 4° threshold. 
+ self.vam_transmission_management.last_vam_heading = 0.0 + tpv = dict(self.tpv_data) + tpv["track"] = vam_constants.MINGROUNDVELOCITYORIENTATIONCHANGETHRESHOLD + 5.0 + self.vam_transmission_management.send_next_vam = MagicMock() + + self.vam_transmission_management.location_service_callback(tpv) + + self.vam_transmission_management.send_next_vam.assert_called_once() + + @patch( + "flexstack.facilities.vru_awareness_service.vam_transmission_management.Utils.euclidian_distance", + return_value=0, + ) + def test_location_service_callback_heading_threshold_not_met( + self, _mock_distance: MagicMock + ) -> None: + """A heading change <= threshold should NOT trigger a VAM on its own.""" + self._set_previous_vam_state() + self.vam_transmission_management.last_vam_heading = 0.0 + tpv = dict(self.tpv_data) + tpv["track"] = vam_constants.MINGROUNDVELOCITYORIENTATIONCHANGETHRESHOLD - 1.0 + self.vam_transmission_management.send_next_vam = MagicMock() + + self.vam_transmission_management.location_service_callback(tpv) + + self.vam_transmission_management.send_next_vam.assert_not_called() + + @patch( + "flexstack.facilities.vru_awareness_service.vam_transmission_management.Utils.euclidian_distance", + return_value=0, + ) + def test_heading_wrap_around_360(self, _mock_distance: MagicMock) -> None: + """Heading diff of 355° vs 5° should resolve to 10°, which is > 4°.""" + self._set_previous_vam_state() + self.vam_transmission_management.last_vam_heading = 355.0 + tpv = dict(self.tpv_data) + tpv["track"] = 5.0 # 10° difference across 0°/360° boundary + self.vam_transmission_management.send_next_vam = MagicMock() + + self.vam_transmission_management.location_service_callback(tpv) + + self.vam_transmission_management.send_next_vam.assert_called_once() + + def test_heading_fields_use_value_and_confidence(self) -> None: + """VAMMessage heading dict uses 'value'/'confidence', not legacy names.""" + tpv = dict(self.tpv_data) + tpv["track"] = 180.0 + tpv["epd"] = 5.0 + vam_message = 
VAMMessage() + vam_message.fullfill_high_frequency_container_with_tpv_data(tpv) + heading = vam_message.vam["vam"]["vamParameters"]["vruHighFrequencyContainer"]["heading"] + self.assertIn("value", heading) + self.assertIn("confidence", heading) + self.assertNotIn("headingValue", heading) + self.assertNotIn("headingConfidence", heading) + self.assertEqual(heading["value"], int(180.0 * 10)) + + def test_send_next_vam_updates_heading_tracking(self) -> None: + """send_next_vam stores the transmitted heading for future diff checks.""" + self.vam_coder.encode = MagicMock(return_value=b"encoded") + vam_message = VAMMessage() + vam_message.vam["vam"]["vamParameters"]["vruHighFrequencyContainer"]["heading"][ + "value" + ] = 900 # 90.0° + self.vam_transmission_management.send_next_vam(vam_message) + self.assertAlmostEqual( + self.vam_transmission_management.last_vam_heading, 90.0) + + # ------------------------------------------------------------------ + # LF container (clause 6.2) + # ------------------------------------------------------------------ + + def test_lf_container_included_on_first_vam(self) -> None: + """The VRU Low-Frequency Container must be present in the first VAM.""" + self.vam_coder.encode = MagicMock(return_value=b"encoded") + vam_message = VAMMessage() + self.vam_transmission_management.send_next_vam(vam_message) + self.assertIn( + "vruLowFrequencyContainer", + vam_message.vam["vam"]["vamParameters"], + ) + + def test_lf_container_not_included_when_not_due(self) -> None: + """After first VAM, LF container should NOT be included until T_GenVamLFMin expires.""" + self.vam_coder.encode = MagicMock(return_value=b"encoded") + # Send first VAM (sets last_lf_vam_time and is_first_vam=False) + self.vam_transmission_management.send_next_vam(VAMMessage()) + # Immediately send a second VAM — LF timeout has not elapsed + vam2 = VAMMessage() + self.vam_transmission_management.send_next_vam(vam2) + # LF container should be absent on the second VAM + self.assertNotIn( 
+ "vruLowFrequencyContainer", + vam2.vam["vam"]["vamParameters"], + ) + + def test_lf_container_included_after_timeout(self) -> None: + """LF container re-appears once T_GenVamLFMin has elapsed.""" + import time as _time_module + + self.vam_coder.encode = MagicMock(return_value=b"encoded") + self.vam_transmission_management.send_next_vam(VAMMessage()) + # Backdate last_lf_vam_time so the timeout has expired + self.vam_transmission_management.last_lf_vam_time = ( + _time_module.time() - vam_constants.T_GENVAM_LFMIN / 1000.0 - 1.0 + ) + vam2 = VAMMessage() + self.vam_transmission_management.send_next_vam(vam2) + self.assertIn( + "vruLowFrequencyContainer", + vam2.vam["vam"]["vamParameters"], + ) + + # ------------------------------------------------------------------ + # Clustering integration + # ------------------------------------------------------------------ + + def test_passive_vru_suppresses_transmission(self) -> None: + """VAM must not be sent when clustering manager says not to transmit.""" + clustering_manager = MagicMock() + clustering_manager.should_transmit_vam.return_value = False + manager = VAMTransmissionManagement( + self.btp_router, + self.vam_coder, + self.device_data_provider, + clustering_manager=clustering_manager, + ) + manager.send_next_vam = MagicMock() + + manager.location_service_callback(self.tpv_data) + + manager.send_next_vam.assert_not_called() + + def test_cluster_info_container_attached_on_send(self) -> None: + """Cluster information container from manager is injected into outgoing VAM.""" + clustering_manager = MagicMock() + clustering_manager.get_cluster_information_container.return_value = { + "vruClusterInformation": {"clusterId": 7, "clusterCardinalitySize": 3, + "clusterBoundingBoxShape": {"circular": {"radius": 5}}} + } + clustering_manager.get_cluster_operation_container.return_value = None + manager = VAMTransmissionManagement( + self.btp_router, + self.vam_coder, + self.device_data_provider, + 
clustering_manager=clustering_manager, + ) + self.vam_coder.encode = MagicMock(return_value=b"encoded") + vam_message = VAMMessage() + manager.send_next_vam(vam_message) + self.assertIn( + "vruClusterInformationContainer", + vam_message.vam["vam"]["vamParameters"], + ) + + def test_cluster_op_container_attached_on_send(self) -> None: + """Cluster operation container from manager is injected into outgoing VAM.""" + clustering_manager = MagicMock() + clustering_manager.get_cluster_information_container.return_value = None + clustering_manager.get_cluster_operation_container.return_value = { + "clusterJoinInfo": {"clusterId": 5, "joinTime": 10} + } + manager = VAMTransmissionManagement( + self.btp_router, + self.vam_coder, + self.device_data_provider, + clustering_manager=clustering_manager, + ) + self.vam_coder.encode = MagicMock(return_value=b"encoded") + vam_message = VAMMessage() + manager.send_next_vam(vam_message) + self.assertIn( + "vruClusterOperationContainer", + vam_message.vam["vam"]["vamParameters"], + ) if __name__ == "__main__": diff --git a/tests/flexstack/facilities/vru_awareness_service/test_vru_clustering.py b/tests/flexstack/facilities/vru_awareness_service/test_vru_clustering.py new file mode 100644 index 0000000..469307b --- /dev/null +++ b/tests/flexstack/facilities/vru_awareness_service/test_vru_clustering.py @@ -0,0 +1,685 @@ +"""Unit tests for the VBS clustering state machine. + +Tests cover: +* All VBSState transitions (clause 5.4.2.2 of ETSI TS 103 300-3 V2.3.1). +* Cluster creation / destruction lifecycle. +* Join-notification, waiting, confirmation, cancellation, and failure flows. +* Leave-notification flow (from VRU_PASSIVE). +* Cluster-breakup warning flow (from VRU_ACTIVE_CLUSTER_LEADER). +* Leader-lost detection (timeClusterContinuity timeout). +* Container generation: VruClusterInformationContainer and + VruClusterOperationContainer. +* should_transmit_vam() and get_cluster_id() helpers. 
+* on_received_vam() nearby-VRU / cluster table updates. +* VRU role on/off transitions. +* Thread-safety (basic: running state machine from two threads). +""" +from __future__ import annotations + +import threading +import unittest + +from flexstack.facilities.vru_awareness_service import vam_constants +from flexstack.facilities.vru_awareness_service.vru_clustering import ( + ClusterBreakupReason, + ClusterLeaveReason, + VBSClusteringManager, + VBSState, +) + + +# --------------------------------------------------------------------------- +# Fake clock helpers +# --------------------------------------------------------------------------- + + +class _FakeClock: + """Mutable fake clock for deterministic time-based tests.""" + + def __init__(self, start: float = 1000.0) -> None: + self._t = start + + def __call__(self) -> float: # callable interface matching time.time + return self._t + + def advance(self, seconds: float) -> None: + """Advance the fake clock by *seconds*.""" + self._t += seconds + + +# --------------------------------------------------------------------------- +# Minimal VAM dict factory +# --------------------------------------------------------------------------- + + +def _make_vam( + station_id: int = 42, + lat: float = 48.0, + lon: float = 11.0, + speed: float = 1.5, + heading: float = 90.0, + cluster_id: int | None = None, + join_cluster_id: int | None = None, + leave_cluster_id: int | None = None, + leave_reason: str = "notProvided", + breakup_reason: str | None = None, + breakup_time: int = 12, +) -> dict: + """Return a minimal decoded VAM dict suitable for ``on_received_vam``.""" + params: dict = { + "basicContainer": { + "referencePosition": { + "latitude": int(lat * 1e7), + "longitude": int(lon * 1e7), + }, + }, + "vruHighFrequencyContainer": { + "speed": {"speedValue": int(speed * 100)}, + "heading": {"value": int(heading * 10)}, + }, + } + + if cluster_id is not None: + params["vruClusterInformationContainer"] = { + 
"vruClusterInformation": { + "clusterId": cluster_id, + "clusterCardinalitySize": 3, + "clusterBoundingBoxShape": { + "circular": {"radius": vam_constants.MAX_CLUSTER_DISTANCE} + }, + } + } + + op: dict = {} + if join_cluster_id is not None: + op["clusterJoinInfo"] = {"clusterId": join_cluster_id, "joinTime": 12} + if leave_cluster_id is not None: + op["clusterLeaveInfo"] = { + "clusterId": leave_cluster_id, + "clusterLeaveReason": leave_reason, + } + if breakup_reason is not None: + op["clusterBreakupInfo"] = { + "clusterBreakupReason": breakup_reason, + "breakupTime": breakup_time, + } + if op: + params["vruClusterOperationContainer"] = op + + return { + "header": {"stationId": station_id}, + "vam": {"vamParameters": params}, + } + + +# --------------------------------------------------------------------------- +# Tests +# --------------------------------------------------------------------------- + + +class TestInitialState(unittest.TestCase): + """VBSClusteringManager is created in VRU_ACTIVE_STANDALONE by default.""" + + def setUp(self) -> None: + self.clock = _FakeClock() + self.mgr = VBSClusteringManager( + own_station_id=1, own_vru_profile="pedestrian", time_fn=self.clock + ) + + def test_initial_state(self) -> None: + self.assertEqual(self.mgr.state, VBSState.VRU_ACTIVE_STANDALONE) + + def test_should_transmit_initially(self) -> None: + self.assertTrue(self.mgr.should_transmit_vam()) + + def test_no_cluster_info_initially(self) -> None: + self.assertIsNone(self.mgr.get_cluster_information_container()) + + def test_no_cluster_op_initially(self) -> None: + self.assertIsNone(self.mgr.get_cluster_operation_container()) + + def test_cluster_id_none_initially(self) -> None: + self.assertIsNone(self.mgr.get_cluster_id()) + + +class TestVruRoleTransitions(unittest.TestCase): + """VRU role-on / role-off state transitions (clause 5.4.2.2, Table 5).""" + + def setUp(self) -> None: + self.clock = _FakeClock() + self.mgr = VBSClusteringManager(own_station_id=1, 
time_fn=self.clock) + + def test_role_off_from_standalone(self) -> None: + self.mgr.set_vru_role_off() + self.assertEqual(self.mgr.state, VBSState.VRU_IDLE) + + def test_role_on_from_idle(self) -> None: + self.mgr.set_vru_role_off() + self.mgr.set_vru_role_on() + self.assertEqual(self.mgr.state, VBSState.VRU_ACTIVE_STANDALONE) + + def test_role_on_ignored_when_already_standalone(self) -> None: + """set_vru_role_on() from STANDALONE is a no-op.""" + self.mgr.set_vru_role_on() + self.assertEqual(self.mgr.state, VBSState.VRU_ACTIVE_STANDALONE) + + def test_idle_suppresses_transmit(self) -> None: + self.mgr.set_vru_role_off() + self.assertFalse(self.mgr.should_transmit_vam()) + + def test_role_off_clears_cluster_state(self) -> None: + """Role-off from LEADER tears down cluster data.""" + self._populate_nearby(count=vam_constants.NUM_CREATE_CLUSTER) + self.mgr.try_create_cluster(0.0, 0.0) + self.assertEqual(self.mgr.state, VBSState.VRU_ACTIVE_CLUSTER_LEADER) + self.mgr.set_vru_role_off() + self.assertIsNone(self.mgr.get_cluster_information_container()) + + def _populate_nearby(self, count: int) -> None: + """Inject *count* nearby VRU entries directly into the manager.""" + now = self.clock() + for i in range(count): + from flexstack.facilities.vru_awareness_service.vru_clustering import _NearbyVRU + # pylint: disable=protected-access + self.mgr._nearby_vrus[100 + i] = _NearbyVRU( + station_id=100 + i, + lat=0.0, + lon=0.0, + speed=1.0, + heading=0.0, + last_seen=now, + ) + + +class TestClusterCreation(unittest.TestCase): + """VRU_ACTIVE_STANDALONE → VRU_ACTIVE_CLUSTER_LEADER via try_create_cluster.""" + + def setUp(self) -> None: + self.clock = _FakeClock() + self.mgr = VBSClusteringManager(own_station_id=5, time_fn=self.clock) + + def _add_nearby(self, count: int) -> None: + from flexstack.facilities.vru_awareness_service.vru_clustering import _NearbyVRU + now = self.clock() + for i in range(count): + # pylint: disable=protected-access + self.mgr._nearby_vrus[200 + i] 
= _NearbyVRU( + station_id=200 + i, + lat=0.0, + lon=0.0, + speed=1.0, + heading=0.0, + last_seen=now, + ) + + def test_cluster_not_created_without_enough_vrus(self) -> None: + """Cluster requires at least NUM_CREATE_CLUSTER nearby VRUs.""" + self._add_nearby(vam_constants.NUM_CREATE_CLUSTER - 1) + result = self.mgr.try_create_cluster(0.0, 0.0) + self.assertFalse(result) + self.assertEqual(self.mgr.state, VBSState.VRU_ACTIVE_STANDALONE) + + def test_cluster_created_with_enough_vrus(self) -> None: + self._add_nearby(vam_constants.NUM_CREATE_CLUSTER) + result = self.mgr.try_create_cluster(0.0, 0.0) + self.assertTrue(result) + self.assertEqual(self.mgr.state, VBSState.VRU_ACTIVE_CLUSTER_LEADER) + + def test_cluster_info_container_present_after_creation(self) -> None: + self._add_nearby(vam_constants.NUM_CREATE_CLUSTER) + self.mgr.try_create_cluster(0.0, 0.0) + ctr = self.mgr.get_cluster_information_container() + self.assertIsNotNone(ctr) + assert ctr is not None + info = ctr["vruClusterInformation"] + self.assertIn("clusterId", info) + self.assertGreater(info["clusterId"], 0) + self.assertIn("clusterBoundingBoxShape", info) + self.assertIn("clusterCardinalitySize", info) + + def test_cluster_id_accessible_after_creation(self) -> None: + self._add_nearby(vam_constants.NUM_CREATE_CLUSTER) + self.mgr.try_create_cluster(0.0, 0.0) + self.assertIsNotNone(self.mgr.get_cluster_id()) + + def test_create_fails_when_not_standalone(self) -> None: + self.mgr.set_vru_role_off() + self._add_nearby(vam_constants.NUM_CREATE_CLUSTER) + result = self.mgr.try_create_cluster(0.0, 0.0) + self.assertFalse(result) + + def test_cluster_not_created_for_vrus_out_of_range(self) -> None: + """VRUs further than MAX_CLUSTER_DISTANCE are not counted.""" + from flexstack.facilities.vru_awareness_service.vru_clustering import _NearbyVRU + now = self.clock() + # Place 5 VRUs 1 km away — well outside MAX_CLUSTER_DISTANCE + for i in range(5): + # pylint: disable=protected-access + 
self.mgr._nearby_vrus[300 + i] = _NearbyVRU( + station_id=300 + i, + lat=10.0, # far from 0.0 + lon=10.0, + speed=1.0, + heading=0.0, + last_seen=now, + ) + result = self.mgr.try_create_cluster(0.0, 0.0) + self.assertFalse(result) + + +class TestClusterBreakup(unittest.TestCase): + """Cluster breakup warning flow (clause 5.4.2.2).""" + + def setUp(self) -> None: + self.clock = _FakeClock() + self.mgr = VBSClusteringManager(own_station_id=5, time_fn=self.clock) + # Force into LEADER state + from flexstack.facilities.vru_awareness_service.vru_clustering import _NearbyVRU + now = self.clock() + for i in range(vam_constants.NUM_CREATE_CLUSTER): + # pylint: disable=protected-access + self.mgr._nearby_vrus[400 + i] = _NearbyVRU( + station_id=400 + i, + lat=0.0, + lon=0.0, + speed=1.0, + heading=0.0, + last_seen=now, + ) + self.mgr.try_create_cluster(0.0, 0.0) + self.assertEqual(self.mgr.state, VBSState.VRU_ACTIVE_CLUSTER_LEADER) + + def test_breakup_initiation(self) -> None: + result = self.mgr.trigger_breakup_cluster(ClusterBreakupReason.CLUSTERING_PURPOSE_COMPLETED) + self.assertTrue(result) + + def test_breakup_container_present(self) -> None: + self.mgr.trigger_breakup_cluster(ClusterBreakupReason.NOT_PROVIDED) + op = self.mgr.get_cluster_operation_container() + self.assertIsNotNone(op) + assert op is not None + self.assertIn("clusterBreakupInfo", op) + info = op["clusterBreakupInfo"] + self.assertIn("clusterBreakupReason", info) + self.assertIn("breakupTime", info) + self.assertGreaterEqual(info["breakupTime"], 0) + self.assertLessEqual(info["breakupTime"], 127) + + def test_breakup_transitions_to_standalone_after_warning(self) -> None: + self.mgr.trigger_breakup_cluster(ClusterBreakupReason.NOT_PROVIDED) + self.clock.advance(vam_constants.TIME_CLUSTER_BREAKUP_WARNING) + self.mgr.update(0.0, 0.0, 1.0, 0.0) + self.assertEqual(self.mgr.state, VBSState.VRU_ACTIVE_STANDALONE) + + def test_breakup_not_before_warning_expires(self) -> None: + 
self.mgr.trigger_breakup_cluster(ClusterBreakupReason.NOT_PROVIDED) + self.clock.advance(vam_constants.TIME_CLUSTER_BREAKUP_WARNING - 0.1) + self.mgr.update(0.0, 0.0, 1.0, 0.0) + self.assertEqual(self.mgr.state, VBSState.VRU_ACTIVE_CLUSTER_LEADER) + + def test_double_breakup_ignored(self) -> None: + self.mgr.trigger_breakup_cluster(ClusterBreakupReason.NOT_PROVIDED) + result = self.mgr.trigger_breakup_cluster(ClusterBreakupReason.CLUSTERING_PURPOSE_COMPLETED) + self.assertFalse(result) + + def test_breakup_fails_when_not_leader(self) -> None: + mgr = VBSClusteringManager(own_station_id=99, time_fn=self.clock) + result = mgr.trigger_breakup_cluster() + self.assertFalse(result) + + +class TestClusterJoiningFlow(unittest.TestCase): + """Join notification → passive → confirmation / failure / cancellation flows.""" + + def setUp(self) -> None: + self.clock = _FakeClock() + self.mgr = VBSClusteringManager(own_station_id=10, time_fn=self.clock) + + def test_initiate_join(self) -> None: + result = self.mgr.initiate_join(cluster_id=7) + self.assertTrue(result) + + def test_join_operation_container_present(self) -> None: + self.mgr.initiate_join(cluster_id=7) + op = self.mgr.get_cluster_operation_container() + self.assertIsNotNone(op) + assert op is not None + self.assertIn("clusterJoinInfo", op) + self.assertEqual(op["clusterJoinInfo"]["clusterId"], 7) + + def test_join_time_decreases(self) -> None: + self.mgr.initiate_join(cluster_id=7) + op1 = self.mgr.get_cluster_operation_container() + self.clock.advance(1.0) + op2 = self.mgr.get_cluster_operation_container() + assert op1 is not None and op2 is not None + self.assertGreaterEqual( + op1["clusterJoinInfo"]["joinTime"], + op2["clusterJoinInfo"]["joinTime"], + ) + + def test_double_initiate_join_rejected(self) -> None: + self.mgr.initiate_join(cluster_id=7) + result2 = self.mgr.initiate_join(cluster_id=8) + self.assertFalse(result2) + + def test_join_notification_phase_ends_and_enters_waiting(self) -> None: + 
self.mgr.initiate_join(cluster_id=7) + self.clock.advance(vam_constants.TIME_CLUSTER_JOIN_NOTIFICATION) + self.mgr.update(0.0, 0.0, 1.0, 0.0) + # Still standalone during waiting phase + self.assertEqual(self.mgr.state, VBSState.VRU_ACTIVE_STANDALONE) + + def test_failed_join_after_join_success_timeout(self) -> None: + self.mgr.initiate_join(cluster_id=7) + # Skip through notification phase + self.clock.advance(vam_constants.TIME_CLUSTER_JOIN_NOTIFICATION) + self.mgr.update(0.0, 0.0, 1.0, 0.0) + # Skip through waiting phase + self.clock.advance(vam_constants.TIME_CLUSTER_JOIN_SUCCESS) + self.mgr.update(0.0, 0.0, 1.0, 0.0) + # Should now have failed-join leave-notice in op container + op = self.mgr.get_cluster_operation_container() + self.assertIsNotNone(op) + assert op is not None + self.assertIn("clusterLeaveInfo", op) + self.assertEqual( + op["clusterLeaveInfo"]["clusterLeaveReason"], + ClusterLeaveReason.FAILED_JOIN.value, + ) + + def test_failed_join_leave_notice_expires(self) -> None: + self.mgr.initiate_join(cluster_id=7) + self.clock.advance(vam_constants.TIME_CLUSTER_JOIN_NOTIFICATION) + self.mgr.update(0.0, 0.0, 1.0, 0.0) + self.clock.advance(vam_constants.TIME_CLUSTER_JOIN_SUCCESS) + self.mgr.update(0.0, 0.0, 1.0, 0.0) + # Advance through leave notification period + self.clock.advance(vam_constants.TIME_CLUSTER_LEAVE_NOTIFICATION) + self.mgr.update(0.0, 0.0, 1.0, 0.0) + self.assertIsNone(self.mgr.get_cluster_operation_container()) + + def test_cancelled_join(self) -> None: + self.mgr.initiate_join(cluster_id=7) + self.mgr.cancel_join() + op = self.mgr.get_cluster_operation_container() + self.assertIsNotNone(op) + assert op is not None + self.assertIn("clusterLeaveInfo", op) + self.assertEqual( + op["clusterLeaveInfo"]["clusterLeaveReason"], + ClusterLeaveReason.CANCELLED_JOIN.value, + ) + + def test_successful_join_via_received_vam(self) -> None: + """Leader VAM with matching cluster ID during waiting phase confirms join.""" + 
self.mgr.initiate_join(cluster_id=99) + # Advance past notification phase + self.clock.advance(vam_constants.TIME_CLUSTER_JOIN_NOTIFICATION) + self.mgr.update(0.0, 0.0, 1.0, 0.0) + # Simulate leader's cluster VAM + vam = _make_vam(station_id=50, cluster_id=99) + self.mgr.on_received_vam(vam) + self.assertEqual(self.mgr.state, VBSState.VRU_PASSIVE) + self.assertEqual(self.mgr.get_cluster_id(), 99) + + def test_join_initiate_rejected_from_idle(self) -> None: + self.mgr.set_vru_role_off() + result = self.mgr.initiate_join(cluster_id=7) + self.assertFalse(result) + + +class TestClusterLeaving(unittest.TestCase): + """Leave notification flow from VRU_PASSIVE (clause 5.4.2.2).""" + + def _make_passive_manager(self) -> VBSClusteringManager: + """Return a manager that has been placed into VRU_PASSIVE.""" + mgr = VBSClusteringManager(own_station_id=20, time_fn=self.clock) + mgr.initiate_join(cluster_id=3) + self.clock.advance(vam_constants.TIME_CLUSTER_JOIN_NOTIFICATION) + mgr.update(0.0, 0.0, 1.0, 0.0) + # Confirm join via leader VAM + mgr.on_received_vam(_make_vam(station_id=15, cluster_id=3)) + self.assertEqual(mgr.state, VBSState.VRU_PASSIVE) + return mgr + + def setUp(self) -> None: + self.clock = _FakeClock() + self.mgr = self._make_passive_manager() + + def test_passive_suppresses_transmit(self) -> None: + self.assertFalse(self.mgr.should_transmit_vam()) + + def test_leave_starts_notification_phase(self) -> None: + self.mgr.trigger_leave_cluster(ClusterLeaveReason.SAFETY_CONDITION) + # During leave-notification the device moves back to STANDALONE and + # transmits leave VAMs + self.assertEqual(self.mgr.state, VBSState.VRU_ACTIVE_STANDALONE) + self.assertTrue(self.mgr.should_transmit_vam()) + op = self.mgr.get_cluster_operation_container() + self.assertIsNotNone(op) + assert op is not None + self.assertIn("clusterLeaveInfo", op) + self.assertEqual( + op["clusterLeaveInfo"]["clusterLeaveReason"], + ClusterLeaveReason.SAFETY_CONDITION.value, + ) + + def 
test_leave_notification_expires(self) -> None: + self.mgr.trigger_leave_cluster() + self.clock.advance(vam_constants.TIME_CLUSTER_LEAVE_NOTIFICATION) + self.mgr.update(0.0, 0.0, 1.0, 0.0) + self.assertIsNone(self.mgr.get_cluster_operation_container()) + + +class TestLeaderLostDetection(unittest.TestCase): + """Leader-lost detection via timeClusterContinuity timeout (clause 5.4.2.2).""" + + def setUp(self) -> None: + self.clock = _FakeClock() + self.mgr = VBSClusteringManager(own_station_id=30, time_fn=self.clock) + # Enter VRU_PASSIVE + self.mgr.initiate_join(cluster_id=11) + self.clock.advance(vam_constants.TIME_CLUSTER_JOIN_NOTIFICATION) + self.mgr.update(0.0, 0.0, 1.0, 0.0) + self.mgr.on_received_vam(_make_vam(station_id=77, cluster_id=11)) + self.assertEqual(self.mgr.state, VBSState.VRU_PASSIVE) + + def test_leader_lost_after_continuity_timeout(self) -> None: + self.clock.advance(vam_constants.TIME_CLUSTER_CONTINUITY) + self.mgr.update(0.0, 0.0, 1.0, 0.0) + self.assertEqual(self.mgr.state, VBSState.VRU_ACTIVE_STANDALONE) + + def test_leader_not_lost_before_continuity_timeout(self) -> None: + self.clock.advance(vam_constants.TIME_CLUSTER_CONTINUITY - 0.1) + self.mgr.update(0.0, 0.0, 1.0, 0.0) + self.assertEqual(self.mgr.state, VBSState.VRU_PASSIVE) + + def test_receiving_leader_vam_resets_timeout(self) -> None: + # Advance close to timeout + self.clock.advance(vam_constants.TIME_CLUSTER_CONTINUITY - 0.1) + # Receive a fresh leader VAM — resets the timer + self.mgr.on_received_vam(_make_vam(station_id=77, cluster_id=11)) + # Advance a bit more — should not yet trigger leader-lost + self.clock.advance(vam_constants.TIME_CLUSTER_CONTINUITY - 0.1) + self.mgr.update(0.0, 0.0, 1.0, 0.0) + self.assertEqual(self.mgr.state, VBSState.VRU_PASSIVE) + + def test_leader_lost_sends_leave_notice(self) -> None: + self.clock.advance(vam_constants.TIME_CLUSTER_CONTINUITY) + self.mgr.update(0.0, 0.0, 1.0, 0.0) + op = self.mgr.get_cluster_operation_container() + 
self.assertIsNotNone(op) + assert op is not None + self.assertIn("clusterLeaveInfo", op) + self.assertEqual( + op["clusterLeaveInfo"]["clusterLeaveReason"], + ClusterLeaveReason.CLUSTER_LEADER_LOST.value, + ) + + +class TestLeaderDisbandByLeader(unittest.TestCase): + """Passive VRU receives breakup VAM from leader and leaves cluster.""" + + def setUp(self) -> None: + self.clock = _FakeClock() + self.mgr = VBSClusteringManager(own_station_id=40, time_fn=self.clock) + self.mgr.initiate_join(cluster_id=22) + self.clock.advance(vam_constants.TIME_CLUSTER_JOIN_NOTIFICATION) + self.mgr.update(0.0, 0.0, 1.0, 0.0) + self.mgr.on_received_vam(_make_vam(station_id=88, cluster_id=22)) + + def test_leader_breakup_triggers_standalone(self) -> None: + breakup_vam = _make_vam( + station_id=88, + breakup_reason=ClusterBreakupReason.CLUSTERING_PURPOSE_COMPLETED.value, + ) + self.mgr.on_received_vam(breakup_vam) + self.assertEqual(self.mgr.state, VBSState.VRU_ACTIVE_STANDALONE) + + +class TestNearbyTableMaintenance(unittest.TestCase): + """Nearby VRU and cluster tables track received VAMs and expire stale entries.""" + + def setUp(self) -> None: + self.clock = _FakeClock() + self.mgr = VBSClusteringManager(own_station_id=50, time_fn=self.clock) + + def test_nearby_vru_added_on_reception(self) -> None: + self.mgr.on_received_vam(_make_vam(station_id=60)) + self.assertEqual(self.mgr.get_nearby_vru_count(), 1) + + def test_nearby_vru_updated_on_second_reception(self) -> None: + self.mgr.on_received_vam(_make_vam(station_id=60, speed=1.0)) + self.mgr.on_received_vam(_make_vam(station_id=60, speed=2.0)) + # Same station should not add a duplicate entry + self.assertEqual(self.mgr.get_nearby_vru_count(), 1) + + def test_nearby_cluster_added_on_reception(self) -> None: + self.mgr.on_received_vam(_make_vam(station_id=70, cluster_id=5)) + self.assertEqual(self.mgr.get_nearby_cluster_count(), 1) + + def test_stale_entries_expire_on_update(self) -> None: + 
self.mgr.on_received_vam(_make_vam(station_id=60)) + self.clock.advance(vam_constants.T_GENVAMMAX / 1000.0) + self.mgr.update(0.0, 0.0, 1.0, 0.0) + self.assertEqual(self.mgr.get_nearby_vru_count(), 0) + + def test_malformed_vam_does_not_crash(self) -> None: + """on_received_vam tolerates incomplete VAM dicts.""" + self.mgr.on_received_vam({}) # should not raise + self.mgr.on_received_vam({"header": {}, "vam": {}}) + + +class TestClusterProfileEncoding(unittest.TestCase): + """_encode_cluster_profiles produces valid BIT STRING bytes.""" + + def test_pedestrian_bit_set(self) -> None: + # pylint: disable=protected-access + result = VBSClusteringManager._encode_cluster_profiles({"pedestrian"}) + self.assertEqual(len(result), 1) + self.assertTrue(result[0] & 0x80) # bit 0 (MSB) + + def test_bicyclist_bit_set(self) -> None: + # pylint: disable=protected-access + result = VBSClusteringManager._encode_cluster_profiles({"bicyclistAndLightVruVehicle"}) + self.assertTrue(result[0] & 0x40) + + def test_multiple_profiles(self) -> None: + # pylint: disable=protected-access + result = VBSClusteringManager._encode_cluster_profiles( + {"pedestrian", "motorcyclist"} + ) + self.assertTrue(result[0] & 0x80) # pedestrian + self.assertTrue(result[0] & 0x20) # motorcyclist + + def test_empty_profiles(self) -> None: + # pylint: disable=protected-access + result = VBSClusteringManager._encode_cluster_profiles(set()) + self.assertEqual(result, bytes([0])) + + +class TestContainerFormats(unittest.TestCase): + """Cluster information and operation containers have the expected dict structure.""" + + def setUp(self) -> None: + self.clock = _FakeClock() + self.mgr = VBSClusteringManager(own_station_id=1, time_fn=self.clock) + + def _create_cluster(self) -> None: + from flexstack.facilities.vru_awareness_service.vru_clustering import _NearbyVRU + now = self.clock() + for i in range(vam_constants.NUM_CREATE_CLUSTER): + # pylint: disable=protected-access + self.mgr._nearby_vrus[500 + i] = 
_NearbyVRU( + station_id=500 + i, + lat=0.0, + lon=0.0, + speed=1.0, + heading=0.0, + last_seen=now, + ) + self.mgr.try_create_cluster(0.0, 0.0) + + def test_info_container_keys(self) -> None: + self._create_cluster() + ctr = self.mgr.get_cluster_information_container() + assert ctr is not None + info = ctr["vruClusterInformation"] + self.assertIn("clusterId", info) + self.assertIn("clusterBoundingBoxShape", info) + self.assertIn("clusterCardinalitySize", info) + self.assertIn("circular", info["clusterBoundingBoxShape"]) + self.assertIn("radius", info["clusterBoundingBoxShape"]["circular"]) + + def test_info_container_none_when_not_leader(self) -> None: + self.assertIsNone(self.mgr.get_cluster_information_container()) + + def test_breakup_container_fields(self) -> None: + self._create_cluster() + self.mgr.trigger_breakup_cluster(ClusterBreakupReason.CLUSTERING_PURPOSE_COMPLETED) + op = self.mgr.get_cluster_operation_container() + assert op is not None + breakup = op["clusterBreakupInfo"] + self.assertEqual( + breakup["clusterBreakupReason"], + ClusterBreakupReason.CLUSTERING_PURPOSE_COMPLETED.value, + ) + self.assertIsInstance(breakup["breakupTime"], int) + self.assertGreaterEqual(breakup["breakupTime"], 0) + self.assertLessEqual(breakup["breakupTime"], 127) + + +class TestThreadSafety(unittest.TestCase): + """Basic concurrent access test: no crashes or deadlocks under parallel calls.""" + + def test_concurrent_update_and_received_vam(self) -> None: + clock = _FakeClock() + mgr = VBSClusteringManager(own_station_id=99, time_fn=clock) + errors: list[Exception] = [] + + def update_thread() -> None: + try: + for _ in range(50): + mgr.update(0.0, 0.0, 1.0, 90.0) + except Exception as exc: # pylint: disable=broad-except + errors.append(exc) + + def receive_thread() -> None: + try: + for i in range(50): + mgr.on_received_vam(_make_vam(station_id=i + 1)) + except Exception as exc: # pylint: disable=broad-except + errors.append(exc) + + t1 = 
threading.Thread(target=update_thread) + t2 = threading.Thread(target=receive_thread) + t1.start() + t2.start() + t1.join(timeout=5.0) + t2.join(timeout=5.0) + self.assertFalse(errors, f"Exceptions raised in threads: {errors}") + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/flexstack/geonet/test_common_header.py b/tests/flexstack/geonet/test_common_header.py index 8dbe157..dc24817 100644 --- a/tests/flexstack/geonet/test_common_header.py +++ b/tests/flexstack/geonet/test_common_header.py @@ -2,11 +2,13 @@ from flexstack.geonet.common_header import CommonHeader, CommonNH, HeaderType, HeaderSubType, TrafficClass from flexstack.geonet.service_access_point import GNDataRequest, PacketTransportType, GeoBroadcastHST +from flexstack.geonet.mib import MIB, GnIsMobile class TestCommonHeader(unittest.TestCase): def test_initialize_with_request(self): + mib = MIB(itsGnIsMobile=GnIsMobile.STATIONARY) request = GNDataRequest( upper_protocol_entity=CommonNH.BTP_B, packet_transport_type=PacketTransportType( @@ -16,7 +18,7 @@ def test_initialize_with_request(self): traffic_class=TrafficClass(), length=500, ) - ch = CommonHeader.initialize_with_request(request) + ch = CommonHeader.initialize_with_request(request, mib) self.assertEqual(ch.nh, CommonNH.BTP_B) self.assertEqual(ch.ht, HeaderType.GEOBROADCAST) self.assertEqual(ch.hst, GeoBroadcastHST.GEOBROADCAST_CIRCLE) diff --git a/tests/flexstack/geonet/test_guc_extended_header.py b/tests/flexstack/geonet/test_guc_extended_header.py new file mode 100644 index 0000000..430e84d --- /dev/null +++ b/tests/flexstack/geonet/test_guc_extended_header.py @@ -0,0 +1,74 @@ +import unittest + +from flexstack.geonet.exceptions import DecodeError +from flexstack.geonet.guc_extended_header import GUCExtendedHeader +from flexstack.geonet.gn_address import GNAddress, M, ST, MID +from flexstack.geonet.position_vector import LongPositionVector, ShortPositionVector + + +def _make_gn_addr() -> GNAddress: + return 
GNAddress(m=M.GN_MULTICAST, st=ST.CYCLIST, mid=MID(b"\xaa\xbb\xcc\xdd\x22\x33")) + + +class TestGUCExtendedHeader(unittest.TestCase): + """Unit tests for GUCExtendedHeader (§9.8.2 Table 11).""" + + def _make_header(self) -> GUCExtendedHeader: + lpv = LongPositionVector(gn_addr=_make_gn_addr(), latitude=413872756, longitude=21122668) + de_pv = ShortPositionVector(gn_addr=_make_gn_addr(), latitude=100000000, longitude=20000000) + return GUCExtendedHeader(sn=42, so_pv=lpv, de_pv=de_pv) + + def test_encode_length(self): + """Encoded GUC Extended Header must be exactly 48 bytes.""" + self.assertEqual(len(self._make_header().encode()), 48) + + def test_encode_decode_roundtrip(self): + """Encoding then decoding must return an equal header.""" + original = self._make_header() + decoded = GUCExtendedHeader.decode(original.encode()) + self.assertEqual(decoded, original) + + def test_decode_too_short_raises(self): + """Decoding fewer than 48 bytes must raise DecodeError.""" + with self.assertRaises(DecodeError): + GUCExtendedHeader.decode(b"\x00" * 47) + + def test_sn_byte_order(self): + """SN must be stored in big-endian at bytes 0-1 of the encoded header.""" + header = GUCExtendedHeader(sn=0x1234) + encoded = header.encode() + self.assertEqual(encoded[0], 0x12) + self.assertEqual(encoded[1], 0x34) + + def test_reserved_zero_by_default(self): + """Reserved field must default to 0 and be encoded at bytes 2-3.""" + header = GUCExtendedHeader() + encoded = header.encode() + self.assertEqual(encoded[2], 0x00) + self.assertEqual(encoded[3], 0x00) + + def test_initialize_factory(self): + """Factory method must set sn, so_pv, and de_pv from given arguments.""" + lpv = LongPositionVector(gn_addr=_make_gn_addr()) + de_pv = ShortPositionVector(gn_addr=_make_gn_addr()) + header = GUCExtendedHeader.initialize_with_request_sequence_number_ego_pv_de_pv( + sequence_number=7, ego_pv=lpv, de_pv=de_pv + ) + self.assertEqual(header.sn, 7) + self.assertEqual(header.so_pv, lpv) + 
self.assertEqual(header.de_pv, de_pv) + + def test_with_de_pv(self): + """with_de_pv must return a new header with only DE PV changed.""" + original = self._make_header() + new_de_pv = ShortPositionVector(gn_addr=GNAddress(), latitude=99, longitude=88) + updated = original.with_de_pv(new_de_pv) + self.assertEqual(updated.de_pv, new_de_pv) + self.assertEqual(updated.sn, original.sn) + self.assertEqual(updated.so_pv, original.so_pv) + # Must be a different object + self.assertIsNot(updated, original) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/flexstack/geonet/test_location_table.py b/tests/flexstack/geonet/test_location_table.py index 636d834..81d2250 100644 --- a/tests/flexstack/geonet/test_location_table.py +++ b/tests/flexstack/geonet/test_location_table.py @@ -4,12 +4,16 @@ from flexstack.geonet.gn_address import MID, M, ST, GNAddress from flexstack.geonet.exceptions import ( DuplicatedPacketException, - IncongruentTimestampException, ) from flexstack.geonet.location_table import ( LocationTableEntry, - LongPositionVector, + LocationTable, ) +from flexstack.geonet.tsb_extended_header import TSBExtendedHeader +from flexstack.geonet.guc_extended_header import GUCExtendedHeader +from flexstack.geonet.gbc_extended_header import GBCExtendedHeader +from flexstack.geonet.ls_extended_header import LSRequestExtendedHeader, LSReplyExtendedHeader +from flexstack.geonet.position_vector import LongPositionVector, ShortPositionVector from flexstack.geonet.mib import MIB @@ -45,9 +49,9 @@ def test_update_position_vector(self, mock_time): position_vector2 = position_vector2.set_tst_in_normal_timestamp_seconds( timestamp + 0.1) entry.update_position_vector(position_vector2) - self.assertRaises( - IncongruentTimestampException, entry.update_position_vector, position_vector - ) + # §C.2 ELSE: older PV must be silently ignored – no exception, PV stays at position_vector2 + entry.update_position_vector(position_vector) + 
self.assertEqual(entry.position_vector, position_vector2) @patch("time.time") def test_update_pdr(self, mock_time): @@ -78,14 +82,435 @@ def test_update_pdr(self, mock_time): self.assertAlmostEqual(entry.pdr, 190.00000940775553) def test_duplicate_packet(self): + """Annex A.2: SN already in DPL must raise DuplicatedPacketException.""" mib = MIB() entry = LocationTableEntry(mib) - packet1 = b"\x07\xd1\x00\x00\x02\x02\x00\x00\x07\xa9\xb5\xc8\x40\x59\xca\x03\xa2\x4d\x91\x82\x8b\xe1\x90\x19\x03\x84\x38\x6c\xf0\x00\xe1\x0f\xc0\x00\x2c\x82\xf0\x8a\x80\x03\xff\x05\xff\xf8\x00\x00\x0e\x00\x70\x80\x54\xd8\x5d\x9f\x2b\xc0" - entry.check_duplicate_packet(packet1) + entry.check_duplicate_sn(42) self.assertRaises( - DuplicatedPacketException, entry.check_duplicate_packet, packet1 + DuplicatedPacketException, entry.check_duplicate_sn, 42 + ) + + def test_dpd_ring_buffer_evicts_oldest(self): + """A.2: when DPL is full the oldest SN must be evicted, allowing re-use.""" + mib = MIB(itsGnDPLLength=3) + entry = LocationTableEntry(mib) + # Fill the ring buffer: SNs 0, 1, 2 + entry.check_duplicate_sn(0) + entry.check_duplicate_sn(1) + entry.check_duplicate_sn(2) + # Adding SN 3 evicts SN 0 + entry.check_duplicate_sn(3) + # SN 0 should no longer be in the DPL → not a duplicate + entry.check_duplicate_sn(0) # must NOT raise + + def test_dpd_not_applied_to_shb(self): + """A.1: SHB packets must NOT trigger DPD (no SN field).""" + mib = MIB() + entry = LocationTableEntry(mib) + position_vector = LongPositionVector( + gn_addr=GNAddress()).set_tst_in_normal_timestamp_seconds(1675071608.0) + payload = b"shb_payload" + # Call twice with the same payload – must NOT raise DuplicatedPacketException + entry.update_with_shb_packet(position_vector, payload) + entry.update_with_shb_packet(position_vector, payload) # second call must not raise + + +class TestLocationTableTSB(unittest.TestCase): + """Tests for TSB-specific location table behaviour (\u00a710.3.9.3 steps 5-6).""" + + _TIMESTAMP = 
1675071608.0 + + def _make_tsb_header(self, gn_addr, timestamp_offset: float = 0.0, sn: int = 1): + lpv = LongPositionVector(gn_addr=gn_addr).set_tst_in_normal_timestamp_seconds( + self._TIMESTAMP + timestamp_offset) + return TSBExtendedHeader(sn=sn, so_pv=lpv) + + def _make_gn_addr(self): + return GNAddress( + m=M.GN_MULTICAST, + st=ST.CYCLIST, + mid=MID(b"\xaa\xbb\xcc\xdd\x22\x33"), ) + @patch("flexstack.geonet.location_table.TimeService.time") + def test_new_entry_is_neighbour_set_to_false(self, mock_time): + """Step 5b: IS_NEIGHBOUR must be FALSE for a newly created TSB LocTE.""" + mock_time.return_value = self._TIMESTAMP + mib = MIB() + table = LocationTable(mib) + addr = self._make_gn_addr() + header = self._make_tsb_header(addr) + + table.new_tsb_packet(header, b"payload") + + entry = table.get_entry(addr) + self.assertIsNotNone(entry) + self.assertFalse(entry.is_neighbour) + + @patch("flexstack.geonet.location_table.TimeService.time") + def test_existing_entry_is_neighbour_unchanged(self, mock_time): + """NOTE 1: IS_NEIGHBOUR flag remains unchanged for an existing TSB LocTE.""" + mock_time.return_value = self._TIMESTAMP + mib = MIB() + table = LocationTable(mib) + addr = self._make_gn_addr() + + # First packet: creates entry with IS_NEIGHBOUR = False + table.new_tsb_packet(self._make_tsb_header(addr, 0.0, sn=1), b"payload1") + # Manually set IS_NEIGHBOUR to True (e.g. SHB was received before) + table.get_entry(addr).is_neighbour = True + + # Second TSB packet (different SN so DPD does not trigger): IS_NEIGHBOUR must remain True. + # Use same timestamp so the PV satisfies tst >= prev_tst and refresh_table keeps the entry. 
+ table.new_tsb_packet(self._make_tsb_header(addr, 0.0, sn=2), b"payload2") + + self.assertTrue(table.get_entry(addr).is_neighbour) + + @patch("flexstack.geonet.location_table.TimeService.time") + def test_duplicate_tsb_packet_raises(self, mock_time): + """DPD (annex A.2): same SN received twice must raise DuplicatedPacketException.""" + mock_time.return_value = self._TIMESTAMP + mib = MIB() + table = LocationTable(mib) + addr = self._make_gn_addr() + header = self._make_tsb_header(addr) + payload = b"same_payload" + + table.new_tsb_packet(header, payload) + with self.assertRaises(DuplicatedPacketException): + table.new_tsb_packet(header, payload) + + +class TestLocationTableGUC(unittest.TestCase): + """Tests for GUC-specific location table behaviour (§10.3.8.3/.8.4 steps 5-6).""" + + _TIMESTAMP = 1675071608.0 + + def _make_gn_addr(self): + return GNAddress(m=M.GN_MULTICAST, st=ST.CYCLIST, mid=MID(b"\xaa\xbb\xcc\xdd\x22\x33")) + + def _make_guc_header(self, so_gn_addr, de_gn_addr, timestamp_offset: float = 0.0, sn: int = 1): + so_lpv = LongPositionVector(gn_addr=so_gn_addr).set_tst_in_normal_timestamp_seconds( + self._TIMESTAMP + timestamp_offset) + de_spv = ShortPositionVector(gn_addr=de_gn_addr) + return GUCExtendedHeader(sn=sn, so_pv=so_lpv, de_pv=de_spv) + + @patch("flexstack.geonet.location_table.TimeService.time") + def test_new_entry_is_neighbour_set_to_false(self, mock_time): + """IS_NEIGHBOUR must be FALSE for a newly created GUC LocTE.""" + mock_time.return_value = self._TIMESTAMP + mib = MIB() + table = LocationTable(mib) + so_addr = self._make_gn_addr() + de_addr = GNAddress() + header = self._make_guc_header(so_addr, de_addr) + + table.new_guc_packet(header, b"payload") + + entry = table.get_entry(so_addr) + self.assertIsNotNone(entry) + self.assertFalse(entry.is_neighbour) + + @patch("flexstack.geonet.location_table.TimeService.time") + def test_existing_entry_is_neighbour_unchanged(self, mock_time): + """NOTE 2: IS_NEIGHBOUR flag remains unchanged 
for an existing GUC LocTE.""" + mock_time.return_value = self._TIMESTAMP + mib = MIB() + table = LocationTable(mib) + so_addr = self._make_gn_addr() + de_addr = GNAddress() + + # First packet creates the entry + table.new_guc_packet(self._make_guc_header(so_addr, de_addr, 0.0, sn=1), b"payload1") + # Manually set IS_NEIGHBOUR to True + table.get_entry(so_addr).is_neighbour = True + + # Second packet (different SN so DPD does not trigger) must not reset IS_NEIGHBOUR + table.new_guc_packet(self._make_guc_header(so_addr, de_addr, 0.0, sn=2), b"payload2") + + self.assertTrue(table.get_entry(so_addr).is_neighbour) + + @patch("flexstack.geonet.location_table.TimeService.time") + def test_duplicate_guc_packet_raises(self, mock_time): + """DPD (annex A.2): same SN received twice must raise DuplicatedPacketException.""" + mock_time.return_value = self._TIMESTAMP + mib = MIB() + table = LocationTable(mib) + so_addr = self._make_gn_addr() + de_addr = GNAddress() + header = self._make_guc_header(so_addr, de_addr) + payload = b"dup_payload" + + table.new_guc_packet(header, payload) + with self.assertRaises(DuplicatedPacketException): + table.new_guc_packet(header, payload) + + +class TestLocationTableGAC(unittest.TestCase): + """Tests for GAC-specific location table behaviour (§10.3.12.3 steps 5-6).""" + + _TIMESTAMP = 1675071608.0 + + def _make_gn_addr(self): + return GNAddress(m=M.GN_MULTICAST, st=ST.CYCLIST, mid=MID(b"\xaa\xbb\xcc\xdd\x22\x33")) + + def _make_gac_header(self, gn_addr, timestamp_offset: float = 0.0, sn: int = 1): + so_lpv = LongPositionVector(gn_addr=gn_addr).set_tst_in_normal_timestamp_seconds( + self._TIMESTAMP + timestamp_offset) + return GBCExtendedHeader( + sn=sn, so_pv=so_lpv, + latitude=413872756, longitude=21122668, a=100, b=100, angle=0 + ) + + @patch("flexstack.geonet.location_table.TimeService.time") + def test_new_entry_is_neighbour_set_to_false(self, mock_time): + """Step 5b: IS_NEIGHBOUR must be FALSE for a newly created GAC LocTE.""" + 
mock_time.return_value = self._TIMESTAMP + mib = MIB() + table = LocationTable(mib) + addr = self._make_gn_addr() + header = self._make_gac_header(addr) + + table.new_gac_packet(header, b"payload") + + entry = table.get_entry(addr) + self.assertIsNotNone(entry) + self.assertFalse(entry.is_neighbour) + + @patch("flexstack.geonet.location_table.TimeService.time") + def test_existing_entry_is_neighbour_unchanged(self, mock_time): + """NOTE 1: IS_NEIGHBOUR flag must remain unchanged for an existing GAC LocTE.""" + mock_time.return_value = self._TIMESTAMP + mib = MIB() + table = LocationTable(mib) + addr = self._make_gn_addr() + + # First packet creates entry (IS_NEIGHBOUR = False) + table.new_gac_packet(self._make_gac_header(addr, 0.0, sn=1), b"payload1") + # Manually mark as neighbour (e.g. a SHB was received from this node) + table.get_entry(addr).is_neighbour = True + + # Second GAC packet (different SN so DPD does not trigger): IS_NEIGHBOUR must NOT be reset to False + table.new_gac_packet(self._make_gac_header(addr, 0.0, sn=2), b"payload2") + + self.assertTrue(table.get_entry(addr).is_neighbour) + + @patch("flexstack.geonet.location_table.TimeService.time") + def test_duplicate_gac_packet_raises(self, mock_time): + """DPD (annex A.2): same SN received twice must raise DuplicatedPacketException.""" + mock_time.return_value = self._TIMESTAMP + mib = MIB() + table = LocationTable(mib) + addr = self._make_gn_addr() + header = self._make_gac_header(addr) + payload = b"dup_gac_payload" + + table.new_gac_packet(header, payload) + with self.assertRaises(DuplicatedPacketException): + table.new_gac_packet(header, payload) + + +class TestLocationTableLS(unittest.TestCase): + """Tests for Location Service location table methods (§10.3.7).""" + + _TIMESTAMP = 1675071608.0 + + def _make_gn_addr(self): + return GNAddress(m=M.GN_MULTICAST, st=ST.CYCLIST, mid=MID(b"\xaa\xbb\xcc\xdd\x22\x33")) + + def _make_req_addr(self): + return GNAddress(m=M.GN_MULTICAST, st=ST.PEDESTRIAN, 
mid=MID(b"\x11\x22\x33\x44\x55\x66")) + + def _make_ls_request_header(self, so_addr, timestamp_offset: float = 0.0): + so_lpv = LongPositionVector(gn_addr=so_addr).set_tst_in_normal_timestamp_seconds( + self._TIMESTAMP + timestamp_offset) + return LSRequestExtendedHeader(sn=1, so_pv=so_lpv, request_gn_addr=self._make_req_addr()) + + def _make_ls_reply_header(self, so_addr, de_addr, timestamp_offset: float = 0.0): + so_lpv = LongPositionVector(gn_addr=so_addr).set_tst_in_normal_timestamp_seconds( + self._TIMESTAMP + timestamp_offset) + de_pv = ShortPositionVector(gn_addr=de_addr) + return LSReplyExtendedHeader(sn=2, so_pv=so_lpv, de_pv=de_pv) + + @patch("flexstack.geonet.location_table.TimeService.time") + def test_ensure_entry_creates_new(self, mock_time): + """ensure_entry must create a new LocTE if none exists for the address.""" + mock_time.return_value = self._TIMESTAMP + mib = MIB() + table = LocationTable(mib) + addr = self._make_gn_addr() + + self.assertIsNone(table.get_entry(addr)) + entry = table.ensure_entry(addr) + self.assertIsNotNone(entry) + self.assertIs(table.get_entry(addr), entry) + + @patch("flexstack.geonet.location_table.TimeService.time") + def test_ensure_entry_returns_existing(self, mock_time): + """ensure_entry must return the existing LocTE if one already exists.""" + mock_time.return_value = self._TIMESTAMP + mib = MIB() + table = LocationTable(mib) + addr = self._make_gn_addr() + first = table.ensure_entry(addr) + second = table.ensure_entry(addr) + self.assertIs(first, second) + + @patch("flexstack.geonet.location_table.TimeService.time") + def test_ls_request_new_entry_is_neighbour_false(self, mock_time): + """Step 5b: IS_NEIGHBOUR must be FALSE for a newly created LS Request LocTE.""" + mock_time.return_value = self._TIMESTAMP + mib = MIB() + table = LocationTable(mib) + addr = self._make_gn_addr() + header = self._make_ls_request_header(addr) + + table.new_ls_request_packet(header, b"payload") + + entry = table.get_entry(addr) + 
self.assertIsNotNone(entry) + self.assertFalse(entry.is_neighbour) + + @patch("flexstack.geonet.location_table.TimeService.time") + def test_ls_request_existing_entry_is_neighbour_unchanged(self, mock_time): + """NOTE: IS_NEIGHBOUR flag must remain unchanged for an existing LS Request LocTE.""" + mock_time.return_value = self._TIMESTAMP + mib = MIB() + table = LocationTable(mib) + addr = self._make_gn_addr() + # First packet creates entry (IS_NEIGHBOUR = False) + table.new_ls_request_packet(self._make_ls_request_header(addr, 0.0), b"payload1") + table.get_entry(addr).is_neighbour = True # manually mark as neighbour + # Second packet (different SN so DPD does not trigger) must not reset IS_NEIGHBOUR + header2 = LSRequestExtendedHeader( + sn=2, + so_pv=LongPositionVector(gn_addr=addr).set_tst_in_normal_timestamp_seconds(self._TIMESTAMP), + request_gn_addr=self._make_req_addr(), + ) + table.new_ls_request_packet(header2, b"payload2") + self.assertTrue(table.get_entry(addr).is_neighbour) + + @patch("flexstack.geonet.location_table.TimeService.time") + def test_ls_reply_new_entry_is_neighbour_false(self, mock_time): + """IS_NEIGHBOUR must be FALSE for a newly created LS Reply LocTE.""" + mock_time.return_value = self._TIMESTAMP + mib = MIB() + table = LocationTable(mib) + so_addr = self._make_gn_addr() + de_addr = self._make_req_addr() + header = self._make_ls_reply_header(so_addr, de_addr) + + table.new_ls_reply_packet(header, b"payload") + + entry = table.get_entry(so_addr) + self.assertIsNotNone(entry) + self.assertFalse(entry.is_neighbour) + + @patch("flexstack.geonet.location_table.TimeService.time") + def test_duplicate_ls_request_raises(self, mock_time): + """DPD (annex A.2): same SN received twice must raise DuplicatedPacketException.""" + mock_time.return_value = self._TIMESTAMP + mib = MIB() + table = LocationTable(mib) + addr = self._make_gn_addr() + header = self._make_ls_request_header(addr) + payload = b"dup_ls_request_payload" + + 
table.new_ls_request_packet(header, payload) + with self.assertRaises(DuplicatedPacketException): + table.new_ls_request_packet(header, payload) + + +# --------------------------------------------------------------------------- +# Annex C – Position vector update +# --------------------------------------------------------------------------- + +class TestAnnexC(unittest.TestCase): + """Unit tests verifying compliance with ETSI EN 302 636-4-1 V1.4.1 Annex C.""" + + _TIMESTAMP = 1675071608.0 + + def _make_lpv(self, timestamp_seconds: float) -> LongPositionVector: + gn_addr = GNAddress(m=M.GN_MULTICAST, st=ST.CYCLIST, mid=MID(b"\xaa\xbb\xcc\xdd\x22\x33")) + return LongPositionVector( + gn_addr=gn_addr, pai=True, latitude=413872756, longitude=21122668 + ).set_tst_in_normal_timestamp_seconds(timestamp_seconds) + + # ── §C.2 update_position_vector ───────────────────────────────────────── + + def test_pv_not_updated_when_tst_equal(self): + """§C.2 ELSE: when received TST equals stored TST, PV must remain unchanged.""" + mib = MIB() + entry = LocationTableEntry(mib) + pv1 = self._make_lpv(self._TIMESTAMP) + entry.update_position_vector(pv1) + + # Build a PV with identical TST but different lat/lon + pv2 = LongPositionVector( + gn_addr=pv1.gn_addr, pai=True, latitude=0, longitude=0 + ).set_tst_in_normal_timestamp_seconds(self._TIMESTAMP) + + entry.update_position_vector(pv2) # same TST -> must do nothing, no exception + self.assertEqual(entry.position_vector, pv1) + + def test_pv_not_updated_when_tst_older(self): + """§C.2 ELSE: when received TST is older than stored TST, PV must remain unchanged.""" + mib = MIB() + entry = LocationTableEntry(mib) + pv_newer = self._make_lpv(self._TIMESTAMP + 1.0) + entry.update_position_vector(pv_newer) + + pv_older = self._make_lpv(self._TIMESTAMP) + entry.update_position_vector(pv_older) # older -> must do nothing, no exception + self.assertEqual(entry.position_vector, pv_newer) + + def test_pv_updated_when_tst_newer(self): + """§C.2 IF: 
received PV with strictly newer TST must replace stored PV.""" + mib = MIB() + entry = LocationTableEntry(mib) + pv_old = self._make_lpv(self._TIMESTAMP) + entry.update_position_vector(pv_old) + + pv_new = self._make_lpv(self._TIMESTAMP + 1.0) + entry.update_position_vector(pv_new) + self.assertEqual(entry.position_vector, pv_new) + + # ── §C.2 refresh_table lifetime ───────────────────────────────────────── + + @patch("flexstack.geonet.location_table.TimeService.time") + def test_locte_not_expired_within_lifetime(self, mock_time): + """§C.2: LocTE must survive until itsGnLifetimeLocTE seconds have elapsed.""" + mock_time.return_value = self._TIMESTAMP + mib = MIB() + table = LocationTable(mib) + gn_addr = GNAddress() + pv = LongPositionVector(gn_addr=gn_addr).set_tst_in_normal_timestamp_seconds( + self._TIMESTAMP) + tsb = TSBExtendedHeader(sn=1, so_pv=pv) + table.new_tsb_packet(tsb, b"payload") + + # Advance time just inside the lifetime window (19 s < 20 s) + mock_time.return_value = self._TIMESTAMP + 19 + table.refresh_table() + self.assertIsNotNone(table.get_entry(gn_addr)) + + @patch("flexstack.geonet.location_table.TimeService.time") + def test_locte_expired_after_lifetime(self, mock_time): + """§C.2: LocTE must be removed after itsGnLifetimeLocTE (20 s) have elapsed.""" + mock_time.return_value = self._TIMESTAMP + mib = MIB() + table = LocationTable(mib) + gn_addr = GNAddress() + pv = LongPositionVector(gn_addr=gn_addr).set_tst_in_normal_timestamp_seconds( + self._TIMESTAMP) + tsb = TSBExtendedHeader(sn=1, so_pv=pv) + table.new_tsb_packet(tsb, b"payload") + + # Advance time past the lifetime window (21 s > 20 s) + mock_time.return_value = self._TIMESTAMP + 21 + table.refresh_table() + self.assertIsNone(table.get_entry(gn_addr)) + if __name__ == "__main__": unittest.main() diff --git a/tests/flexstack/geonet/test_ls_extended_header.py b/tests/flexstack/geonet/test_ls_extended_header.py new file mode 100644 index 0000000..ed4da0d --- /dev/null +++ 
b/tests/flexstack/geonet/test_ls_extended_header.py @@ -0,0 +1,129 @@ +import unittest + +from flexstack.geonet.exceptions import DecodeError +from flexstack.geonet.ls_extended_header import LSRequestExtendedHeader, LSReplyExtendedHeader +from flexstack.geonet.gn_address import GNAddress, M, ST, MID +from flexstack.geonet.position_vector import LongPositionVector, ShortPositionVector + + +def _make_gn_addr() -> GNAddress: + return GNAddress(m=M.GN_MULTICAST, st=ST.CYCLIST, mid=MID(b"\xaa\xbb\xcc\xdd\x22\x33")) + + +def _make_request_addr() -> GNAddress: + return GNAddress(m=M.GN_MULTICAST, st=ST.PEDESTRIAN, mid=MID(b"\x11\x22\x33\x44\x55\x66")) + + +class TestLSRequestExtendedHeader(unittest.TestCase): + """Unit tests for LSRequestExtendedHeader (§9.8.7 Table 16).""" + + def _make_header(self) -> LSRequestExtendedHeader: + so_pv = LongPositionVector(gn_addr=_make_gn_addr(), latitude=413872756, longitude=21122668) + return LSRequestExtendedHeader(sn=7, so_pv=so_pv, request_gn_addr=_make_request_addr()) + + def test_encode_length(self): + """Encoded LS Request Extended Header must be exactly 36 bytes.""" + self.assertEqual(len(self._make_header().encode()), 36) + + def test_encode_decode_roundtrip(self): + """Encoding then decoding must return an equal header.""" + original = self._make_header() + decoded = LSRequestExtendedHeader.decode(original.encode()) + self.assertEqual(decoded, original) + + def test_decode_too_short_raises(self): + """Decoding fewer than 36 bytes must raise DecodeError.""" + with self.assertRaises(DecodeError): + LSRequestExtendedHeader.decode(b"\x00" * 35) + + def test_sn_byte_order(self): + """SN must be stored big-endian at bytes 0-1 of the encoded header.""" + header = LSRequestExtendedHeader(sn=0x1234) + encoded = header.encode() + self.assertEqual(encoded[0], 0x12) + self.assertEqual(encoded[1], 0x34) + + def test_reserved_zero_by_default(self): + """Reserved field must default to 0 and be encoded at bytes 2-3.""" + header = 
LSRequestExtendedHeader() + encoded = header.encode() + self.assertEqual(encoded[2], 0x00) + self.assertEqual(encoded[3], 0x00) + + def test_so_pv_occupies_bytes_4_to_27(self): + """SO PV must occupy bytes 4-27 (24 bytes).""" + encoded = self._make_header().encode() + self.assertEqual(len(encoded[4:28]), 24) + + def test_request_gn_addr_occupies_bytes_28_to_35(self): + """Request GN_ADDR must occupy bytes 28-35 (8 bytes).""" + encoded = self._make_header().encode() + self.assertEqual(len(encoded[28:36]), 8) + + def test_initialize_factory(self): + """initialize() must produce the same header as the direct constructor.""" + so_pv = LongPositionVector(gn_addr=_make_gn_addr()) + req_addr = _make_request_addr() + via_init = LSRequestExtendedHeader.initialize(42, so_pv, req_addr) + direct = LSRequestExtendedHeader(sn=42, so_pv=so_pv, request_gn_addr=req_addr) + self.assertEqual(via_init, direct) + + +class TestLSReplyExtendedHeader(unittest.TestCase): + """Unit tests for LSReplyExtendedHeader (§9.8.8 Table 17).""" + + def _make_header(self) -> LSReplyExtendedHeader: + so_pv = LongPositionVector(gn_addr=_make_gn_addr(), latitude=413872756, longitude=21122668) + de_pv = ShortPositionVector(gn_addr=_make_request_addr(), latitude=100000000, longitude=20000000) + return LSReplyExtendedHeader(sn=3, so_pv=so_pv, de_pv=de_pv) + + def test_encode_length(self): + """Encoded LS Reply Extended Header must be exactly 48 bytes.""" + self.assertEqual(len(self._make_header().encode()), 48) + + def test_encode_decode_roundtrip(self): + """Encoding then decoding must return an equal header.""" + original = self._make_header() + decoded = LSReplyExtendedHeader.decode(original.encode()) + self.assertEqual(decoded, original) + + def test_decode_too_short_raises(self): + """Decoding fewer than 48 bytes must raise DecodeError.""" + with self.assertRaises(DecodeError): + LSReplyExtendedHeader.decode(b"\x00" * 47) + + def test_sn_byte_order(self): + """SN must be stored big-endian at bytes 
0-1.""" + header = LSReplyExtendedHeader(sn=0xABCD) + encoded = header.encode() + self.assertEqual(encoded[0], 0xAB) + self.assertEqual(encoded[1], 0xCD) + + def test_reserved_zero_by_default(self): + """Reserved field must default to 0 and be encoded at bytes 2-3.""" + header = LSReplyExtendedHeader() + encoded = header.encode() + self.assertEqual(encoded[2], 0x00) + self.assertEqual(encoded[3], 0x00) + + def test_so_pv_occupies_bytes_4_to_27(self): + """SO PV must occupy bytes 4-27 (24 bytes).""" + encoded = self._make_header().encode() + self.assertEqual(len(encoded[4:28]), 24) + + def test_de_pv_occupies_bytes_28_to_47(self): + """DE PV must occupy bytes 28-47 (20 bytes).""" + encoded = self._make_header().encode() + self.assertEqual(len(encoded[28:48]), 20) + + def test_initialize_factory(self): + """initialize() must produce the same header as the direct constructor.""" + so_pv = LongPositionVector(gn_addr=_make_gn_addr()) + de_pv = ShortPositionVector(gn_addr=_make_request_addr()) + via_init = LSReplyExtendedHeader.initialize(99, so_pv, de_pv) + direct = LSReplyExtendedHeader(sn=99, so_pv=so_pv, de_pv=de_pv) + self.assertEqual(via_init, direct) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/flexstack/geonet/test_router.py b/tests/flexstack/geonet/test_router.py index bfb6b08..1612a02 100644 --- a/tests/flexstack/geonet/test_router.py +++ b/tests/flexstack/geonet/test_router.py @@ -1,14 +1,20 @@ +from __future__ import annotations + import unittest -from unittest.mock import Mock +from unittest.mock import Mock, MagicMock from flexstack.geonet.router import DADException, GNForwardingAlgorithmResponse, Router -from flexstack.geonet.mib import MIB -from flexstack.geonet.position_vector import LongPositionVector -from flexstack.geonet.service_access_point import Area, CommonNH, GNDataIndication, GNDataRequest, GNDataConfirm, GeoBroadcastHST, HeaderType, ResultCode, TopoBroadcastHST, PacketTransportType -from flexstack.geonet.gn_address 
import ST, GNAddress -from flexstack.geonet.basic_header import BasicHeader +from flexstack.geonet.mib import MIB, AreaForwardingAlgorithm +from flexstack.geonet.position_vector import LongPositionVector, ShortPositionVector +from flexstack.geonet.service_access_point import Area, CommonNH, GNDataIndication, GNDataRequest, GNDataConfirm, GeoBroadcastHST, GeoAnycastHST, HeaderType, LocationServiceHST, ResultCode, TopoBroadcastHST, PacketTransportType, TrafficClass +from flexstack.geonet.gn_address import ST, GNAddress, M, MID +from flexstack.geonet.basic_header import BasicHeader, BasicNH from flexstack.geonet.common_header import CommonHeader from flexstack.geonet.gbc_extended_header import GBCExtendedHeader +from flexstack.geonet.guc_extended_header import GUCExtendedHeader +from flexstack.geonet.ls_extended_header import LSRequestExtendedHeader, LSReplyExtendedHeader +from flexstack.security.verify_service import VerifyService +from flexstack.security.sn_sap import SNVERIFYConfirm, ReportVerify class TestRouter(unittest.TestCase): @@ -80,7 +86,7 @@ def test_GNDataRequestSHB(self): # Assert that the result is an instance of GNDataConfirm self.assertIsInstance(result, GNDataConfirm) link_layer.send.assert_called_once_with( - b'\x11\x00\x1a\x01\x00P\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00request_data') + b'\x11\x00\x1a\x01\x00P\x00\x80\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00request_data') self.assertEqual(result.result_code, ResultCode.ACCEPTED) def test_calculate_distance(self): @@ -155,8 +161,8 @@ def test_GNForwardingAlgorithmSelection(self): self.assertEqual(result, GNForwardingAlgorithmResponse.AREA_FORWARDING) def test_GNDataforwardGBC(self): - # Given - mib = MIB() + # Given – use SIMPLE forwarding so AREA_FORWARDING sends immediately (§F.2) + mib = 
MIB(itsGnAreaForwardingAlgorithm=AreaForwardingAlgorithm.SIMPLE) router = Router(mib) router.link_layer = Mock() router.link_layer.send = Mock() @@ -262,10 +268,12 @@ def test_GNDataIndicateSHB(self): mib = MIB() router = Router(mib) router.location_table.new_shb_packet = Mock() + router.duplicate_address_detection = Mock() common_header = CommonHeader( hst=TopoBroadcastHST.SINGLE_HOP, # type: ignore ht=HeaderType.TSB ) + basic_header = BasicHeader(rhl=1) position_vector = LongPositionVector( latitude=421255850, longitude=27601710, @@ -275,10 +283,12 @@ def test_GNDataIndicateSHB(self): packet = position_vector.encode() + bytes(4) + b'payload' # When - result = router.gn_data_indicate_shb(packet, common_header) + result = router.gn_data_indicate_shb( + packet, common_header, basic_header) # Then router.location_table.new_shb_packet.assert_called_once() + router.duplicate_address_detection.assert_called_once() self.assertEqual(result.data, b'payload') def test_GNDataIndicateGBC(self): @@ -287,6 +297,7 @@ def test_GNDataIndicateGBC(self): router = Router(mib) router.location_table.new_gbc_packet = Mock() router.gn_geometric_function_f = Mock(return_value=0.5) + router.gn_data_forward_gbc = Mock() gbc_extended_header = GBCExtendedHeader( latitude=421255850, longitude=27601710, @@ -296,16 +307,26 @@ def test_GNDataIndicateGBC(self): ) router.duplicate_address_detection = Mock() common_header = CommonHeader() + basic_header = BasicHeader(rhl=5) packet = gbc_extended_header.encode() + b'payload' # When - result = router.gn_data_indicate_gbc(packet, common_header) + result = router.gn_data_indicate_gbc( + packet, common_header, basic_header) # Then self.assertEqual(result.data, b'payload') + # destination_area must be populated from the GBC extended header (Table 38) + self.assertIsNotNone(result.destination_area) + self.assertEqual(result.destination_area.latitude, 421255850) + # remaining_packet_lifetime and remaining_hop_limit must be set (Table 38) + 
self.assertIsNotNone(result.remaining_packet_lifetime) + self.assertEqual(result.remaining_hop_limit, 5) router.location_table.new_gbc_packet.assert_called_once() router.gn_geometric_function_f.assert_called_once() router.duplicate_address_detection.assert_called_once() + # forwarding must be triggered (steps 9-14, rhl=5>0) + router.gn_data_forward_gbc.assert_called_once() def test_GNDataIndicate(self): # Given @@ -347,6 +368,87 @@ def test_GNDataIndicate(self): router.gn_data_indicate_gbc.assert_not_called() router.gn_data_indicate_shb.assert_called_once() + def test_gn_security_enabled_drops_unsecured_packets(self): + """When itsGnSecurity=ENABLED, unsecured packets (NH=COMMON_HEADER) must be silently dropped.""" + from flexstack.geonet.mib import GnSecurity + mib = MIB(itsGnSecurity=GnSecurity.ENABLED) + router = Router(mib) + callback = Mock() + router.register_indication_callback(callback) + router.gn_data_indicate_shb = Mock() + + # Build an unsecured SHB packet (NH=COMMON_HEADER) + basic_header = BasicHeader(version=1) + common_header = CommonHeader( + ht=HeaderType.TSB, hst=TopoBroadcastHST.SINGLE_HOP) # type: ignore + packet = basic_header.encode_to_bytes() + common_header.encode_to_bytes() + bytes(28) + + router.gn_data_indicate(packet) + + router.gn_data_indicate_shb.assert_not_called() + callback.assert_not_called() + + def test_gn_security_disabled_accepts_unsecured_packets(self): + """When itsGnSecurity=DISABLED (default), unsecured packets must be processed normally.""" + mib = MIB() # itsGnSecurity defaults to DISABLED + router = Router(mib) + router.gn_data_indicate_shb = Mock(return_value=GNDataIndication()) + callback = Mock() + router.register_indication_callback(callback) + + basic_header = BasicHeader(version=1) + common_header = CommonHeader( + ht=HeaderType.TSB, hst=TopoBroadcastHST.SINGLE_HOP) # type: ignore + packet = basic_header.encode_to_bytes() + common_header.encode_to_bytes() + bytes(28) + + router.gn_data_indicate(packet) + + 
router.gn_data_indicate_shb.assert_called_once() + + def test_GNDataRequestBeacon(self): + # Given + mib = MIB() + router = Router(mib) + link_layer = Mock() + link_layer.send = Mock() + router.link_layer = link_layer + + # When + router.gn_data_request_beacon() + + # Then - packet = BasicHeader(4) + CommonHeader(8) + LPV(24) = 36 bytes + # BasicHeader: version=1, NH=COMMON_HEADER, LT=60s(0x1a), RHL=1 + # CommonHeader: NH=ANY(0x00), HT=BEACON(0x1)(+HST=0 => 0x10), TC=0, + # flags=itsGnIsMobile=MOBILE=0x80, PL=0, MHL=1, reserved=0 + link_layer.send.assert_called_once_with( + b'\x11\x00\x1a\x01\x00\x10\x00\x01\x00\x00\x01\x00' + bytes(24) + ) + + def test_GNDataIndicateBeacon(self): + # Given + mib = MIB() + router = Router(mib) + router.location_table.new_shb_packet = Mock() + router.duplicate_address_detection = Mock() + callback = Mock() + router.register_indication_callback(callback) + position_vector = LongPositionVector( + latitude=421255850, + longitude=27601710, + ) + # Beacon packet (after Basic and Common headers are stripped): only a LPV, no payload + packet = position_vector.encode() + + # When + router.gn_data_indicate_beacon(packet) + + # Then: DAD is executed (§10.3.6.3 -> §10.3.10.3 step 3) + router.duplicate_address_detection.assert_called_once() + # And location table is updated + router.location_table.new_shb_packet.assert_called_once() + # Beacons do NOT pass payload to upper entity (§10.3.6.3 exception for step 8) + callback.assert_not_called() + def test_duplicate_address_detection(self): # Given mib = MIB() @@ -374,3 +476,2306 @@ def test_refresh_ego_position_vector(self): self.assertEqual(router.ego_position_vector.longitude, 75674116) self.assertEqual(router.ego_position_vector.s, 9) self.assertEqual(router.ego_position_vector.h, 103) + + +class TestRouterSecuredPacket(unittest.TestCase): + """Tests for SECURED_PACKET reception in the GeoNetworking router.""" + + def _build_secured_packet(self, inner_bytes: bytes) -> bytes: + """Return a GN 
PDU with BasicNH.SECURED_PACKET + arbitrary inner payload.""" + basic_header = BasicHeader(version=1).set_nh(BasicNH.SECURED_PACKET) + return basic_header.encode_to_bytes() + inner_bytes + + def test_gn_data_indicate_secured_no_verify_service(self): + """Secured packets must be silently discarded when no VerifyService is configured.""" + mib = MIB() + router = Router(mib) # verify_service defaults to None + callback = MagicMock() + router.register_indication_callback(callback) + + packet = self._build_secured_packet(b"some_signed_data") + router.gn_data_indicate(packet) + + callback.assert_not_called() + + def test_gn_data_indicate_secured_verification_failed(self): + """Secured packets must be discarded when verification returns a failure report.""" + mib = MIB() + verify_service = MagicMock(spec=VerifyService) + verify_service.verify.return_value = SNVERIFYConfirm( + report=ReportVerify.FALSE_SIGNATURE, + certificate_id=b"", + its_aid=b"", + its_aid_length=0, + permissions=b"", + plain_message=b"", + ) + router = Router(mib, verify_service=verify_service) + callback = MagicMock() + router.register_indication_callback(callback) + + packet = self._build_secured_packet(b"bad_signed_data") + router.gn_data_indicate(packet) + + verify_service.verify.assert_called_once() + callback.assert_not_called() + + def test_gn_data_indicate_secured_verification_success(self): + """On successful verification the inner GN packet must be indicated via the callback.""" + mib = MIB() + # Build a valid inner SHB GN packet (common_header + LPV + media_dep + payload) + common_header = CommonHeader( + ht=HeaderType.TSB, + hst=TopoBroadcastHST.SINGLE_HOP, # type: ignore + ) + long_position_vector = LongPositionVector() + media_dep = b"\x00\x00\x00\x00" + upper_payload = b"secured_cam_data" + plain_message = ( + common_header.encode_to_bytes() + long_position_vector.encode() + media_dep + upper_payload + ) + + verify_service = MagicMock(spec=VerifyService) + 
verify_service.verify.return_value = SNVERIFYConfirm( + report=ReportVerify.SUCCESS, + certificate_id=b"\x01\x02\x03\x04\x05\x06\x07\x08", + its_aid=b"", + its_aid_length=0, + permissions=b"", + plain_message=plain_message, + ) + + router = Router(mib, verify_service=verify_service) + router.location_table.new_shb_packet = MagicMock() + router.duplicate_address_detection = MagicMock() + callback = MagicMock() + router.register_indication_callback(callback) + + packet = self._build_secured_packet(b"valid_signed_data") + router.gn_data_indicate(packet) + + verify_service.verify.assert_called_once() + callback.assert_called_once() + indication: GNDataIndication = callback.call_args[0][0] + self.assertEqual(indication.data, upper_payload) + + +class TestProcessBasicHeader(unittest.TestCase): + """Unit tests for Router.process_basic_header.""" + + def _make_packet(self, nh: BasicNH, payload: bytes = b"payload") -> bytes: + bh = BasicHeader(version=1).set_nh(nh) + return bh.encode_to_bytes() + payload + + def test_common_header_dispatches_to_process_common_header(self): + """NH=COMMON_HEADER (security DISABLED) must forward to process_common_header.""" + mib = MIB() + router = Router(mib) + router.process_common_header = Mock() + + payload = b"\x00" * 8 + b"rest" + packet = self._make_packet(BasicNH.COMMON_HEADER, payload) + router.process_basic_header(packet) + + router.process_common_header.assert_called_once() + args = router.process_common_header.call_args[0] + # remaining bytes after basic header + self.assertEqual(args[0], payload) + + def test_secured_packet_dispatches_to_process_security_header(self): + """NH=SECURED_PACKET must forward to process_security_header.""" + mib = MIB() + router = Router(mib) + router.process_security_header = Mock() + + payload = b"signed_bytes" + packet = self._make_packet(BasicNH.SECURED_PACKET, payload) + router.process_basic_header(packet) + + router.process_security_header.assert_called_once() + args = 
router.process_security_header.call_args[0] + self.assertEqual(args[0], payload) + + def test_security_enabled_drops_common_header(self): + """NH=COMMON_HEADER must be silently dropped when itsGnSecurity=ENABLED.""" + from flexstack.geonet.mib import GnSecurity + mib = MIB(itsGnSecurity=GnSecurity.ENABLED) + router = Router(mib) + router.process_common_header = Mock() + + packet = self._make_packet(BasicNH.COMMON_HEADER) + router.process_basic_header(packet) + + router.process_common_header.assert_not_called() + + def test_security_enabled_accepts_secured_packet(self): + """NH=SECURED_PACKET must still be forwarded when itsGnSecurity=ENABLED.""" + from flexstack.geonet.mib import GnSecurity + mib = MIB(itsGnSecurity=GnSecurity.ENABLED) + router = Router(mib) + router.process_security_header = Mock() + + packet = self._make_packet(BasicNH.SECURED_PACKET, b"signed_bytes") + router.process_basic_header(packet) + + router.process_security_header.assert_called_once() + + def test_wrong_version_raises(self): + """A basic header with a version != itsGnProtocolVersion must raise.""" + mib = MIB() + router = Router(mib) + bh = BasicHeader(version=0) # version 0 != default 1 + packet = bh.encode_to_bytes() + b"payload" + with self.assertRaises(NotImplementedError): + router.process_basic_header(packet) + + +class TestProcessCommonHeader(unittest.TestCase): + """Unit tests for Router.process_common_header.""" + + def _make_common_header_packet( + self, + ht: HeaderType, + hst, + rhl: int = 1, + mhl: int = 1, + payload: bytes = b"", + ) -> tuple[CommonHeader, BasicHeader, bytes]: + bh = BasicHeader(version=1, rhl=rhl) + ch = CommonHeader(ht=ht, hst=hst, mhl=mhl) # type: ignore + return ch, bh, ch.encode_to_bytes() + payload + + def test_shb_dispatches_and_calls_callback(self): + """TSB/SINGLE_HOP must call gn_data_indicate_shb and invoke the callback.""" + mib = MIB() + router = Router(mib) + router.gn_data_indicate_shb = Mock(return_value=GNDataIndication()) + callback = 
Mock() + router.register_indication_callback(callback) + + ch, bh, packet = self._make_common_header_packet( + HeaderType.TSB, TopoBroadcastHST.SINGLE_HOP, payload=bytes(28) + ) + router.process_common_header(packet, bh) + + router.gn_data_indicate_shb.assert_called_once() + callback.assert_called_once() + + def test_gbc_dispatches_and_calls_callback(self): + """GEOBROADCAST must call gn_data_indicate_gbc and invoke the callback.""" + mib = MIB() + router = Router(mib) + router.gn_data_indicate_gbc = Mock(return_value=GNDataIndication()) + callback = Mock() + router.register_indication_callback(callback) + + ch, bh, packet = self._make_common_header_packet( + HeaderType.GEOBROADCAST, GeoBroadcastHST.GEOBROADCAST_CIRCLE, payload=bytes( + 44) + ) + router.process_common_header(packet, bh) + + router.gn_data_indicate_gbc.assert_called_once() + callback.assert_called_once() + + def test_beacon_dispatches_and_does_not_call_callback(self): + """BEACON must call gn_data_indicate_beacon and must NOT invoke the callback.""" + mib = MIB() + router = Router(mib) + router.gn_data_indicate_beacon = Mock() + callback = Mock() + router.register_indication_callback(callback) + + ch, bh, packet = self._make_common_header_packet( + HeaderType.BEACON, TopoBroadcastHST.SINGLE_HOP, payload=bytes(24) + ) + router.process_common_header(packet, bh) + + router.gn_data_indicate_beacon.assert_called_once() + callback.assert_not_called() + + def test_hop_limit_exceeded_raises(self): + """RHL > MHL must raise DecapError.""" + from flexstack.geonet.exceptions import DecapError + mib = MIB() + router = Router(mib) + + ch, bh, packet = self._make_common_header_packet( + HeaderType.TSB, TopoBroadcastHST.SINGLE_HOP, rhl=5, mhl=3 + ) + with self.assertRaises(DecapError): + router.process_common_header(packet, bh) + + +class TestProcessSecurityHeader(unittest.TestCase): + """Unit tests for Router.process_security_header.""" + + def _make_basic_header(self) -> BasicHeader: + return 
BasicHeader(version=1).set_nh(BasicNH.SECURED_PACKET) + + def test_no_verify_service_discards(self): + """Without a VerifyService configured, the packet must be silently discarded.""" + mib = MIB() + router = Router(mib) # verify_service=None + router.process_common_header = Mock() + + router.process_security_header( + b"signed_data", self._make_basic_header()) + + router.process_common_header.assert_not_called() + + def test_verification_failure_discards(self): + """A failed verification result must not dispatch further.""" + mib = MIB() + verify_service = MagicMock(spec=VerifyService) + verify_service.verify.return_value = SNVERIFYConfirm( + report=ReportVerify.FALSE_SIGNATURE, + certificate_id=b"", + its_aid=b"", + its_aid_length=0, + permissions=b"", + plain_message=b"", + ) + router = Router(mib, verify_service=verify_service) + router.process_common_header = Mock() + + router.process_security_header(b"bad_data", self._make_basic_header()) + + verify_service.verify.assert_called_once() + router.process_common_header.assert_not_called() + + def test_verification_success_dispatches_to_process_common_header(self): + """Successful verification must hand the plain_message to process_common_header.""" + mib = MIB() + plain_message = bytes(8) + b"inner_payload" + verify_service = MagicMock(spec=VerifyService) + verify_service.verify.return_value = SNVERIFYConfirm( + report=ReportVerify.SUCCESS, + certificate_id=b"\x01\x02\x03\x04\x05\x06\x07\x08", + its_aid=b"", + its_aid_length=0, + permissions=b"", + plain_message=plain_message, + ) + router = Router(mib, verify_service=verify_service) + router.process_common_header = Mock() + + bh = self._make_basic_header() + router.process_security_header(b"valid_data", bh) + + verify_service.verify.assert_called_once() + router.process_common_header.assert_called_once_with( + plain_message, bh.set_nh(BasicNH.COMMON_HEADER)) + + +class TestGNDataIndicateTSB(unittest.TestCase): + """Unit tests for Router.gn_data_indicate_tsb 
(\u00a710.3.9.3).""" + + def _build_tsb_payload(self, rhl: int = 3) -> tuple: + """Return (basic_header, common_header, tsb_ext_header, upper_payload, raw_packet_after_common_header).""" + from flexstack.geonet.tsb_extended_header import TSBExtendedHeader + bh = BasicHeader(version=1, rhl=rhl) + ch = CommonHeader(ht=HeaderType.TSB, + hst=TopoBroadcastHST.MULTI_HOP) # type: ignore + tsb = TSBExtendedHeader(sn=1, so_pv=LongPositionVector()) + payload = b"tsb_upper_payload" + packet_after_common = tsb.encode() + payload + return bh, ch, tsb, payload, packet_after_common + + def test_delivery_to_upper_entity(self): + """Step 7: payload and correct indication fields must reach the callback.""" + mib = MIB() + router = Router(mib) + router.location_table.new_tsb_packet = Mock() + router.duplicate_address_detection = Mock() + callback = Mock() + router.register_indication_callback(callback) + + bh, ch, tsb, payload, raw = self._build_tsb_payload() + indication = router.gn_data_indicate_tsb(raw, ch, bh) + + self.assertEqual(indication.data, payload) + self.assertEqual(indication.packet_transport_type.header_subtype, + TopoBroadcastHST.MULTI_HOP) + router.duplicate_address_detection.assert_called_once() + router.location_table.new_tsb_packet.assert_called_once() + + def test_forwarded_when_rhl_gt_1(self): + """Steps 9+12: packet must be forwarded via LL when RHL > 1.""" + mib = MIB() + router = Router(mib) + router.location_table.new_tsb_packet = Mock() + router.duplicate_address_detection = Mock() + link_layer = Mock() + router.link_layer = link_layer + + bh, ch, tsb, payload, raw = self._build_tsb_payload(rhl=3) + router.gn_data_indicate_tsb(raw, ch, bh) + + link_layer.send.assert_called_once() + # RHL in forwarded packet must be decremented to 2 + forwarded = link_layer.send.call_args[0][0] + forwarded_bh = BasicHeader.decode_from_bytes(forwarded[0:4]) + self.assertEqual(forwarded_bh.rhl, 2) + + def test_not_forwarded_when_rhl_equals_1(self): + """Step 9a: packet must 
NOT be forwarded when RHL decrements to 0.""" + mib = MIB() + router = Router(mib) + router.location_table.new_tsb_packet = Mock() + router.duplicate_address_detection = Mock() + link_layer = Mock() + router.link_layer = link_layer + + bh, ch, tsb, payload, raw = self._build_tsb_payload(rhl=1) + router.gn_data_indicate_tsb(raw, ch, bh) + + link_layer.send.assert_not_called() + + def test_indication_via_gn_data_indicate(self): + """gn_data_indicate must dispatch MULTI_HOP TSB and invoke the callback.""" + mib = MIB() + router = Router(mib) + router.gn_data_indicate_tsb = Mock(return_value=GNDataIndication()) + callback = Mock() + router.register_indication_callback(callback) + + from flexstack.geonet.tsb_extended_header import TSBExtendedHeader + bh = BasicHeader(version=1) + ch = CommonHeader(ht=HeaderType.TSB, + hst=TopoBroadcastHST.MULTI_HOP) # type: ignore + tsb = TSBExtendedHeader(sn=1) + packet = bh.encode_to_bytes() + ch.encode_to_bytes() + tsb.encode() + b"data" + + router.gn_data_indicate(packet) + + router.gn_data_indicate_tsb.assert_called_once() + callback.assert_called_once() + + def test_remaining_hop_limit_in_indication(self): + """Table 32: remaining_hop_limit must equal the original RHL value.""" + mib = MIB() + router = Router(mib) + router.location_table.new_tsb_packet = Mock() + router.duplicate_address_detection = Mock() + + bh, ch, tsb, payload, raw = self._build_tsb_payload(rhl=5) + indication = router.gn_data_indicate_tsb(raw, ch, bh) + + self.assertEqual(indication.remaining_hop_limit, 5) + + +def _make_gn_addr() -> GNAddress: + return GNAddress(m=M.GN_MULTICAST, st=ST.CYCLIST, mid=MID(b"\xaa\xbb\xcc\xdd\x22\x33")) + + +def _make_guc_packet(so_pv: LongPositionVector, de_addr: GNAddress, + rhl: int = 0, payload: bytes = b"guc_payload"): + """Build a complete GUC wire packet (BH + CH + ext + payload). 
rhl=0 passes the mhl check.""" + de_pv = ShortPositionVector(gn_addr=de_addr) + bh = BasicHeader(version=1, rhl=rhl) + ch = CommonHeader(ht=HeaderType.GEOUNICAST) # type: ignore + guc = GUCExtendedHeader(sn=1, so_pv=so_pv, de_pv=de_pv) + return bh.encode_to_bytes() + ch.encode_to_bytes() + guc.encode() + payload + + +class TestGNDataRequestGUC(unittest.TestCase): + """Unit tests for Router.gn_data_request_guc (§10.3.8.2).""" + + def test_no_locte_returns_accepted(self): + """When there is no LocTE for the destination the stub must return ACCEPTED.""" + mib = MIB() + router = Router(mib) + dest_addr = _make_gn_addr() + request = GNDataRequest( + packet_transport_type=PacketTransportType( + header_type=HeaderType.GEOUNICAST), + destination=dest_addr, + data=b"hello", + ) + confirm = router.gn_data_request(request) + self.assertEqual(confirm.result_code, ResultCode.ACCEPTED) + + def test_sends_packet_when_locte_exists(self): + """When a LocTE exists for the destination a GUC packet must be sent to the LL.""" + mib = MIB() + router = Router(mib) + dest_addr = _make_gn_addr() + # Inject a fake LocTE for the destination + de_lpv = LongPositionVector( + gn_addr=dest_addr, latitude=100, longitude=200) + fake_entry = Mock() + fake_entry.position_vector = de_lpv + router.location_table.get_entry = Mock(return_value=fake_entry) + router.location_table.get_neighbours = Mock(return_value=[fake_entry]) + + link_layer = Mock() + router.link_layer = link_layer + + request = GNDataRequest( + packet_transport_type=PacketTransportType( + header_type=HeaderType.GEOUNICAST), + destination=dest_addr, + data=b"hello", + ) + confirm = router.gn_data_request(request) + + self.assertEqual(confirm.result_code, ResultCode.ACCEPTED) + link_layer.send.assert_called_once() + + def test_dispatched_via_gn_data_request(self): + """gn_data_request must dispatch GEOUNICAST to gn_data_request_guc.""" + mib = MIB() + router = Router(mib) + router.gn_data_request_guc = Mock( + 
return_value=GNDataConfirm(result_code=ResultCode.ACCEPTED)) + dest_addr = _make_gn_addr() + request = GNDataRequest( + packet_transport_type=PacketTransportType( + header_type=HeaderType.GEOUNICAST), + destination=dest_addr, + data=b"hello", + ) + router.gn_data_request(request) + router.gn_data_request_guc.assert_called_once_with(request) + + +class TestGNDataIndicateGUC(unittest.TestCase): + """Unit tests for Router.gn_data_indicate_guc (§10.3.8.3 / §10.3.8.4).""" + + def _build_guc_raw(self, so_pv: LongPositionVector, de_addr: GNAddress, + rhl: int = 3, payload: bytes = b"guc_payload"): + """Return (bh, ch, guc_ext, payload, raw_after_common).""" + de_pv = ShortPositionVector(gn_addr=de_addr) + bh = BasicHeader(version=1, rhl=rhl) + ch = CommonHeader(ht=HeaderType.GEOUNICAST) # type: ignore + guc = GUCExtendedHeader(sn=1, so_pv=so_pv, de_pv=de_pv) + raw = guc.encode() + payload + return bh, ch, guc, payload, raw + + def test_destination_delivery(self): + """§10.3.8.4: when DE == self, payload must be delivered via GN-DATA.indication.""" + mib = MIB() + router = Router(mib) + router.duplicate_address_detection = Mock() + router.location_table.new_guc_packet = Mock() + + so_pv = LongPositionVector(gn_addr=_make_gn_addr()) + de_addr = mib.itsGnLocalGnAddr # self is the destination + bh, ch, guc, payload, raw = self._build_guc_raw(so_pv, de_addr) + + indication = router.gn_data_indicate_guc(raw, ch, bh) + + self.assertEqual(indication.data, payload) + self.assertEqual( + indication.packet_transport_type.header_type, HeaderType.GEOUNICAST) + router.duplicate_address_detection.assert_called_once() + router.location_table.new_guc_packet.assert_called_once() + + def test_destination_not_forwarded(self): + """§10.3.8.4: destination node must NOT forward the packet.""" + mib = MIB() + router = Router(mib) + router.duplicate_address_detection = Mock() + router.location_table.new_guc_packet = Mock() + link_layer = Mock() + router.link_layer = link_layer + + so_pv = 
LongPositionVector(gn_addr=_make_gn_addr()) + de_addr = mib.itsGnLocalGnAddr + bh, ch, guc, payload, raw = self._build_guc_raw(so_pv, de_addr) + router.gn_data_indicate_guc(raw, ch, bh) + + link_layer.send.assert_not_called() + + def test_forwarder_forwards_with_decremented_rhl(self): + """§10.3.8.3: forwarder must forward packet with RHL decremented by 1.""" + mib = MIB() + router = Router(mib) + router.duplicate_address_detection = Mock() + router.location_table.new_guc_packet = Mock() + link_layer = Mock() + router.link_layer = link_layer + + so_pv = LongPositionVector(gn_addr=_make_gn_addr()) + de_addr = _make_gn_addr() # different from self + bh, ch, guc, payload, raw = self._build_guc_raw(so_pv, de_addr, rhl=3) + router.gn_data_indicate_guc(raw, ch, bh) + + link_layer.send.assert_called_once() + forwarded = link_layer.send.call_args[0][0] + fwd_bh = BasicHeader.decode_from_bytes(forwarded[0:4]) + self.assertEqual(fwd_bh.rhl, 2) + + def test_forwarder_not_forwarded_when_rhl_1(self): + """Step 9: packet must NOT be forwarded when RHL decrements to 0.""" + mib = MIB() + router = Router(mib) + router.duplicate_address_detection = Mock() + router.location_table.new_guc_packet = Mock() + link_layer = Mock() + router.link_layer = link_layer + + so_pv = LongPositionVector(gn_addr=_make_gn_addr()) + de_addr = _make_gn_addr() + bh, ch, guc, payload, raw = self._build_guc_raw(so_pv, de_addr, rhl=1) + router.gn_data_indicate_guc(raw, ch, bh) + + link_layer.send.assert_not_called() + + def test_dispatched_via_gn_data_indicate(self): + """gn_data_indicate must dispatch GEOUNICAST to gn_data_indicate_guc and invoke callback.""" + mib = MIB() + router = Router(mib) + router.gn_data_indicate_guc = Mock(return_value=GNDataIndication()) + callback = Mock() + router.register_indication_callback(callback) + + so_pv = LongPositionVector(gn_addr=_make_gn_addr()) + de_addr = mib.itsGnLocalGnAddr + packet = _make_guc_packet(so_pv, de_addr) + router.gn_data_indicate(packet) + + 
router.gn_data_indicate_guc.assert_called_once() + callback.assert_called_once() + + +# --------------------------------------------------------------------------- +# GAC helpers +# --------------------------------------------------------------------------- + +def _make_gac_area(inside: bool) -> Area: + """ + Return an Area whose centre is at lat=0, lon=0, radius a=100 m (circle). + The ego position vector in Router defaults to lat=0, lon=0, so: + inside=True → ego is at the centre → F ≥ 0 + inside=False → ego is at lat=10000000 (≈1°) → F < 0 + """ + return Area(latitude=0, longitude=0, a=100, b=100, angle=0) + + +def _make_gac_packet(so_pv: LongPositionVector, area: Area, + hst: GeoAnycastHST = GeoAnycastHST.GEOANYCAST_CIRCLE, + rhl: int = 0, payload: bytes = b"gac_payload") -> bytes: + """Build a complete GAC wire packet (BH + CH + ext + payload). rhl=0 passes the mhl check.""" + bh = BasicHeader(version=1, rhl=rhl) + ch = CommonHeader(ht=HeaderType.GEOANYCAST, hst=hst) # type: ignore + gbc = GBCExtendedHeader( + sn=1, so_pv=so_pv, + latitude=area.latitude, longitude=area.longitude, + a=area.a, b=area.b, angle=area.angle, + ) + return bh.encode_to_bytes() + ch.encode_to_bytes() + gbc.encode() + payload + + +class TestGNDataRequestGAC(unittest.TestCase): + """Unit tests for Router.gn_data_request_gac (§10.3.12.2).""" + + def _make_request(self, inside: bool = True) -> GNDataRequest: + area = _make_gac_area(inside) + return GNDataRequest( + packet_transport_type=PacketTransportType( + header_type=HeaderType.GEOANYCAST, + header_subtype=GeoAnycastHST.GEOANYCAST_CIRCLE, + ), + area=area, + data=b"hello", + ) + + def test_dispatched_via_gn_data_request(self): + """gn_data_request must dispatch GEOANYCAST to gn_data_request_gac.""" + mib = MIB() + router = Router(mib) + router.gn_data_request_gac = Mock( + return_value=GNDataConfirm(result_code=ResultCode.ACCEPTED)) + request = self._make_request(inside=True) + router.gn_data_request(request) + 
router.gn_data_request_gac.assert_called_once_with(request) + + def test_sends_packet_when_inside_area(self): + """Inside area: source must send the GAC packet to the LL.""" + mib = MIB() + router = Router(mib) + # Ego is at (0,0), area centred at (0,0) radius 100 m → inside + router.ego_position_vector = LongPositionVector( + latitude=0, longitude=0) + link_layer = Mock() + router.link_layer = link_layer + + confirm = router.gn_data_request_gac(self._make_request(inside=True)) + + self.assertEqual(confirm.result_code, ResultCode.ACCEPTED) + link_layer.send.assert_called_once() + + def test_sends_packet_when_outside_area(self): + """Outside area: GF executes; no neighbours + SCF=False → BCAST fallback → LL send.""" + mib = MIB() + router = Router(mib) + # Move ego far outside the area + router.ego_position_vector = LongPositionVector( + latitude=100000000, longitude=100000000) + link_layer = Mock() + router.link_layer = link_layer + + confirm = router.gn_data_request_gac(self._make_request(inside=False)) + + # §E.2: local optimum with SCF=False → BCAST fallback → packet is sent + self.assertEqual(confirm.result_code, ResultCode.ACCEPTED) + link_layer.send.assert_called_once() + + +class TestGNDataIndicateGAC(unittest.TestCase): + """Unit tests for Router.gn_data_indicate_gac (§10.3.12.3).""" + + def _setup_router_inside(self): + """Router ego placed inside the GAC area (lat=0, lon=0, area centred at 0,0 r=100m).""" + mib = MIB() + router = Router(mib) + router.ego_position_vector = LongPositionVector( + latitude=0, longitude=0) + router.duplicate_address_detection = Mock() + router.location_table.new_gac_packet = Mock() + return router + + def _setup_router_outside(self): + """Router ego placed outside the GAC area.""" + mib = MIB() + router = Router(mib) + router.ego_position_vector = LongPositionVector( + latitude=100000000, longitude=100000000) + router.duplicate_address_detection = Mock() + router.location_table.new_gac_packet = Mock() + return router + + def 
_base_gac_raw(self, so_pv, area, rhl=3, payload=b"gac_payload"): + """Return (bh, ch, gbc_ext, payload, raw_after_common).""" + bh = BasicHeader(version=1, rhl=rhl) + ch = CommonHeader(ht=HeaderType.GEOANYCAST, + hst=GeoAnycastHST.GEOANYCAST_CIRCLE) # type: ignore + gbc = GBCExtendedHeader( + sn=1, so_pv=so_pv, + latitude=area.latitude, longitude=area.longitude, + a=area.a, b=area.b, angle=area.angle, + ) + raw = gbc.encode() + payload + return bh, ch, gbc, payload, raw + + def test_inside_area_delivers_to_upper_entity(self): + """§10.3.12.3 step 9a: inside area → payload delivered via GN-DATA.indication.""" + router = self._setup_router_inside() + area = _make_gac_area(inside=True) + so_pv = LongPositionVector(gn_addr=_make_gn_addr()) + bh, ch, gbc, payload, raw = self._base_gac_raw(so_pv, area) + + indication = router.gn_data_indicate_gac(raw, ch, bh) + + self.assertEqual(indication.data, payload) + self.assertEqual( + indication.packet_transport_type.header_type, HeaderType.GEOANYCAST) + router.duplicate_address_detection.assert_called_once() + router.location_table.new_gac_packet.assert_called_once() + + def test_inside_area_does_not_forward(self): + """§10.3.12.3 step 9b: inside area → packet MUST NOT be forwarded (omit further steps).""" + router = self._setup_router_inside() + area = _make_gac_area(inside=True) + so_pv = LongPositionVector(gn_addr=_make_gn_addr()) + link_layer = Mock() + router.link_layer = link_layer + bh, ch, gbc, payload, raw = self._base_gac_raw(so_pv, area, rhl=5) + + router.gn_data_indicate_gac(raw, ch, bh) + + link_layer.send.assert_not_called() + + def test_outside_area_forwards_with_decremented_rhl(self): + """§10.3.12.3 step 10: outside area → packet forwarded with RHL decremented by 1.""" + router = self._setup_router_outside() + # area at (0,0); ego is far away → outside + area = _make_gac_area(inside=True) + so_pv = LongPositionVector(gn_addr=_make_gn_addr()) + link_layer = Mock() + router.link_layer = link_layer + bh, ch, gbc, 
payload, raw = self._base_gac_raw(so_pv, area, rhl=3) + + router.gn_data_indicate_gac(raw, ch, bh) + + link_layer.send.assert_called_once() + forwarded = link_layer.send.call_args[0][0] + fwd_bh = BasicHeader.decode_from_bytes(forwarded[0:4]) + self.assertEqual(fwd_bh.rhl, 2) + + def test_outside_area_does_not_deliver_to_upper_entity(self): + """§10.3.12.3 NOTE 2: outside area → payload must NOT reach upper layer.""" + router = self._setup_router_outside() + area = _make_gac_area(inside=True) + so_pv = LongPositionVector(gn_addr=_make_gn_addr()) + bh, ch, gbc, payload, raw = self._base_gac_raw(so_pv, area, rhl=3) + + indication = router.gn_data_indicate_gac(raw, ch, bh) + + # Outside → returns None (no upper-layer delivery) + self.assertIsNone(indication) + + def test_outside_area_rhl_1_discards(self): + """§10.3.12.3 step 10a(i): outside area with RHL=1 → discard (RHL decrements to 0).""" + router = self._setup_router_outside() + area = _make_gac_area(inside=True) + so_pv = LongPositionVector(gn_addr=_make_gn_addr()) + link_layer = Mock() + router.link_layer = link_layer + bh, ch, gbc, payload, raw = self._base_gac_raw(so_pv, area, rhl=1) + + router.gn_data_indicate_gac(raw, ch, bh) + + link_layer.send.assert_not_called() + + def test_dispatched_via_gn_data_indicate(self): + """gn_data_indicate must dispatch GEOANYCAST to gn_data_indicate_gac and invoke callback.""" + mib = MIB() + router = Router(mib) + router.gn_data_indicate_gac = Mock(return_value=GNDataIndication()) + callback = Mock() + router.register_indication_callback(callback) + + area = _make_gac_area(inside=True) + so_pv = LongPositionVector(gn_addr=_make_gn_addr()) + packet = _make_gac_packet(so_pv, area) + router.gn_data_indicate(packet) + + router.gn_data_indicate_gac.assert_called_once() + callback.assert_called_once() + + +# --------------------------------------------------------------------------- +# Helpers for LS tests +# 
--------------------------------------------------------------------------- + +def _make_ls_request_packet( + so_pv: LongPositionVector, + request_gn_addr: GNAddress, + rhl: int = 3, + payload: bytes = b"", +) -> bytes: + """Build a complete LS Request wire packet (BH + CH + ext).""" + bh = BasicHeader(version=1, rhl=rhl) + ch = CommonHeader(ht=HeaderType.LS, + hst=LocationServiceHST.LS_REQUEST) # type: ignore + ls_req = LSRequestExtendedHeader( + sn=1, so_pv=so_pv, request_gn_addr=request_gn_addr) + return bh.encode_to_bytes() + ch.encode_to_bytes() + ls_req.encode() + payload + + +def _make_ls_reply_packet( + so_pv: LongPositionVector, + de_pv: ShortPositionVector, + rhl: int = 3, + payload: bytes = b"", +) -> bytes: + """Build a complete LS Reply wire packet (BH + CH + ext).""" + bh = BasicHeader(version=1, rhl=rhl) + ch = CommonHeader(ht=HeaderType.LS, + hst=LocationServiceHST.LS_REPLY) # type: ignore + ls_reply = LSReplyExtendedHeader(sn=2, so_pv=so_pv, de_pv=de_pv) + return bh.encode_to_bytes() + ch.encode_to_bytes() + ls_reply.encode() + payload + + +class TestGNLSRequest(unittest.TestCase): + """Unit tests for Router.gn_ls_request and Router._ls_retransmit (§10.3.7.1.2/3).""" + + def test_sends_ls_request_packet(self): + """gn_ls_request must broadcast an LS Request packet via the link layer.""" + mib = MIB() + router = Router(mib) + link_layer = Mock() + router.link_layer = link_layer + sought = _make_gn_addr() + + router.gn_ls_request(sought) + + link_layer.send.assert_called_once() + raw = link_layer.send.call_args[0][0] + # Verify it is an LS Request packet + ch = CommonHeader.decode_from_bytes(raw[4:12]) + self.assertEqual(ch.ht, HeaderType.LS) + self.assertEqual(ch.hst, LocationServiceHST.LS_REQUEST) + + def test_sets_ls_pending(self): + """gn_ls_request must set ls_pending=TRUE on the destination LocTE.""" + mib = MIB() + router = Router(mib) + sought = _make_gn_addr() + + router.gn_ls_request(sought) + + entry = 
router.location_table.get_entry(sought) + self.assertIsNotNone(entry) + self.assertTrue(entry.ls_pending) + + def test_buffers_request_when_ls_pending(self): + """If ls_pending is already TRUE, gn_ls_request must buffer the new request and NOT resend.""" + mib = MIB() + router = Router(mib) + link_layer = Mock() + router.link_layer = link_layer + sought = _make_gn_addr() + + # First call starts LS + router.gn_ls_request(sought) + send_count = link_layer.send.call_count + + # Build a dummy buffered request + extra_req = GNDataRequest( + packet_transport_type=PacketTransportType( + header_type=HeaderType.GEOUNICAST), + destination=sought, + data=b"extra", + ) + # Second call while LS is pending: must NOT send another LS Request + router.gn_ls_request(sought, extra_req) + self.assertEqual(link_layer.send.call_count, send_count) + # Buffer must contain the extra request + self.assertIn(extra_req, router._ls_packet_buffers.get(sought, [])) + + def test_retransmit_resends_and_increments_counter(self): + """_ls_retransmit must resend the LS Request and increment the counter.""" + mib = MIB() + router = Router(mib) + link_layer = Mock() + router.link_layer = link_layer + sought = _make_gn_addr() + + router.gn_ls_request(sought) + initial_count = link_layer.send.call_count + + # Fire retransmit directly (avoids actual timer sleep) + router._ls_retransmit(sought) + + self.assertEqual(link_layer.send.call_count, initial_count + 1) + self.assertEqual(router._ls_retransmit_counters.get(sought), 1) + + def test_retransmit_gives_up_at_max_retrans(self): + """_ls_retransmit must stop and set ls_pending=FALSE when counter reaches maximum.""" + mib = MIB(itsGnLocationServiceMaxRetrans=2) + router = Router(mib) + link_layer = Mock() + router.link_layer = link_layer + sought = _make_gn_addr() + + router.gn_ls_request(sought) + # Fire retransmit at the limit + router._ls_retransmit_counters[sought] = mib.itsGnLocationServiceMaxRetrans + count_before = link_layer.send.call_count + 
router._ls_retransmit(sought) + + # No extra send should have happened + self.assertEqual(link_layer.send.call_count, count_before) + entry = router.location_table.get_entry(sought) + self.assertIsNotNone(entry) + self.assertFalse(entry.ls_pending) + + def test_no_locte_triggers_ls_via_gn_data_request_guc(self): + """gn_data_request_guc must trigger gn_ls_request when no LocTE for destination exists.""" + mib = MIB() + router = Router(mib) + link_layer = Mock() + router.link_layer = link_layer + dest = _make_gn_addr() + request = GNDataRequest( + packet_transport_type=PacketTransportType( + header_type=HeaderType.GEOUNICAST), + destination=dest, + data=b"hello", + ) + confirm = router.gn_data_request(request) + + # Must return ACCEPTED and have sent an LS Request + self.assertEqual(confirm.result_code, ResultCode.ACCEPTED) + link_layer.send.assert_called_once() + raw = link_layer.send.call_args[0][0] + ch = CommonHeader.decode_from_bytes(raw[4:12]) + self.assertEqual(ch.ht, HeaderType.LS) + + +class TestGNDataIndicateLSRequest(unittest.TestCase): + """Unit tests for Router.gn_data_indicate_ls_request (§10.3.7.2 / §10.3.7.3).""" + + def _build_packet_body(self, so_pv, request_gn_addr, payload=b""): + """Return the bytes AFTER the common header (ext header + payload).""" + ls_req = LSRequestExtendedHeader( + sn=1, so_pv=so_pv, request_gn_addr=request_gn_addr) + return ls_req.encode() + payload + + def test_forwarder_forwards_with_decremented_rhl(self): + """§10.3.7.2: forwarder must re-broadcast LS Request with RHL decremented by 1.""" + mib = MIB() + router = Router(mib) + router.duplicate_address_detection = Mock() + router.location_table.new_ls_request_packet = Mock() + link_layer = Mock() + router.link_layer = link_layer + + so_pv = LongPositionVector(gn_addr=_make_gn_addr()) + # request_gn_addr is NOT self → forwarder role + request_gn_addr = _make_gn_addr() + bh = BasicHeader(version=1, rhl=3) + ch = CommonHeader(ht=HeaderType.LS, + 
hst=LocationServiceHST.LS_REQUEST) # type: ignore + body = self._build_packet_body(so_pv, request_gn_addr) + + router.gn_data_indicate_ls_request(body, ch, bh) + + link_layer.send.assert_called_once() + forwarded = link_layer.send.call_args[0][0] + fwd_bh = BasicHeader.decode_from_bytes(forwarded[0:4]) + self.assertEqual(fwd_bh.rhl, 2) + + def test_forwarder_rhl_1_discards(self): + """§10.3.7.2: forwarder must discard packet when RHL decrements to 0.""" + mib = MIB() + router = Router(mib) + router.duplicate_address_detection = Mock() + router.location_table.new_ls_request_packet = Mock() + link_layer = Mock() + router.link_layer = link_layer + + so_pv = LongPositionVector(gn_addr=_make_gn_addr()) + request_gn_addr = _make_gn_addr() + bh = BasicHeader(version=1, rhl=1) + ch = CommonHeader(ht=HeaderType.LS, + hst=LocationServiceHST.LS_REQUEST) # type: ignore + body = self._build_packet_body(so_pv, request_gn_addr) + + router.gn_data_indicate_ls_request(body, ch, bh) + + link_layer.send.assert_not_called() + + def test_destination_sends_ls_reply(self): + """§10.3.7.3: destination (Request_GN_ADDR == own) must send an LS Reply.""" + mib = MIB() + router = Router(mib) + router.duplicate_address_detection = Mock() + router.location_table.new_ls_request_packet = Mock() + link_layer = Mock() + router.link_layer = link_layer + + so_pv = LongPositionVector(gn_addr=_make_gn_addr()) + # Inject SO LocTE so the reply can find DE PV + fake_so_entry = Mock() + fake_so_entry.position_vector = so_pv + router.location_table.get_entry = Mock(return_value=fake_so_entry) + + # request_gn_addr IS self + request_gn_addr = mib.itsGnLocalGnAddr + bh = BasicHeader(version=1, rhl=3) + ch = CommonHeader(ht=HeaderType.LS, + hst=LocationServiceHST.LS_REQUEST) # type: ignore + body = self._build_packet_body(so_pv, request_gn_addr) + + router.gn_data_indicate_ls_request(body, ch, bh) + + link_layer.send.assert_called_once() + reply_raw = link_layer.send.call_args[0][0] + reply_ch = 
CommonHeader.decode_from_bytes(reply_raw[4:12]) + self.assertEqual(reply_ch.ht, HeaderType.LS) + self.assertEqual(reply_ch.hst, LocationServiceHST.LS_REPLY) + + def test_destination_does_not_forward(self): + """§10.3.7.3: after sending reply the destination must NOT re-broadcast the request.""" + mib = MIB() + router = Router(mib) + router.duplicate_address_detection = Mock() + router.location_table.new_ls_request_packet = Mock() + link_layer = Mock() + router.link_layer = link_layer + + so_pv = LongPositionVector(gn_addr=_make_gn_addr()) + fake_so_entry = Mock() + fake_so_entry.position_vector = so_pv + router.location_table.get_entry = Mock(return_value=fake_so_entry) + + request_gn_addr = mib.itsGnLocalGnAddr + bh = BasicHeader(version=1, rhl=3) + ch = CommonHeader(ht=HeaderType.LS, + hst=LocationServiceHST.LS_REQUEST) # type: ignore + body = self._build_packet_body(so_pv, request_gn_addr) + + router.gn_data_indicate_ls_request(body, ch, bh) + + # Exactly ONE send (the LS Reply) – not forwarded + link_layer.send.assert_called_once() + + def test_dispatched_via_gn_data_indicate(self): + """process_common_header must dispatch HT=LS to gn_data_indicate_ls.""" + mib = MIB() + router = Router(mib) + router.gn_data_indicate_ls = Mock() + + so_pv = LongPositionVector(gn_addr=_make_gn_addr()) + raw = _make_ls_request_packet(so_pv, _make_gn_addr(), rhl=0) + router.gn_data_indicate(raw) + + router.gn_data_indicate_ls.assert_called_once() + + +class TestGNDataIndicateLSReply(unittest.TestCase): + """Unit tests for Router.gn_data_indicate_ls_reply (§10.3.7.1.4 / §10.3.7.2).""" + + def _build_packet_body(self, so_pv, de_pv, payload=b""): + """Return the bytes AFTER the common header (ext header + payload).""" + ls_reply = LSReplyExtendedHeader(sn=2, so_pv=so_pv, de_pv=de_pv) + return ls_reply.encode() + payload + + def test_source_sets_ls_pending_false(self): + """§10.3.7.1.4: source receiving reply must set ls_pending=FALSE.""" + mib = MIB() + router = Router(mib) + 
router.duplicate_address_detection = Mock() + router.location_table.new_ls_reply_packet = Mock() + + so_addr = _make_gn_addr() + so_pv = LongPositionVector(gn_addr=so_addr) + de_pv = ShortPositionVector(gn_addr=mib.itsGnLocalGnAddr) + + # Simulate an ongoing LS for so_addr + entry = router.location_table.ensure_entry(so_addr) + entry.ls_pending = True + router._ls_packet_buffers[so_addr] = [] + router._ls_retransmit_counters[so_addr] = 1 + + bh = BasicHeader(version=1, rhl=3) + ch = CommonHeader(ht=HeaderType.LS, + hst=LocationServiceHST.LS_REPLY) # type: ignore + body = self._build_packet_body(so_pv, de_pv) + + router.gn_data_indicate_ls_reply(body, ch, bh) + + self.assertFalse(entry.ls_pending) + + def test_source_flushes_buffered_requests(self): + """§10.3.7.1.4 step 7: source must re-process buffered GNDataRequests after LS completes.""" + mib = MIB() + router = Router(mib) + router.duplicate_address_detection = Mock() + router.location_table.new_ls_reply_packet = Mock() + link_layer = Mock() + router.link_layer = link_layer + + so_addr = _make_gn_addr() + so_pv = LongPositionVector(gn_addr=so_addr) + de_pv = ShortPositionVector(gn_addr=mib.itsGnLocalGnAddr) + + # Prepare a buffered request for so_addr + buffered_req = GNDataRequest( + packet_transport_type=PacketTransportType( + header_type=HeaderType.GEOUNICAST), + destination=so_addr, + data=b"buffered", + ) + entry = router.location_table.ensure_entry(so_addr) + entry.ls_pending = True + router._ls_packet_buffers[so_addr] = [buffered_req] + router._ls_retransmit_counters[so_addr] = 0 + + # Inject a LocTE with a real position vector so gn_data_request_guc can build GUC packet + so_pv_full = LongPositionVector( + gn_addr=so_addr, latitude=100, longitude=200) + fake_entry = Mock() + fake_entry.position_vector = so_pv_full + router.location_table.get_entry = Mock(return_value=fake_entry) + router.location_table.get_neighbours = Mock(return_value=[fake_entry]) + + bh = BasicHeader(version=1, rhl=3) + ch = 
CommonHeader(ht=HeaderType.LS, + hst=LocationServiceHST.LS_REPLY) # type: ignore + body = self._build_packet_body(so_pv, de_pv) + + router.gn_data_indicate_ls_reply(body, ch, bh) + + # link_layer.send is called once for the flushed GUC packet + link_layer.send.assert_called_once() + flushed_raw = link_layer.send.call_args[0][0] + flushed_ch = CommonHeader.decode_from_bytes(flushed_raw[4:12]) + self.assertEqual(flushed_ch.ht, HeaderType.GEOUNICAST) + + def test_forwarder_forwards_with_decremented_rhl(self): + """§10.3.7.2 forwarder: LS Reply must be forwarded with RHL decremented by 1.""" + mib = MIB() + router = Router(mib) + router.duplicate_address_detection = Mock() + router.location_table.new_ls_reply_packet = Mock() + link_layer = Mock() + router.link_layer = link_layer + + so_addr = _make_gn_addr() + so_pv = LongPositionVector(gn_addr=so_addr) + # DE is NOT self → forwarder role + de_addr = _make_gn_addr() + de_pv = ShortPositionVector(gn_addr=de_addr) + + router.location_table.get_entry = Mock( + return_value=None) # DE not a neighbour + + bh = BasicHeader(version=1, rhl=3) + ch = CommonHeader(ht=HeaderType.LS, + hst=LocationServiceHST.LS_REPLY) # type: ignore + body = self._build_packet_body(so_pv, de_pv) + + router.gn_data_indicate_ls_reply(body, ch, bh) + + link_layer.send.assert_called_once() + forwarded = link_layer.send.call_args[0][0] + fwd_bh = BasicHeader.decode_from_bytes(forwarded[0:4]) + self.assertEqual(fwd_bh.rhl, 2) + + def test_forwarder_rhl_1_discards(self): + """§10.3.7.2 forwarder: LS Reply must be discarded when RHL decrements to 0.""" + mib = MIB() + router = Router(mib) + router.duplicate_address_detection = Mock() + router.location_table.new_ls_reply_packet = Mock() + link_layer = Mock() + router.link_layer = link_layer + + so_addr = _make_gn_addr() + so_pv = LongPositionVector(gn_addr=so_addr) + de_addr = _make_gn_addr() + de_pv = ShortPositionVector(gn_addr=de_addr) + router.location_table.get_entry = Mock(return_value=None) + + bh = 
BasicHeader(version=1, rhl=1) + ch = CommonHeader(ht=HeaderType.LS, + hst=LocationServiceHST.LS_REPLY) # type: ignore + body = self._build_packet_body(so_pv, de_pv) + + router.gn_data_indicate_ls_reply(body, ch, bh) + + link_layer.send.assert_not_called() + + def test_dispatched_via_gn_data_indicate(self): + """process_common_header must dispatch HT=LS/HST=LS_REPLY to gn_data_indicate_ls.""" + mib = MIB() + router = Router(mib) + router.gn_data_indicate_ls = Mock() + + so_pv = LongPositionVector(gn_addr=_make_gn_addr()) + de_pv = ShortPositionVector(gn_addr=_make_gn_addr()) + raw = _make_ls_reply_packet(so_pv, de_pv, rhl=0) + router.gn_data_indicate(raw) + + router.gn_data_indicate_ls.assert_called_once() + + +# --------------------------------------------------------------------------- +# Annex B – Packet data rate and geographical area size control +# --------------------------------------------------------------------------- + +class TestAnnexB(unittest.TestCase): + """Unit tests verifying compliance with ETSI EN 302 636-4-1 V1.4.1 Annex B.""" + + # ── §B.3 _compute_area_size_m2 ────────────────────────────────────────── + + def test_compute_area_size_circle(self): + """§B.3: Circle area = π × a².""" + import math + area = Area(a=1000, b=0, latitude=0, longitude=0, angle=0) + result = Router._compute_area_size_m2( + GeoBroadcastHST.GEOBROADCAST_CIRCLE, area) + self.assertAlmostEqual(result, math.pi * 1000 ** 2, places=0) + + def test_compute_area_size_ellipse(self): + """§B.3: Ellipse area = π × a × b.""" + import math + area = Area(a=2000, b=500, latitude=0, longitude=0, angle=0) + result = Router._compute_area_size_m2( + GeoBroadcastHST.GEOBROADCAST_ELIP, area) + self.assertAlmostEqual(result, math.pi * 2000 * 500, places=0) + + def test_compute_area_size_rect(self): + """§B.3: Rectangle area = 4 × a × b (a, b are half-lengths from centre).""" + area = Area(a=1000, b=500, latitude=0, longitude=0, angle=0) + result = Router._compute_area_size_m2( + 
GeoBroadcastHST.GEOBROADCAST_RECT, area) + self.assertAlmostEqual(result, 4 * 1000 * 500, places=0) + + def test_compute_area_size_gac_circle(self): + """§B.3: GAC circle variant also handled correctly.""" + import math + area = Area(a=1000, b=0, latitude=0, longitude=0, angle=0) + result = Router._compute_area_size_m2( + GeoAnycastHST.GEOANYCAST_CIRCLE, area) + self.assertAlmostEqual(result, math.pi * 1000 ** 2, places=0) + + # ── §B.3 GBC source ──────────────────────────────────────────────────── + + def test_gbc_source_large_area_returns_geo_scope_too_large(self): + """§B.3: gn_data_request_gbc must return GEOGRAPHICAL_SCOPE_TOO_LARGE for area > itsGnMaxGeoAreaSize.""" + mib = MIB() # itsGnMaxGeoAreaSize = 10 km² + router = Router(mib) + # Circle with a=2000m → area ≈ 12.57 km² > 10 km² + request = GNDataRequest( + packet_transport_type=PacketTransportType( + header_type=HeaderType.GEOBROADCAST, + header_subtype=GeoBroadcastHST.GEOBROADCAST_CIRCLE, + ), + area=Area(a=2000, b=0, latitude=0, longitude=0, angle=0), + data=b"hello", + ) + confirm = router.gn_data_request_gbc(request) + self.assertEqual(confirm.result_code, + ResultCode.GEOGRAPHICAL_SCOPE_TOO_LARGE) + + def test_gbc_source_small_area_not_rejected(self): + """§B.3: gn_data_request_gbc must NOT reject when area ≤ itsGnMaxGeoAreaSize.""" + mib = MIB() + router = Router(mib) + router.link_layer = Mock() + router.gn_forwarding_algorithm_selection = Mock( + return_value=GNForwardingAlgorithmResponse.AREA_FORWARDING) + # Circle with a=100m → area ≈ 31,416 m² < 10 km² + request = GNDataRequest( + packet_transport_type=PacketTransportType( + header_type=HeaderType.GEOBROADCAST, + header_subtype=GeoBroadcastHST.GEOBROADCAST_CIRCLE, + ), + area=Area(a=100, b=100, latitude=0, longitude=0, angle=0), + data=b"hello", + ) + confirm = router.gn_data_request_gbc(request) + self.assertEqual(confirm.result_code, ResultCode.ACCEPTED) + + def test_gac_source_large_area_returns_geo_scope_too_large(self): + """§B.3: 
gn_data_request_gac must also reject oversized areas (delegates to gn_data_request_gbc).""" + mib = MIB() + router = Router(mib) + request = GNDataRequest( + packet_transport_type=PacketTransportType( + header_type=HeaderType.GEOANYCAST, + header_subtype=GeoAnycastHST.GEOANYCAST_CIRCLE, + ), + area=Area(a=2000, b=0, latitude=0, longitude=0, angle=0), + data=b"hello", + ) + confirm = router.gn_data_request_gac(request) + self.assertEqual(confirm.result_code, + ResultCode.GEOGRAPHICAL_SCOPE_TOO_LARGE) + + # ── §B.3 GBC forwarder ───────────────────────────────────────────────── + + def test_gbc_forwarder_large_area_not_forwarded_but_delivered_inside(self): + """§B.3: Forwarder must NOT forward a GBC with oversized area, but MUST deliver if inside.""" + mib = MIB() + router = Router(mib) + router.ego_position_vector = LongPositionVector( + latitude=0, longitude=0) + router.duplicate_address_detection = Mock() + router.location_table.new_gbc_packet = Mock() + router.location_table.get_entry = Mock(return_value=None) + link_layer = Mock() + router.link_layer = link_layer + + so_pv = LongPositionVector(gn_addr=_make_gn_addr()) + # Circle a=2000m → area > 10 km²; centred at (0,0), ego at (0,0) → inside + gbc = GBCExtendedHeader( + sn=1, so_pv=so_pv, latitude=0, longitude=0, a=2000, b=0) + bh = BasicHeader(version=1, rhl=3) + ch = CommonHeader(ht=HeaderType.GEOBROADCAST, + hst=GeoBroadcastHST.GEOBROADCAST_CIRCLE) # type: ignore + raw = gbc.encode() + b"payload" + + indication = router.gn_data_indicate_gbc(raw, ch, bh) + + link_layer.send.assert_not_called() + self.assertEqual(indication.data, b"payload") + + def test_gbc_forwarder_large_area_not_forwarded_outside(self): + """§B.3: Forwarder must NOT forward a GBC with oversized area even when outside.""" + mib = MIB() + router = Router(mib) + router.ego_position_vector = LongPositionVector( + latitude=100000000, longitude=100000000) + router.duplicate_address_detection = Mock() + router.location_table.new_gbc_packet = 
Mock() + router.location_table.get_entry = Mock(return_value=None) + link_layer = Mock() + router.link_layer = link_layer + + so_pv = LongPositionVector(gn_addr=_make_gn_addr()) + gbc = GBCExtendedHeader( + sn=1, so_pv=so_pv, latitude=0, longitude=0, a=2000, b=0) + bh = BasicHeader(version=1, rhl=3) + ch = CommonHeader(ht=HeaderType.GEOBROADCAST, + hst=GeoBroadcastHST.GEOBROADCAST_CIRCLE) # type: ignore + raw = gbc.encode() + b"payload" + + indication = router.gn_data_indicate_gbc(raw, ch, bh) + + link_layer.send.assert_not_called() + # Outside area → no upper-layer delivery + self.assertIsNone(indication) + + # ── §B.3 GAC forwarder ───────────────────────────────────────────────── + + def test_gac_forwarder_large_area_not_forwarded(self): + """§B.3: Forwarder must NOT forward a GAC with oversized area when outside.""" + mib = MIB() + router = Router(mib) + router.ego_position_vector = LongPositionVector( + latitude=100000000, longitude=100000000) + router.duplicate_address_detection = Mock() + router.location_table.new_gac_packet = Mock() + router.location_table.get_entry = Mock(return_value=None) + link_layer = Mock() + router.link_layer = link_layer + + so_pv = LongPositionVector(gn_addr=_make_gn_addr()) + gbc = GBCExtendedHeader( + sn=1, so_pv=so_pv, latitude=0, longitude=0, a=2000, b=0) + bh = BasicHeader(version=1, rhl=3) + ch = CommonHeader(ht=HeaderType.GEOANYCAST, + hst=GeoAnycastHST.GEOANYCAST_CIRCLE) # type: ignore + raw = gbc.encode() + b"payload" + + router.gn_data_indicate_gac(raw, ch, bh) + + link_layer.send.assert_not_called() + + # ── §B.2 PDR enforcement – GBC ───────────────────────────────────────── + + def _make_high_pdr_entry(self, mib: MIB): + """Return a Mock LocTE with PDR above itsGnMaxPacketDataRate (100 kB/s = 100,000 bytes/s).""" + entry = Mock() + entry.pdr = mib.itsGnMaxPacketDataRate * 1000 + 1 # just above threshold + return entry + + def test_gbc_pdr_exceeded_not_forwarded_but_delivered_inside(self): + """§B.2: GBC inside area 
must still be delivered to upper entity even when SO PDR exceeded.""" + mib = MIB() + router = Router(mib) + router.ego_position_vector = LongPositionVector( + latitude=0, longitude=0) + router.duplicate_address_detection = Mock() + router.location_table.new_gbc_packet = Mock() + router.location_table.get_entry = Mock( + return_value=self._make_high_pdr_entry(mib)) + link_layer = Mock() + router.link_layer = link_layer + + so_pv = LongPositionVector(gn_addr=_make_gn_addr()) + gbc = GBCExtendedHeader( + sn=1, so_pv=so_pv, latitude=0, longitude=0, a=100, b=100) + bh = BasicHeader(version=1, rhl=3) + ch = CommonHeader(ht=HeaderType.GEOBROADCAST, + hst=GeoBroadcastHST.GEOBROADCAST_CIRCLE) # type: ignore + raw = gbc.encode() + b"payload" + + indication = router.gn_data_indicate_gbc(raw, ch, bh) + + link_layer.send.assert_not_called() + self.assertEqual(indication.data, b"payload") + + def test_gbc_pdr_ok_forwarded(self): + """§B.2: GBC must be forwarded when SO PDR is within limit.""" + mib = MIB() + router = Router(mib) + router.ego_position_vector = LongPositionVector( + latitude=0, longitude=0) + router.duplicate_address_detection = Mock() + router.location_table.new_gbc_packet = Mock() + entry = Mock() + entry.pdr = 0 # well below threshold + router.location_table.get_entry = Mock(return_value=entry) + router.gn_data_forward_gbc = Mock() + + so_pv = LongPositionVector(gn_addr=_make_gn_addr()) + gbc = GBCExtendedHeader( + sn=1, so_pv=so_pv, latitude=0, longitude=0, a=100, b=100) + bh = BasicHeader(version=1, rhl=3) + ch = CommonHeader(ht=HeaderType.GEOBROADCAST, + hst=GeoBroadcastHST.GEOBROADCAST_CIRCLE) # type: ignore + raw = gbc.encode() + b"payload" + + router.gn_data_indicate_gbc(raw, ch, bh) + + router.gn_data_forward_gbc.assert_called_once() + + # ── §B.2 PDR enforcement – GAC ───────────────────────────────────────── + + def test_gac_pdr_exceeded_not_forwarded(self): + """§B.2: GAC outside area must NOT be forwarded when SO PDR exceeded.""" + mib = MIB() + 
router = Router(mib) + router.ego_position_vector = LongPositionVector( + latitude=100000000, longitude=100000000) + router.duplicate_address_detection = Mock() + router.location_table.new_gac_packet = Mock() + router.location_table.get_entry = Mock( + return_value=self._make_high_pdr_entry(mib)) + link_layer = Mock() + router.link_layer = link_layer + + so_pv = LongPositionVector(gn_addr=_make_gn_addr()) + gbc = GBCExtendedHeader( + sn=1, so_pv=so_pv, latitude=0, longitude=0, a=100, b=100) + bh = BasicHeader(version=1, rhl=3) + ch = CommonHeader(ht=HeaderType.GEOANYCAST, + hst=GeoAnycastHST.GEOANYCAST_CIRCLE) # type: ignore + raw = gbc.encode() + b"payload" + + router.gn_data_indicate_gac(raw, ch, bh) + + link_layer.send.assert_not_called() + + # ── §B.2 PDR enforcement – GUC ───────────────────────────────────────── + + def test_guc_pdr_exceeded_not_forwarded(self): + """§B.2: GUC forwarder must NOT forward when SO PDR exceeded.""" + mib = MIB() + router = Router(mib) + router.duplicate_address_detection = Mock() + router.location_table.new_guc_packet = Mock() + router.location_table.get_entry = Mock( + return_value=self._make_high_pdr_entry(mib)) + link_layer = Mock() + router.link_layer = link_layer + + so_addr = _make_gn_addr() + so_pv = LongPositionVector(gn_addr=so_addr) + de_addr = _make_gn_addr() # not self → forwarder role + raw = _make_guc_packet(so_pv, de_addr) + bh = BasicHeader.decode_from_bytes(raw[0:4]) + ch = CommonHeader.decode_from_bytes(raw[4:12]) + body = raw[12:] + + router.gn_data_indicate_guc(body, ch, bh) + + link_layer.send.assert_not_called() + + # ── §B.2 PDR enforcement – TSB ───────────────────────────────────────── + + def test_tsb_pdr_exceeded_not_forwarded(self): + """§B.2: TSB must NOT be forwarded when SO PDR exceeded.""" + from flexstack.geonet.tsb_extended_header import TSBExtendedHeader + mib = MIB() + router = Router(mib) + router.duplicate_address_detection = Mock() + router.location_table.new_tsb_packet = Mock() + 
router.location_table.get_entry = Mock( + return_value=self._make_high_pdr_entry(mib)) + link_layer = Mock() + router.link_layer = link_layer + + so_pv = LongPositionVector(gn_addr=_make_gn_addr()) + tsb = TSBExtendedHeader(sn=1, so_pv=so_pv) + bh = BasicHeader(version=1, rhl=3) + ch = CommonHeader(ht=HeaderType.TSB, + hst=TopoBroadcastHST.MULTI_HOP) # type: ignore + raw = tsb.encode() + b"payload" + + router.gn_data_indicate_tsb(raw, ch, bh) + + link_layer.send.assert_not_called() + + def test_tsb_pdr_exceeded_still_delivered(self): + """§B.2: TSB payload must still be delivered to upper entity even when PDR exceeded.""" + from flexstack.geonet.tsb_extended_header import TSBExtendedHeader + mib = MIB() + router = Router(mib) + router.duplicate_address_detection = Mock() + router.location_table.new_tsb_packet = Mock() + router.location_table.get_entry = Mock( + return_value=self._make_high_pdr_entry(mib)) + + so_pv = LongPositionVector(gn_addr=_make_gn_addr()) + tsb = TSBExtendedHeader(sn=1, so_pv=so_pv) + bh = BasicHeader(version=1, rhl=3) + ch = CommonHeader(ht=HeaderType.TSB, + hst=TopoBroadcastHST.MULTI_HOP) # type: ignore + raw = tsb.encode() + b"payload" + + indication = router.gn_data_indicate_tsb(raw, ch, bh) + + self.assertEqual(indication.data, b"payload") + + +# --------------------------------------------------------------------------- +# Annex C – Position vector update in forwarded packets +# --------------------------------------------------------------------------- + +class TestAnnexC(unittest.TestCase): + """Unit tests verifying compliance with ETSI EN 302 636-4-1 V1.4.1 Annex C.3.""" + + _TIMESTAMP = 1675071608.0 + + def _make_de_entry(self, mib: MIB, tst_seconds: float) -> Mock: + """Return a Mock LocTE entry that is a neighbour, with a given LPV timestamp.""" + de_addr = GNAddress(m=M.GN_MULTICAST, st=ST.CYCLIST, + mid=MID(b"\xbb\xcc\xdd\xee\x11\x22")) + de_lpv = LongPositionVector( + gn_addr=de_addr, latitude=111111, longitude=222222 + 
).set_tst_in_normal_timestamp_seconds(tst_seconds) + entry = Mock() + entry.is_neighbour = True + entry.position_vector = de_lpv + entry.pdr = 0.0 + return entry, de_addr + + def _build_guc_raw_with_tst(self, so_pv, de_addr, de_tst_seconds, rhl=3): + """Build a GUC raw packet where the DE PV has a specific TST.""" + de_spv = ShortPositionVector( + gn_addr=de_addr + ).set_tst_in_normal_timestamp_seconds(de_tst_seconds) + bh = BasicHeader(version=1, rhl=rhl) + ch = CommonHeader(ht=HeaderType.GEOUNICAST) # type: ignore + guc = GUCExtendedHeader(sn=1, so_pv=so_pv, de_pv=de_spv) + raw = guc.encode() + b"guc_payload" + return bh, ch, guc, raw + + def _build_ls_reply_raw_with_tst(self, so_pv, de_addr, de_tst_seconds, rhl=3): + """Build an LS Reply packet where the DE PV has a specific TST.""" + de_spv = ShortPositionVector( + gn_addr=de_addr + ).set_tst_in_normal_timestamp_seconds(de_tst_seconds) + bh = BasicHeader(version=1, rhl=rhl) + ch = CommonHeader(ht=HeaderType.LS, + hst=LocationServiceHST.LS_REPLY) # type: ignore + ls_reply = LSReplyExtendedHeader(sn=2, so_pv=so_pv, de_pv=de_spv) + raw = ls_reply.encode() + b"ls_payload" + return bh, ch, ls_reply, raw + + # ── §C.3 GUC forwarder ────────────────────────────────────────────────── + + def test_guc_forwarder_de_pv_updated_when_loct_is_newer(self): + """§C.3: GUC forwarder must update DE PV when LocT TST is strictly newer.""" + mib = MIB() + router = Router(mib) + router.duplicate_address_detection = Mock() + router.location_table.new_guc_packet = Mock() + link_layer = Mock() + router.link_layer = link_layer + + so_pv = LongPositionVector(gn_addr=_make_gn_addr()).set_tst_in_normal_timestamp_seconds( + self._TIMESTAMP) + + # DE is a neighbour with LocT TST = TIMESTAMP + 1 (newer than packet's DE PV) + de_entry, de_addr = self._make_de_entry(mib, self._TIMESTAMP + 1.0) + # LocT entry for SO (get_entry called for SO in PDR check, then for DE in C.3) + + def mock_get_entry(addr): + if addr == de_addr: + return de_entry + 
return None + router.location_table.get_entry = Mock(side_effect=mock_get_entry) + + # Packet DE PV has older TST = TIMESTAMP + bh, ch, guc, raw = self._build_guc_raw_with_tst( + so_pv, de_addr, self._TIMESTAMP) + router.gn_data_indicate_guc(raw, ch, bh) + + link_layer.send.assert_called_once() + forwarded = link_layer.send.call_args[0][0] + # Decode the forwarded GUC header to check DE PV was refreshed + from flexstack.geonet.guc_extended_header import GUCExtendedHeader as GUCHdr + fwd_guc = GUCHdr.decode(forwarded[12:]) + # DE PV TST must now match LocT TST (TIMESTAMP + 1) + from flexstack.geonet.position_vector import TST + expected_tst = TST().set_in_normal_timestamp_seconds(self._TIMESTAMP + 1.0) + self.assertEqual(fwd_guc.de_pv.tst, expected_tst) + + def test_guc_forwarder_de_pv_not_updated_when_loct_is_older(self): + """§C.3 ELSE: GUC forwarder must NOT update DE PV when LocT TST is not newer.""" + mib = MIB() + router = Router(mib) + router.duplicate_address_detection = Mock() + router.location_table.new_guc_packet = Mock() + link_layer = Mock() + router.link_layer = link_layer + + so_pv = LongPositionVector(gn_addr=_make_gn_addr()).set_tst_in_normal_timestamp_seconds( + self._TIMESTAMP) + + # DE is a neighbour with LocT TST = TIMESTAMP (same, not newer than packet's DE PV) + de_entry, de_addr = self._make_de_entry(mib, self._TIMESTAMP) + + def mock_get_entry(addr): + if addr == de_addr: + return de_entry + return None + router.location_table.get_entry = Mock(side_effect=mock_get_entry) + + # Packet DE PV has the same TST = TIMESTAMP (LocT is not strictly newer) + bh, ch, guc, raw = self._build_guc_raw_with_tst( + so_pv, de_addr, self._TIMESTAMP) + router.gn_data_indicate_guc(raw, ch, bh) + + link_layer.send.assert_called_once() + forwarded = link_layer.send.call_args[0][0] + from flexstack.geonet.guc_extended_header import GUCExtendedHeader as GUCHdr + fwd_guc = GUCHdr.decode(forwarded[12:]) + # DE PV TST must remain as in the original packet + from 
flexstack.geonet.position_vector import TST + expected_tst = TST().set_in_normal_timestamp_seconds(self._TIMESTAMP) + self.assertEqual(fwd_guc.de_pv.tst, expected_tst) + # Coordinates must also be unchanged (original packet has lat=0, lon=0) + self.assertEqual(fwd_guc.de_pv.latitude, 0) + self.assertEqual(fwd_guc.de_pv.longitude, 0) + + # ── §C.3 LS Reply forwarder ───────────────────────────────────────────── + + def test_ls_reply_forwarder_de_pv_updated_when_loct_is_newer(self): + """§C.3: LS Reply forwarder must update DE PV when LocT TST is strictly newer.""" + mib = MIB() + router = Router(mib) + router.duplicate_address_detection = Mock() + router.location_table.new_ls_reply_packet = Mock() + link_layer = Mock() + router.link_layer = link_layer + + so_pv = LongPositionVector(gn_addr=_make_gn_addr()).set_tst_in_normal_timestamp_seconds( + self._TIMESTAMP) + + # DE is a neighbour with LocT TST = TIMESTAMP + 1 (newer) + de_entry, de_addr = self._make_de_entry(mib, self._TIMESTAMP + 1.0) + + def mock_get_entry(addr): + if addr == de_addr: + return de_entry + return None + router.location_table.get_entry = Mock(side_effect=mock_get_entry) + + bh, ch, ls_reply, raw = self._build_ls_reply_raw_with_tst( + so_pv, de_addr, self._TIMESTAMP) + router.gn_data_indicate_ls_reply(raw, ch, bh) + + link_layer.send.assert_called_once() + forwarded = link_layer.send.call_args[0][0] + fwd_ls = LSReplyExtendedHeader.decode(forwarded[12:]) + from flexstack.geonet.position_vector import TST + expected_tst = TST().set_in_normal_timestamp_seconds(self._TIMESTAMP + 1.0) + self.assertEqual(fwd_ls.de_pv.tst, expected_tst) + + def test_ls_reply_forwarder_de_pv_not_updated_when_loct_is_older(self): + """§C.3 ELSE: LS Reply forwarder must NOT update DE PV when LocT TST is not newer.""" + mib = MIB() + router = Router(mib) + router.duplicate_address_detection = Mock() + router.location_table.new_ls_reply_packet = Mock() + link_layer = Mock() + router.link_layer = link_layer + + so_pv = 
LongPositionVector(gn_addr=_make_gn_addr()).set_tst_in_normal_timestamp_seconds( + self._TIMESTAMP) + + # DE is a neighbour with LocT TST = TIMESTAMP (not newer than packet) + de_entry, de_addr = self._make_de_entry(mib, self._TIMESTAMP) + + def mock_get_entry(addr): + if addr == de_addr: + return de_entry + return None + router.location_table.get_entry = Mock(side_effect=mock_get_entry) + + bh, ch, ls_reply, raw = self._build_ls_reply_raw_with_tst( + so_pv, de_addr, self._TIMESTAMP) + router.gn_data_indicate_ls_reply(raw, ch, bh) + + link_layer.send.assert_called_once() + forwarded = link_layer.send.call_args[0][0] + fwd_ls = LSReplyExtendedHeader.decode(forwarded[12:]) + from flexstack.geonet.position_vector import TST + expected_tst = TST().set_in_normal_timestamp_seconds(self._TIMESTAMP) + self.assertEqual(fwd_ls.de_pv.tst, expected_tst) + self.assertEqual(fwd_ls.de_pv.latitude, 0) + self.assertEqual(fwd_ls.de_pv.longitude, 0) + + +# --------------------------------------------------------------------------- +# Annex D – GeoNetworking forwarding algorithm selection procedure +# --------------------------------------------------------------------------- + +class TestAnnexD(unittest.TestCase): + """Unit tests verifying compliance with ETSI EN 302 636-4-1 V1.4.1 Annex D.""" + + # Area centred at (421255850, 27601710) with a = b = 100 m + _AREA = Area(latitude=421255850, longitude=27601710, a=100, b=100, angle=0) + # Ego positions (1/10 micro-degree) + _LAT_INSIDE = 421255850 # at area centre → F = 1 + _LON_INSIDE = 27601710 + _LAT_OUTSIDE = 421236840 # far outside → F << 0 + _LON_OUTSIDE = 27632710 + + def _make_request(self) -> GNDataRequest: + return GNDataRequest( + packet_transport_type=PacketTransportType( + header_type=HeaderType.GEOBROADCAST, + header_subtype=GeoBroadcastHST.GEOBROADCAST_CIRCLE, + ), + area=self._AREA, + ) + + def _make_router_outside(self) -> Router: + mib = MIB() + router = Router(mib) + router.ego_position_vector = LongPositionVector( + 
latitude=self._LAT_OUTSIDE, longitude=self._LON_OUTSIDE) + return router + + # ── §D.1 F(x,y) for rectangle ─────────────────────────────────────────── + + def test_rect_f_at_centre_is_one(self): + """§D / EN 302 931: F(0, 0) for rectangle must equal 1 (at centre).""" + mib = MIB() + router = Router(mib) + result = router.gn_geometric_function_f( + GeoBroadcastHST.GEOBROADCAST_RECT, self._AREA, + self._LAT_INSIDE, self._LON_INSIDE, + ) + self.assertAlmostEqual(result, 1.0) + + def test_rect_f_outside_on_y_axis_is_negative(self): + """§D / EN 302 931: F(x≈0, y>>b) for rectangle must be negative (outside).""" + mib = MIB() + router = Router(mib) + # Same latitude as centre (x≈0) but longitude far away (|y| >> b=100m) + result = router.gn_geometric_function_f( + GeoBroadcastHST.GEOBROADCAST_RECT, self._AREA, + self._LAT_INSIDE, self._LON_OUTSIDE, + ) + self.assertLess(result, 0) + + # ── §D.2 gn_forwarding_algorithm_selection ────────────────────────────── + + def test_ego_inside_returns_area_forwarding(self): + """§D step 9: ego inside/at border → AREA_FORWARDING.""" + mib = MIB() + router = Router(mib) + router.ego_position_vector = LongPositionVector( + latitude=self._LAT_INSIDE, longitude=self._LON_INSIDE) + result = router.gn_forwarding_algorithm_selection(self._make_request()) + self.assertEqual(result, GNForwardingAlgorithmResponse.AREA_FORWARDING) + + def test_ego_outside_no_sender_returns_non_area_forwarding(self): + """§D ELSE, no sender (source op): SE_POS_VALID = False → NON_AREA_FORWARDING.""" + router = self._make_router_outside() + result = router.gn_forwarding_algorithm_selection(self._make_request()) + self.assertEqual( + result, GNForwardingAlgorithmResponse.NON_AREA_FORWARDING) + + def test_ego_outside_sender_outside_pai_true_returns_non_area_forwarding(self): + """§D ELSE: sender outside area, PAI=True → F_SE < 0 → NON_AREA_FORWARDING.""" + router = self._make_router_outside() + sender_addr = _make_gn_addr() + se_entry = 
router.location_table.ensure_entry(sender_addr) + se_pv = LongPositionVector( + gn_addr=sender_addr, pai=True, + latitude=self._LAT_OUTSIDE, longitude=self._LON_OUTSIDE, + ) + se_entry.update_position_vector(se_pv) + + result = router.gn_forwarding_algorithm_selection( + self._make_request(), sender_gn_addr=sender_addr) + self.assertEqual( + result, GNForwardingAlgorithmResponse.NON_AREA_FORWARDING) + + def test_ego_outside_sender_inside_pai_true_returns_discarted(self): + """§D ELSE: sender inside area, PAI=True → SE_POS_VALID AND F_SE ≥ 0 → DISCARTED.""" + router = self._make_router_outside() + sender_addr = _make_gn_addr() + se_entry = router.location_table.ensure_entry(sender_addr) + se_pv = LongPositionVector( + gn_addr=sender_addr, pai=True, + latitude=self._LAT_INSIDE, longitude=self._LON_INSIDE, + ) + se_entry.update_position_vector(se_pv) + + result = router.gn_forwarding_algorithm_selection( + self._make_request(), sender_gn_addr=sender_addr) + self.assertEqual(result, GNForwardingAlgorithmResponse.DISCARTED) + + def test_ego_outside_sender_inside_pai_false_returns_non_area_forwarding(self): + """§D ELSE: sender inside area but PAI=False → SE_POS_VALID=False → NON_AREA_FORWARDING.""" + router = self._make_router_outside() + sender_addr = _make_gn_addr() + se_entry = router.location_table.ensure_entry(sender_addr) + se_pv = LongPositionVector( + gn_addr=sender_addr, pai=False, + latitude=self._LAT_INSIDE, longitude=self._LON_INSIDE, + ) + se_entry.update_position_vector(se_pv) + + result = router.gn_forwarding_algorithm_selection( + self._make_request(), sender_gn_addr=sender_addr) + self.assertEqual( + result, GNForwardingAlgorithmResponse.NON_AREA_FORWARDING) + + def test_ego_outside_sender_not_in_loct_returns_non_area_forwarding(self): + """§D ELSE: sender not in LocT → PV_SE not found → SE_POS_VALID=False → NON_AREA_FORWARDING.""" + router = self._make_router_outside() + unknown_addr = GNAddress(m=M.GN_MULTICAST, st=ST.CYCLIST, + 
mid=MID(b"\xff\xff\xff\xff\xff\xff")) + result = router.gn_forwarding_algorithm_selection( + self._make_request(), sender_gn_addr=unknown_addr) + self.assertEqual( + result, GNForwardingAlgorithmResponse.NON_AREA_FORWARDING) + + # ── §D.4 GAC indicate – Annex D sender check ──────────────────────────── + + def _build_gac_raw_with_pv(self, so_pv: LongPositionVector, rhl: int = 3) -> bytes: + """Build a complete GAC wire packet with the given SO PV.""" + area = _make_gac_area(inside=True) # centred at (0,0), r=100m + bh = BasicHeader(version=1, rhl=rhl) + ch = CommonHeader(ht=HeaderType.GEOANYCAST, + hst=GeoAnycastHST.GEOANYCAST_CIRCLE) # type: ignore + gbc = GBCExtendedHeader( + sn=1, so_pv=so_pv, + latitude=area.latitude, longitude=area.longitude, + a=area.a, b=area.b, angle=area.angle, + ) + return bh.encode_to_bytes() + ch.encode_to_bytes() + gbc.encode() + b"payload" + + def _setup_gac_outside_router(self): + """Return a router whose ego is outside the GAC test area at (0,0) r=100m.""" + mib = MIB() + router = Router(mib) + router.ego_position_vector = LongPositionVector( + latitude=100000000, longitude=100000000) + router.duplicate_address_detection = Mock() + router.location_table.new_gac_packet = Mock() # prevent LocT-update side-effects + link_layer = Mock() + router.link_layer = link_layer + return router, link_layer + + def test_gac_indicate_ego_outside_sender_inside_pai_true_discards(self): + """§D: ego outside, sender at area centre with PAI=True → must be discarded.""" + router, link_layer = self._setup_gac_outside_router() + so_addr = _make_gn_addr() + # Pre-populate LocT: sender at area centre (0,0), PAI=True → F_SE = 1 ≥ 0 + se_entry = router.location_table.ensure_entry(so_addr) + se_pv = LongPositionVector( + gn_addr=so_addr, pai=True, latitude=0, longitude=0) + se_entry.update_position_vector(se_pv) + + so_pv = LongPositionVector( + gn_addr=so_addr, pai=True, latitude=0, longitude=0) + raw = self._build_gac_raw_with_pv(so_pv, rhl=3) + ch = 
CommonHeader.decode_from_bytes(raw[4:12]) + bh = BasicHeader.decode_from_bytes(raw[0:4]) + + router.gn_data_indicate_gac(raw[12:], ch, bh) + + link_layer.send.assert_not_called() + + def test_gac_indicate_ego_outside_sender_inside_pai_false_forwards(self): + """§D ELSE: ego outside, sender inside but PAI=False → SE_POS_VALID=False → forward.""" + router, link_layer = self._setup_gac_outside_router() + so_addr = _make_gn_addr() + # Pre-populate LocT: sender at area centre (0,0) but PAI=False → SE_POS_VALID = False + se_entry = router.location_table.ensure_entry(so_addr) + se_pv = LongPositionVector( + gn_addr=so_addr, pai=False, latitude=0, longitude=0) + se_entry.update_position_vector(se_pv) + + so_pv = LongPositionVector( + gn_addr=so_addr, pai=False, latitude=0, longitude=0) + raw = self._build_gac_raw_with_pv(so_pv, rhl=3) + ch = CommonHeader.decode_from_bytes(raw[4:12]) + bh = BasicHeader.decode_from_bytes(raw[0:4]) + + router.gn_data_indicate_gac(raw[12:], ch, bh) + + link_layer.send.assert_called_once() + + def test_gac_indicate_ego_outside_sender_outside_pai_true_forwards(self): + """§D ELSE: ego outside, sender also outside area with PAI=True → F_SE < 0 → forward.""" + router, link_layer = self._setup_gac_outside_router() + so_addr = _make_gn_addr() + # Pre-populate LocT: sender far outside area, PAI=True → F_SE < 0 + se_entry = router.location_table.ensure_entry(so_addr) + se_pv = LongPositionVector( + gn_addr=so_addr, pai=True, latitude=100000000, longitude=100000000) + se_entry.update_position_vector(se_pv) + + so_pv = LongPositionVector( + gn_addr=so_addr, pai=True, latitude=100000000, longitude=100000000) + raw = self._build_gac_raw_with_pv(so_pv, rhl=3) + ch = CommonHeader.decode_from_bytes(raw[4:12]) + bh = BasicHeader.decode_from_bytes(raw[0:4]) + + router.gn_data_indicate_gac(raw[12:], ch, bh) + + link_layer.send.assert_called_once() + + +# --------------------------------------------------------------------------- +# Annex E – Non-area forwarding 
algorithms (Greedy Forwarding) +# --------------------------------------------------------------------------- + +class TestAnnexE(unittest.TestCase): + """Unit tests verifying compliance with ETSI EN 302 636-4-1 V1.4.1 Annex E.""" + + # Coordinates in 1/10 µdeg + _EGO_LAT = 421255850 # ~42.1 N + _EGO_LON = 27601710 # ~2.76 E + # Destination ~200 m east of ego + _DEST_LAT = 421255850 + _DEST_LON = 27628710 + # Area centre for GBC/GAC tests (same as Annex D) + _AREA = Area(latitude=421255850, longitude=27601710, a=100, b=100, angle=0) + + def _make_router(self) -> Router: + mib = MIB() + router = Router(mib) + router.ego_position_vector = LongPositionVector( + latitude=self._EGO_LAT, longitude=self._EGO_LON) + return router + + def _neighbour_entry(self, router: Router, lat: int, lon: int): + """Insert a LocTE marked as neighbour at (lat, lon).""" + addr = GNAddress(m=M.GN_MULTICAST, st=ST.UNKNOWN, + mid=MID(b"\x01\x02\x03\x04\x05\x06")) + entry = router.location_table.ensure_entry(addr) + pv = LongPositionVector(gn_addr=addr, latitude=lat, longitude=lon) + entry.update_position_vector(pv) + entry.is_neighbour = True + return entry + + # ── _distance_m ───────────────────────────────────────────────────────── + + def test_distance_m_zero_same_point(self): + """Distance from a point to itself is 0.""" + d = Router._distance_m(self._EGO_LAT, self._EGO_LON, + self._EGO_LAT, self._EGO_LON) + self.assertAlmostEqual(d, 0.0) + + def test_distance_m_positive_different_points(self): + """Distance between two distinct points is positive.""" + d = Router._distance_m(self._EGO_LAT, self._EGO_LON, + self._DEST_LAT, self._DEST_LON) + self.assertGreater(d, 0.0) + + # ── gn_greedy_forwarding ──────────────────────────────────────────────── + + def test_greedy_neighbour_closer_to_dest_returns_true(self): + """§E.2: neighbour closer to destination than ego → GF returns True (send to NH).""" + router = self._make_router() + tc = TrafficClass() + # Place neighbour halfway between ego 
and destination + mid_lon = (self._EGO_LON + self._DEST_LON) // 2 + self._neighbour_entry(router, self._DEST_LAT, mid_lon) + + result = router.gn_greedy_forwarding( + self._DEST_LAT, self._DEST_LON, tc) + + self.assertTrue(result) + + def test_greedy_no_neighbours_scf_false_bcast_fallback(self): + """§E.2: local optimum, SCF=False → BCAST fallback → returns True.""" + router = self._make_router() + tc = TrafficClass(scf=False) + + result = router.gn_greedy_forwarding( + self._DEST_LAT, self._DEST_LON, tc) + + self.assertTrue(result) + + def test_greedy_no_neighbours_scf_true_buffer(self): + """§E.2: local optimum, SCF=True → buffer → returns False.""" + router = self._make_router() + tc = TrafficClass(scf=True) + + result = router.gn_greedy_forwarding( + self._DEST_LAT, self._DEST_LON, tc) + + self.assertFalse(result) + + def test_greedy_neighbour_farther_than_ego_local_optimum_scf_false(self): + """§E.2: neighbour farther from dest than ego → local optimum + SCF=False → True.""" + router = self._make_router() + tc = TrafficClass(scf=False) + # Place neighbour on the opposite side of ego relative to the destination + far_lon = self._EGO_LON - (self._DEST_LON - self._EGO_LON) + self._neighbour_entry(router, self._EGO_LAT, far_lon) + + result = router.gn_greedy_forwarding( + self._DEST_LAT, self._DEST_LON, tc) + + self.assertTrue(result) + + # ── GUC source: GF blocks send when local optimum + SCF ───────────────── + + def test_guc_source_scf_true_local_optimum_not_sent(self): + """§10.3.8.2 step 4 / §E.2: dest == ego (MFR=0) + SCF=True → local optimum → not sent.""" + router = self._make_router() + link_layer = Mock() + router.link_layer = link_layer + # Add one neighbour far from dest so Step 3 (no-neighbours gate) is bypassed + far_neigh_lon = self._EGO_LON - (self._DEST_LON - self._EGO_LON) * 2 + self._neighbour_entry(router, self._EGO_LAT, far_neigh_lon) + # Destination positioned at ego → MFR=0; no neighbour is closer → local optimum + dest_addr = 
GNAddress(m=M.GN_MULTICAST, st=ST.UNKNOWN, + mid=MID(b"\xAA\xBB\xCC\xDD\xEE\xFF")) + dest_entry = router.location_table.ensure_entry(dest_addr) + dest_lpv = LongPositionVector( + gn_addr=dest_addr, latitude=self._EGO_LAT, longitude=self._EGO_LON) + dest_entry.update_position_vector(dest_lpv) + tc_scf = TrafficClass(scf=True) + request = GNDataRequest( + packet_transport_type=PacketTransportType( + header_type=HeaderType.GEOUNICAST, + ), + destination=dest_addr, + traffic_class=tc_scf, + data=b"test", + ) + + confirm = router.gn_data_request_guc(request) + + self.assertEqual(confirm.result_code, ResultCode.ACCEPTED) + link_layer.send.assert_not_called() + + # ── GBC NON_AREA: GF executes, sends toward area centre ───────────────── + + def test_gbc_non_area_forwarding_sends_with_scf_false(self): + """§E.2 via gn_data_request_gbc: ego outside area + no neighbours + SCF=False → sent.""" + mib = MIB() + router = Router(mib) + router.ego_position_vector = LongPositionVector( + latitude=100000000, longitude=100000000) + link_layer = Mock() + router.link_layer = link_layer + + request = GNDataRequest( + packet_transport_type=PacketTransportType( + header_type=HeaderType.GEOBROADCAST, + header_subtype=GeoBroadcastHST.GEOBROADCAST_CIRCLE, + ), + area=self._AREA, + traffic_class=TrafficClass(scf=False), + data=b"gbc-data", + ) + confirm = router.gn_data_request_gbc(request) + + self.assertEqual(confirm.result_code, ResultCode.ACCEPTED) + link_layer.send.assert_called_once() + + def test_gbc_non_area_forwarding_not_sent_with_scf_true(self): + """§E.2 via gn_data_request_gbc: ego outside + farther neighbour + SCF=True → buffered.""" + mib = MIB() + router = Router(mib) + # Ego 300 m north of area centre – outside area (radius=100 m) + ego_lat = self._AREA.latitude + 30_000 # +300 m in 1/10 µdeg + router.ego_position_vector = LongPositionVector( + latitude=ego_lat, longitude=self._AREA.longitude) + link_layer = Mock() + router.link_layer = link_layer + # Add a neighbour 600 m 
from area centre (farther than ego) so the + # "no-neighbours + SCF" pre-check is bypassed and GF is actually exercised + neigh_addr = GNAddress( + m=M.GN_MULTICAST, st=ST.UNKNOWN, mid=MID(b"\x11\x22\x33\x44\x55\x66")) + neigh_entry = router.location_table.ensure_entry(neigh_addr) + neigh_pv = LongPositionVector( + gn_addr=neigh_addr, + latitude=self._AREA.latitude + 60_000, + longitude=self._AREA.longitude, + ) + neigh_entry.update_position_vector(neigh_pv) + neigh_entry.is_neighbour = True + + request = GNDataRequest( + packet_transport_type=PacketTransportType( + header_type=HeaderType.GEOBROADCAST, + header_subtype=GeoBroadcastHST.GEOBROADCAST_CIRCLE, + ), + area=self._AREA, + traffic_class=TrafficClass(scf=True), + data=b"gbc-data", + ) + confirm = router.gn_data_request_gbc(request) + + self.assertEqual(confirm.result_code, ResultCode.ACCEPTED) + link_layer.send.assert_not_called() + + +class TestAnnexF(unittest.TestCase): + """Unit tests verifying compliance with ETSI EN 302 636-4-1 V1.4.1 Annex F.""" + + # Area centred inside ego position (same geometry as Annex D/E tests) + _AREA = Area(latitude=421255850, longitude=27601710, a=100, b=100, angle=0) + _LAT_INSIDE = 421255850 + _LON_INSIDE = 27601710 + + def _make_router_inside(self, algo: AreaForwardingAlgorithm = AreaForwardingAlgorithm.CBF) -> Router: + mib = MIB(itsGnAreaForwardingAlgorithm=algo) + router = Router(mib) + router.ego_position_vector = LongPositionVector( + latitude=self._LAT_INSIDE, longitude=self._LON_INSIDE) + router.duplicate_address_detection = Mock() + router.location_table.new_gbc_packet = Mock() + return router + + def _make_gbc_headers(self, so_pv: LongPositionVector, rhl: int = 3): + bh = BasicHeader(version=1, rhl=rhl) + ch = CommonHeader( + ht=HeaderType.GEOBROADCAST, + hst=GeoBroadcastHST.GEOBROADCAST_CIRCLE, # type: ignore + ) + gbc = GBCExtendedHeader( + sn=42, so_pv=so_pv, + latitude=self._AREA.latitude, longitude=self._AREA.longitude, + a=self._AREA.a, b=self._AREA.b, 
angle=self._AREA.angle, + ) + return bh, ch, gbc + + # ── §F.3 _cbf_compute_timeout_ms ──────────────────────────────────────── + + def test_cbf_timeout_at_zero_dist_is_max(self): + """§F.3 eq. F.1: DIST=0 → TO = TO_CBF_MAX.""" + mib = MIB() + router = Router(mib) + to = router._cbf_compute_timeout_ms(0.0) + self.assertAlmostEqual(to, mib.itsGnCbfMaxTime) + + def test_cbf_timeout_at_dist_max_is_min(self): + """§F.3 eq. F.1: DIST=DIST_MAX → TO = TO_CBF_MIN.""" + mib = MIB() + router = Router(mib) + to = router._cbf_compute_timeout_ms( + float(mib.itsGnDefaultMaxCommunicationRange)) + self.assertAlmostEqual(to, mib.itsGnCbfMinTime) + + def test_cbf_timeout_linear_midpoint(self): + """§F.3 eq. F.1: DIST=DIST_MAX/2 → TO = midpoint between MIN and MAX.""" + mib = MIB() + router = Router(mib) + mid_dist = mib.itsGnDefaultMaxCommunicationRange / 2.0 + to = router._cbf_compute_timeout_ms(mid_dist) + expected = (mib.itsGnCbfMinTime + mib.itsGnCbfMaxTime) / 2.0 + self.assertAlmostEqual(to, expected) + + # ── §F.3 gn_area_cbf_forwarding ───────────────────────────────────────── + + def test_cbf_new_packet_buffered_not_sent_immediately(self): + """§F.3: new packet → buffered in CBF buffer → link_layer.send NOT called immediately.""" + router = self._make_router_inside() + link_layer = Mock() + router.link_layer = link_layer + so_pv = LongPositionVector(gn_addr=_make_gn_addr()) + bh, ch, gbc = self._make_gbc_headers(so_pv) + + result = router.gn_area_cbf_forwarding(bh, ch, gbc, b"payload") + + self.assertTrue(result) # buffered (§F.3 return 0) + link_layer.send.assert_not_called() + # Clean up timer + key = (gbc.so_pv.gn_addr, gbc.sn) + with router._cbf_lock: + if key in router._cbf_buffer: + router._cbf_buffer.pop(key).cancel() + + def test_cbf_duplicate_cancels_timer_and_discards(self): + """§F.3: duplicate arrival → timer cancelled, returns False (discard, -1).""" + router = self._make_router_inside() + link_layer = Mock() + router.link_layer = link_layer + so_pv = 
LongPositionVector(gn_addr=_make_gn_addr()) + bh, ch, gbc = self._make_gbc_headers(so_pv) + + first = router.gn_area_cbf_forwarding(bh, ch, gbc, b"payload") + second = router.gn_area_cbf_forwarding(bh, ch, gbc, b"payload") + + self.assertTrue(first) # first: buffered + self.assertFalse(second) # duplicate: discarded + link_layer.send.assert_not_called() + # Buffer must be empty after duplicate suppression + key = (gbc.so_pv.gn_addr, gbc.sn) + with router._cbf_lock: + self.assertNotIn(key, router._cbf_buffer) + + def test_cbf_timer_expiry_sends_packet(self): + """§F.3: when timer fires → link_layer.send is called once.""" + import threading + import unittest.mock + + router = self._make_router_inside() + link_layer = Mock() + router.link_layer = link_layer + so_pv = LongPositionVector(gn_addr=_make_gn_addr()) + bh, ch, gbc = self._make_gbc_headers(so_pv) + + fired_event = threading.Event() + + def send_and_signal(pkt): + fired_event.set() + + link_layer.send.side_effect = send_and_signal + + # Patch Timer to fire after a very short delay (10 ms) + real_timer_args = [] + + def fast_timer(interval, func, args=()): + t = threading.Timer(0.01, func, args) + real_timer_args.append(t) + return t + + with unittest.mock.patch("flexstack.geonet.router.Timer", side_effect=fast_timer): + router.gn_area_cbf_forwarding(bh, ch, gbc, b"payload") + + fired_event.wait(timeout=2.0) + link_layer.send.assert_called_once() + + # ── §F.2 Simple forwarding via gn_data_forward_gbc ────────────────────── + + def test_simple_forwarding_sends_immediately(self): + """§F.2: SIMPLE algorithm → AREA_FORWARDING sends immediately via BCAST.""" + router = self._make_router_inside(algo=AreaForwardingAlgorithm.SIMPLE) + link_layer = Mock() + router.link_layer = link_layer + so_pv = LongPositionVector(gn_addr=_make_gn_addr()) + bh, ch, gbc = self._make_gbc_headers(so_pv, rhl=3) + router.gn_forwarding_algorithm_selection = Mock( + return_value=GNForwardingAlgorithmResponse.AREA_FORWARDING) + + 
router.gn_data_forward_gbc(bh, ch, gbc, b"payload") + + link_layer.send.assert_called_once() + + def test_cbf_forwarding_does_not_send_immediately_via_data_forward(self): + """§F.3: CBF algorithm → gn_data_forward_gbc does NOT call link_layer.send immediately.""" + router = self._make_router_inside(algo=AreaForwardingAlgorithm.CBF) + link_layer = Mock() + router.link_layer = link_layer + so_pv = LongPositionVector(gn_addr=_make_gn_addr()) + bh, ch, gbc = self._make_gbc_headers(so_pv, rhl=3) + router.gn_forwarding_algorithm_selection = Mock( + return_value=GNForwardingAlgorithmResponse.AREA_FORWARDING) + + router.gn_data_forward_gbc(bh, ch, gbc, b"payload") + + link_layer.send.assert_not_called() + # Clean up timer + key = (gbc.so_pv.gn_addr, gbc.sn) + with router._cbf_lock: + if key in router._cbf_buffer: + router._cbf_buffer.pop(key).cancel() + + def test_source_always_sends_immediately_regardless_of_cbf(self): + """§F.3 lines 1-3: source (gn_data_request_gbc) with ego inside area → immediate BCAST.""" + mib = MIB(itsGnAreaForwardingAlgorithm=AreaForwardingAlgorithm.CBF) + router = Router(mib) + router.ego_position_vector = LongPositionVector( + latitude=self._LAT_INSIDE, longitude=self._LON_INSIDE) + link_layer = Mock() + router.link_layer = link_layer + + request = GNDataRequest( + packet_transport_type=PacketTransportType( + header_type=HeaderType.GEOBROADCAST, + header_subtype=GeoBroadcastHST.GEOBROADCAST_CIRCLE, + ), + area=self._AREA, + data=b"source-data", + ) + confirm = router.gn_data_request_gbc(request) + + self.assertEqual(confirm.result_code, ResultCode.ACCEPTED) + link_layer.send.assert_called_once() # §F.3: source always sends immediately diff --git a/tests/flexstack/geonet/test_service_access_point.py b/tests/flexstack/geonet/test_service_access_point.py index b0d21e8..37395be 100644 --- a/tests/flexstack/geonet/test_service_access_point.py +++ b/tests/flexstack/geonet/test_service_access_point.py @@ -138,6 +138,8 @@ def test_to_dict(self): 
"traffic_class": "AA==", "length": 0, "data": "", + 'max_hop_limit': 1, + "max_packet_lifetime": None, "area": {"latitude": 0, "longitude": 0, "a": 0, "b": 0, "angle": 0}, }, ) @@ -187,8 +189,11 @@ def test_to_dict(self): { "upper_protocol_entity": 0, "packet_transport_type": {"header_type": 5, "header_subtype": 0}, + "destination_area": None, "source_position_vector": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", "traffic_class": "AA==", + "remaining_packet_lifetime": None, + "remaining_hop_limit": None, "length": 0, "data": "", }, diff --git a/tests/flexstack/geonet/test_tsb_extended_header.py b/tests/flexstack/geonet/test_tsb_extended_header.py new file mode 100644 index 0000000..1fdf71d --- /dev/null +++ b/tests/flexstack/geonet/test_tsb_extended_header.py @@ -0,0 +1,57 @@ +import unittest + +from flexstack.geonet.tsb_extended_header import TSBExtendedHeader +from flexstack.geonet.position_vector import LongPositionVector +from flexstack.geonet.service_access_point import GNDataRequest +from flexstack.geonet.exceptions import DecodeError + + +class TestTSBExtendedHeader(unittest.TestCase): + + def test_encode_decode_roundtrip(self): + """Encoding then decoding must return an equal header.""" + lpv = LongPositionVector(latitude=421255850, longitude=27601710) + original = TSBExtendedHeader(sn=42, reserved=0, so_pv=lpv) + encoded = original.encode() + self.assertEqual(len(encoded), 28) + decoded = TSBExtendedHeader.decode(encoded) + self.assertEqual(decoded.sn, 42) + self.assertEqual(decoded.reserved, 0) + self.assertEqual(decoded.so_pv.latitude, lpv.latitude) + self.assertEqual(decoded.so_pv.longitude, lpv.longitude) + + def test_encode_length(self): + """Encoded header must be exactly 28 bytes.""" + header = TSBExtendedHeader(sn=1) + self.assertEqual(len(header.encode()), 28) + + def test_decode_too_short_raises(self): + """Decoding fewer than 28 bytes must raise DecodeError.""" + with self.assertRaises(DecodeError): + TSBExtendedHeader.decode(bytes(27)) + + def 
test_initialize_with_request_sequence_number_ego_pv(self): + """Factory method must set sn and so_pv from the given arguments.""" + lpv = LongPositionVector(latitude=100, longitude=200) + request = GNDataRequest() + header = TSBExtendedHeader.initialize_with_request_sequence_number_ego_pv( + request, sequence_number=7, ego_pv=lpv + ) + self.assertEqual(header.sn, 7) + self.assertEqual(header.so_pv.latitude, 100) + self.assertEqual(header.so_pv.longitude, 200) + + def test_sn_byte_order(self): + """SN must be stored in big-endian at bytes 0-1 of the encoded header.""" + header = TSBExtendedHeader(sn=0x0102) + encoded = header.encode() + self.assertEqual(encoded[0], 0x01) + self.assertEqual(encoded[1], 0x02) + + def test_reserved_zero_by_default(self): + """Reserved field must default to 0.""" + header = TSBExtendedHeader() + self.assertEqual(header.reserved, 0) + encoded = header.encode() + self.assertEqual(encoded[2], 0x00) + self.assertEqual(encoded[3], 0x00) diff --git a/tests/flexstack/management/__init__.py b/tests/flexstack/management/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/flexstack/management/test_dcc_adaptive.py b/tests/flexstack/management/test_dcc_adaptive.py new file mode 100644 index 0000000..6397054 --- /dev/null +++ b/tests/flexstack/management/test_dcc_adaptive.py @@ -0,0 +1,444 @@ +import unittest + +from flexstack.management.dcc_adaptive import ( + DccAdaptive, + DccAdaptiveParameters, + GateKeeper, +) + + +class TestDccAdaptiveInit(unittest.TestCase): + """Tests for DccAdaptive initialisation.""" + + def test_default_parameters(self): + """Default parameters match Table 3 of §5.4.""" + alg = DccAdaptive() + p = alg.parameters + self.assertAlmostEqual(p.alpha, 0.016) + self.assertAlmostEqual(p.beta, 0.0012) + self.assertAlmostEqual(p.cbr_target, 0.68) + self.assertAlmostEqual(p.delta_max, 0.03) + self.assertAlmostEqual(p.delta_min, 0.0006) + self.assertAlmostEqual(p.delta_up_max, 0.0005) + 
self.assertAlmostEqual(p.delta_down_max, -0.00025) + + def test_initial_cbr_its_s_is_zero(self): + """cbr_its_s starts at 0.0 before the first update.""" + alg = DccAdaptive() + self.assertEqual(alg.cbr_its_s, 0.0) + + def test_initial_delta_is_delta_min(self): + """delta is initialised to delta_min (conservative start).""" + alg = DccAdaptive() + self.assertAlmostEqual(alg.delta, alg.parameters.delta_min) + + def test_custom_parameters_accepted(self): + """Custom DccAdaptiveParameters are stored correctly.""" + params = DccAdaptiveParameters(alpha=0.1, beta=0.05, cbr_target=0.5) + alg = DccAdaptive(parameters=params) + self.assertAlmostEqual(alg.parameters.alpha, 0.1) + self.assertAlmostEqual(alg.parameters.cbr_target, 0.5) + + +class TestDccAdaptiveStep1(unittest.TestCase): + """Step 1: CBR smoothing (equation 1 of §5.4).""" + + def test_cbr_its_s_uses_local_average(self): + """ + cbr_its_s = 0.5*cbr_its_s + 0.5*((cbr_local + cbr_local_previous)/2). + Starting from 0.0 with both measurements = 0.4, result = 0.1. + """ + alg = DccAdaptive() + alg.update(cbr_local=0.4, cbr_local_previous=0.4) + # 0.5*0.0 + 0.5*(0.4+0.4)/2 = 0.5*0.4 = 0.2 + self.assertAlmostEqual(alg.cbr_its_s, 0.2) + + def test_cbr_its_s_accumulates_over_calls(self): + """cbr_its_s accumulates correctly across multiple updates.""" + alg = DccAdaptive() + alg.update(cbr_local=0.4, cbr_local_previous=0.4) + # After first call: cbr_its_s = 0.2 + alg.update(cbr_local=0.4, cbr_local_previous=0.4) + # After second call: 0.5*0.2 + 0.5*0.4 = 0.1 + 0.2 = 0.3 + self.assertAlmostEqual(alg.cbr_its_s, 0.3) + + def test_cbr_its_s_uses_global_when_provided(self): + """ + When global CBR is supplied, it replaces local in step 1 (NOTE 1 of §5.4). 
+ """ + alg = DccAdaptive() + alg.update( + cbr_local=0.1, # should be ignored + cbr_local_previous=0.1, # should be ignored + cbr_global=0.6, + cbr_global_previous=0.6, + ) + # 0.5*0.0 + 0.5*(0.6+0.6)/2 = 0.5*0.6 = 0.3 + self.assertAlmostEqual(alg.cbr_its_s, 0.3) + + def test_cbr_its_s_ignores_global_when_only_one_provided(self): + """ + Only one global measurement → local values are used instead. + """ + alg = DccAdaptive() + alg.update( + cbr_local=0.4, + cbr_local_previous=0.4, + cbr_global=0.9, # cbr_global_previous is missing + ) + # Falls back to local: 0.5*(0.4+0.4)/2 = 0.2 + self.assertAlmostEqual(alg.cbr_its_s, 0.2) + + +class TestDccAdaptiveStep2(unittest.TestCase): + """Step 2: delta_offset clamping (equations 2–3 of §5.4).""" + + def _alg_with_cbr(self, cbr_its_s: float) -> DccAdaptive: + """Return an algorithm whose cbr_its_s is already set to cbr_its_s.""" + # Inject the value directly to isolate step 2 behaviour. + alg = DccAdaptive() + alg.cbr_its_s = cbr_its_s + return alg + + def test_positive_diff_uses_equation_2(self): + """ + When cbr_its_s < cbr_target: delta_offset = min(β*(target-cbr), upmax). + With cbr_its_s=0.0, diff=0.68, β*diff=0.000816 > delta_up_max=0.0005 + → clamped to 0.0005. + """ + alg = self._alg_with_cbr(0.0) + alg.update(cbr_local=0.0, cbr_local_previous=0.0) + # After update, delta should have been incremented by delta_up_max + # delta = (1-0.016)*delta_min + 0.0005 + expected_delta = (1 - 0.016) * 0.0006 + 0.0005 + self.assertAlmostEqual(alg.delta, expected_delta, places=10) + + def test_negative_diff_uses_equation_3(self): + """ + When cbr_its_s > cbr_target: delta_offset = max(β*(target-cbr), downmax). + With cbr_its_s=1.0, diff=-0.32, β*diff=-0.000384 > delta_down_max=-0.00025 + → delta_offset=-0.000384, which is between downmax and 0. 
+ """ + alg = self._alg_with_cbr(1.0) + initial_delta = alg.delta + alg.update(cbr_local=1.0, cbr_local_previous=1.0) + # cbr_its_s after step 1: 0.5*1.0 + 0.5*(1.0+1.0)/2 = 1.0 + # diff = 0.68 - 1.0 = -0.32; beta*diff = -0.000384; downmax=-0.00025 + # Since -0.000384 < -0.00025, delta_offset = -0.00025 + expected_delta = (1 - 0.016) * initial_delta + (-0.00025) + expected_delta = max(0.0006, min(0.03, expected_delta)) + self.assertAlmostEqual(alg.delta, expected_delta, places=10) + + def test_small_positive_diff_not_clamped(self): + """ + A small positive difference produces delta_offset = β*diff (no clamping). + With cbr_its_s=0.679, diff=0.001, β*diff=0.0000012 < upmax=0.0005 + → offset=0.0000012 (not clamped). + """ + p = DccAdaptiveParameters() + alg = DccAdaptive(parameters=p) + alg.cbr_its_s = 0.679 + alg.delta = 0.01 # known value to simplify assertion + alg.update(cbr_local=0.679, cbr_local_previous=0.679) + # step 1: cbr_its_s = 0.5*0.679 + 0.5*0.679 = 0.679 + # step 2: diff=0.001, offset=0.0000012 + # step 3: delta = (1-0.016)*0.01 + 0.0000012 ≈ 0.0098412 + diff = 0.68 - 0.679 + expected_offset = p.beta * diff + expected_delta = (1 - p.alpha) * 0.01 + expected_offset + self.assertAlmostEqual(alg.delta, expected_delta, places=10) + + def test_large_negative_diff_clamped_to_down_max(self): + """ + A large negative diff is clamped to delta_down_max. + cbr_its_s set to 0.99, diff=-0.31, β*diff=-0.000372 < downmax=-0.00025 + → delta_offset = -0.00025. 
+ """ + alg = DccAdaptive() + alg.cbr_its_s = 0.99 + alg.delta = 0.02 + alg.update(cbr_local=0.99, cbr_local_previous=0.99) + # cbr_its_s after step1: 0.5*0.99 + 0.5*0.99 = 0.99 + # diff=0.68-0.99=-0.31; beta*diff=-0.000372; clamped to -0.00025 + expected_delta = (1 - 0.016) * 0.02 + (-0.00025) + self.assertAlmostEqual(alg.delta, expected_delta, places=10) + + +class TestDccAdaptiveSteps3to5(unittest.TestCase): + """Steps 3–5: exponential filter and clamping.""" + + def test_step3_exponential_filter(self): + """delta = (1-α)*delta + delta_offset.""" + alg = DccAdaptive() + alg.cbr_its_s = 0.68 # exactly at target → diff=0, offset=0 + alg.delta = 0.01 + alg.update(cbr_local=0.68, cbr_local_previous=0.68) + # offset will be 0 (diff=0 → positive branch → min(0, upmax)=0) + expected = (1 - 0.016) * 0.01 + 0.0 + self.assertAlmostEqual(alg.delta, expected, places=10) + + def test_step4_clamps_delta_to_max(self): + """delta greater than delta_max is clamped to delta_max.""" + alg = DccAdaptive() + alg.delta = 0.029 # close to max + alg.cbr_its_s = 0.0 # far below target → large positive offset + alg.update(cbr_local=0.0, cbr_local_previous=0.0) + self.assertLessEqual(alg.delta, alg.parameters.delta_max) + + def test_step5_clamps_delta_to_min(self): + """delta less than delta_min is clamped to delta_min.""" + alg = DccAdaptive() + alg.delta = alg.parameters.delta_min # at floor + alg.cbr_its_s = 1.0 # far above target → large negative offset + # Force cbr_its_s to stay at 1.0 so step 1 keeps it there + alg.update(cbr_local=1.0, cbr_local_previous=1.0) + self.assertGreaterEqual(alg.delta, alg.parameters.delta_min) + + def test_delta_converges_toward_target(self): + """ + After many evaluations at exactly cbr_target, delta stabilises + within the permitted range. 
+ """ + alg = DccAdaptive() + cbr = 0.68 + for _ in range(500): + alg.update(cbr_local=cbr, cbr_local_previous=cbr) + self.assertGreaterEqual(alg.delta, alg.parameters.delta_min) + self.assertLessEqual(alg.delta, alg.parameters.delta_max) + + def test_delta_increases_when_cbr_below_target(self): + """ + Sustained low CBR causes delta to increase toward delta_max over time. + """ + alg = DccAdaptive() + prev_delta = alg.delta + for _ in range(50): + alg.update(cbr_local=0.0, cbr_local_previous=0.0) + self.assertGreater(alg.delta, prev_delta) + + def test_delta_decreases_when_cbr_above_target(self): + """ + Sustained high CBR causes delta to decrease toward delta_min. + """ + alg = DccAdaptive() + alg.delta = 0.02 # start above min + for _ in range(200): + alg.update(cbr_local=1.0, cbr_local_previous=1.0) + self.assertEqual(alg.delta, alg.parameters.delta_min) + + +class TestDccAdaptiveValidation(unittest.TestCase): + """Input validation tests for DccAdaptive.update.""" + + def test_cbr_local_below_zero_raises(self): + alg = DccAdaptive() + with self.assertRaises(ValueError): + alg.update(cbr_local=-0.01, cbr_local_previous=0.5) + + def test_cbr_local_above_one_raises(self): + alg = DccAdaptive() + with self.assertRaises(ValueError): + alg.update(cbr_local=1.01, cbr_local_previous=0.5) + + def test_cbr_local_previous_below_zero_raises(self): + alg = DccAdaptive() + with self.assertRaises(ValueError): + alg.update(cbr_local=0.5, cbr_local_previous=-0.01) + + def test_cbr_local_previous_above_one_raises(self): + alg = DccAdaptive() + with self.assertRaises(ValueError): + alg.update(cbr_local=0.5, cbr_local_previous=1.01) + + def test_boundary_values_valid(self): + alg = DccAdaptive() + alg.update(cbr_local=0.0, cbr_local_previous=0.0) + alg.update(cbr_local=1.0, cbr_local_previous=1.0) + + +# --------------------------------------------------------------------------- +# GateKeeper tests +# --------------------------------------------------------------------------- + 
+class TestGateKeeperInit(unittest.TestCase): + """Tests for GateKeeper initialisation.""" + + def test_gate_is_open_initially(self): + """Before any packet is admitted the gate is open.""" + gk = GateKeeper(delta=0.01) + self.assertTrue(gk.is_open(t=0.0)) + + def test_gate_is_open_at_any_time_initially(self): + """is_open() returns True at any time before first admission.""" + gk = GateKeeper(delta=0.01) + for t in [0.0, 100.0, -1.0]: + self.assertTrue(gk.is_open(t=t)) + + +class TestGateKeeperAdmit(unittest.TestCase): + """Tests for GateKeeper.admit_packet (equation B.1).""" + + def test_first_packet_admitted(self): + """The first packet is always admitted when the gate is open.""" + gk = GateKeeper(delta=0.01) + self.assertTrue(gk.admit_packet(t=0.0, t_on=0.001)) + + def test_gate_closed_after_admission(self): + """Gate closes immediately after a packet is admitted.""" + gk = GateKeeper(delta=0.01) + gk.admit_packet(t=0.0, t_on=0.001) + self.assertFalse(gk.is_open(t=0.0)) + + def test_second_packet_rejected_when_gate_closed(self): + """A second admission attempt while the gate is closed is rejected.""" + gk = GateKeeper(delta=0.01) + gk.admit_packet(t=0.0, t_on=0.001) + self.assertFalse(gk.admit_packet(t=0.0, t_on=0.001)) + + def test_gate_opens_after_t_go(self): + """ + Gate opens exactly at t_go = t_pg + t_on/delta. + delta=0.01, t_on=0.001 → interval=0.1 s ≥ 0.025 → t_go=0.1. + """ + gk = GateKeeper(delta=0.01) + gk.admit_packet(t=0.0, t_on=0.001) # t_go = 0.0 + 0.1 = 0.1 + self.assertFalse(gk.is_open(t=0.099)) + self.assertTrue(gk.is_open(t=0.1)) + + def test_minimum_interval_enforced(self): + """ + t_on/delta < 0.025 s → interval clamped to 0.025 s. + delta=1.0, t_on=0.001 → t_on/delta=0.001 < 0.025 → t_go=0.025. 
+ """ + gk = GateKeeper(delta=1.0) + gk.admit_packet(t=0.0, t_on=0.001) + self.assertFalse(gk.is_open(t=0.024)) + self.assertTrue(gk.is_open(t=0.025)) + + def test_maximum_interval_enforced(self): + """ + t_on/delta > 1 s → interval clamped to 1.0 s. + delta=0.0006, t_on=0.001 → t_on/delta ≈ 1.667 > 1.0 → t_go=1.0. + """ + gk = GateKeeper(delta=0.0006) + gk.admit_packet(t=0.0, t_on=0.001) + self.assertFalse(gk.is_open(t=0.99)) + self.assertTrue(gk.is_open(t=1.0)) + + def test_t_on_zero_raises(self): + gk = GateKeeper(delta=0.01) + with self.assertRaises(ValueError): + gk.admit_packet(t=0.0, t_on=0.0) + + def test_t_on_negative_raises(self): + gk = GateKeeper(delta=0.01) + with self.assertRaises(ValueError): + gk.admit_packet(t=0.0, t_on=-0.001) + + +class TestGateKeeperUpdateDelta(unittest.TestCase): + """Tests for GateKeeper.update_delta (equation B.2).""" + + def test_update_delta_when_gate_open_only_changes_delta(self): + """ + When the gate is open, update_delta only changes the stored delta + without rescheduling t_go. + """ + gk = GateKeeper(delta=0.01) + # Gate is open (no packet admitted) → no rescheduling + gk.update_delta(t=0.0, delta_new=0.02) + # Gate should still be open + self.assertTrue(gk.is_open(t=0.0)) + + def test_update_delta_rescales_interval_when_delta_increases(self): + """ + Increasing delta reduces the remaining gate-closed interval per B.2. + Original: delta=0.01, t_on=0.001 → interval=0.1 s. + New delta=0.02 → new_interval = (0.01/0.02)*0.1 = 0.05 s. + """ + gk = GateKeeper(delta=0.01) + gk.admit_packet(t=0.0, t_on=0.001) # t_go = 0.1 + gk.update_delta(t=0.0, delta_new=0.02) # t_go → 0.05 + self.assertFalse(gk.is_open(t=0.049)) + self.assertTrue(gk.is_open(t=0.05)) + + def test_update_delta_rescales_interval_when_delta_decreases(self): + """ + Decreasing delta increases the remaining gate-closed interval per B.2. + Original: delta=0.02, t_on=0.001 → interval=0.05 s. + New delta=0.01 → new_interval = (0.02/0.01)*0.05 = 0.1 s. 
+ """ + gk = GateKeeper(delta=0.02) + gk.admit_packet(t=0.0, t_on=0.001) # t_go = 0.05 + gk.update_delta(t=0.0, delta_new=0.01) # t_go → 0.1 + self.assertFalse(gk.is_open(t=0.099)) + self.assertTrue(gk.is_open(t=0.1)) + + def test_update_delta_minimum_interval_enforced(self): + """ + New interval below 0.025 s is clamped to 0.025 s in B.2. + Original: delta=0.01, interval=0.1. + New delta=0.1 → (0.01/0.1)*0.1 = 0.01 < 0.025 → clamped to 0.025. + """ + gk = GateKeeper(delta=0.01) + gk.admit_packet(t=0.0, t_on=0.001) + gk.update_delta(t=0.0, delta_new=0.1) + self.assertFalse(gk.is_open(t=0.024)) + self.assertTrue(gk.is_open(t=0.025)) + + def test_update_delta_maximum_interval_enforced(self): + """ + New interval above 1.0 s is clamped to 1.0 s in B.2. + Original: delta=0.02, interval=0.05. + New delta=0.0001 → (0.02/0.0001)*0.05 = 10 > 1.0 → clamped to 1.0. + """ + gk = GateKeeper(delta=0.02) + gk.admit_packet(t=0.0, t_on=0.001) + gk.update_delta(t=0.0, delta_new=0.0001) + self.assertFalse(gk.is_open(t=0.99)) + self.assertTrue(gk.is_open(t=1.0)) + + def test_update_delta_zero_raises(self): + gk = GateKeeper(delta=0.01) + with self.assertRaises(ValueError): + gk.update_delta(t=0.0, delta_new=0.0) + + def test_update_delta_negative_raises(self): + gk = GateKeeper(delta=0.01) + with self.assertRaises(ValueError): + gk.update_delta(t=0.0, delta_new=-0.01) + + def test_update_after_gate_reopens_no_rescheduling(self): + """ + If the gate has already opened by the time update_delta is called, + only the stored delta value changes (no rescheduling needed). 
+ """ + gk = GateKeeper(delta=0.01) + gk.admit_packet(t=0.0, t_on=0.001) # t_go = 0.1 + # Advance time past t_go so the gate is now open + gk.update_delta(t=0.5, delta_new=0.005) + # Gate should still be open + self.assertTrue(gk.is_open(t=0.5)) + + +class TestGateKeeperAndAdaptiveIntegration(unittest.TestCase): + """Integration: GateKeeper paces transmissions driven by DccAdaptive.""" + + def test_gate_keeper_uses_updated_delta(self): + """ + After update_delta, subsequent admissions use the new delta to compute + t_go per equation B.1. + """ + gk = GateKeeper(delta=0.01) + gk.admit_packet(t=0.0, t_on=0.001) # t_go = 0.1 (first admission) + + # Gate opens at 0.1; admit a second packet + self.assertTrue(gk.is_open(t=0.1)) + gk.update_delta(t=0.1, delta_new=0.02) # delta now 0.02 + gk.admit_packet(t=0.1, t_on=0.001) # t_go = 0.1 + 0.001/0.02 = 0.1+0.05=0.15 + self.assertFalse(gk.is_open(t=0.14)) + self.assertTrue(gk.is_open(t=0.15)) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/flexstack/management/test_dcc_reactive.py b/tests/flexstack/management/test_dcc_reactive.py new file mode 100644 index 0000000..43ce4ff --- /dev/null +++ b/tests/flexstack/management/test_dcc_reactive.py @@ -0,0 +1,266 @@ +import unittest + +from flexstack.management.dcc_reactive import ( + DccReactive, + DccReactiveOutput, + DccState, +) + + +class TestDccReactiveInit(unittest.TestCase): + """Tests for DccReactive initialisation.""" + + def test_initial_state_is_relaxed(self): + """Algorithm starts in the RELAXED state.""" + dcc = DccReactive() + self.assertEqual(dcc.state, DccState.RELAXED) + + def test_table_a1_selected_by_default(self): + """t_on_max_us=1000 (default) selects Table A.1 (10 Hz relaxed rate).""" + dcc = DccReactive(t_on_max_us=1000) + out = dcc.update(cbr=0.0) + self.assertEqual(out.packet_rate_hz, 10.0) + self.assertEqual(out.t_off_ms, 100.0) + + def test_table_a2_selected_for_500us(self): + """t_on_max_us=500 selects Table A.2 (20 Hz relaxed 
rate).""" + dcc = DccReactive(t_on_max_us=500) + out = dcc.update(cbr=0.0) + self.assertEqual(out.packet_rate_hz, 20.0) + self.assertEqual(out.t_off_ms, 50.0) + + def test_table_a2_selected_for_below_500us(self): + """t_on_max_us < 500 also selects Table A.2.""" + dcc = DccReactive(t_on_max_us=250) + out = dcc.update(cbr=0.0) + self.assertEqual(out.packet_rate_hz, 20.0) + + +class TestDccReactiveTableA1(unittest.TestCase): + """Tests for the Table A.1 parameter set (T_on ≤ 1 ms).""" + + def _dcc(self) -> DccReactive: + return DccReactive(t_on_max_us=1000) + + # ------------------------------------------------------------------ + # Output parameter correctness per state + # ------------------------------------------------------------------ + + def test_relaxed_output_params(self): + """RELAXED → 10 Hz, 100 ms T_off (Table A.1).""" + dcc = self._dcc() + out = dcc.update(cbr=0.0) + self.assertEqual(out.state, DccState.RELAXED) + self.assertEqual(out.packet_rate_hz, 10.0) + self.assertEqual(out.t_off_ms, 100.0) + + def test_active1_output_params(self): + """ACTIVE_1 → 5 Hz, 200 ms T_off (Table A.1).""" + dcc = self._dcc() + out = dcc.update(cbr=0.35) + self.assertEqual(out.state, DccState.ACTIVE_1) + self.assertEqual(out.packet_rate_hz, 5.0) + self.assertEqual(out.t_off_ms, 200.0) + + def test_active2_output_params(self): + """ACTIVE_2 → 2.5 Hz, 400 ms T_off (Table A.1).""" + dcc = self._dcc() + dcc.update(cbr=0.45) # Relaxed → Active1 + out = dcc.update(cbr=0.45) # Active1 → Active2 + self.assertEqual(out.state, DccState.ACTIVE_2) + self.assertEqual(out.packet_rate_hz, 2.5) + self.assertEqual(out.t_off_ms, 400.0) + + def test_active3_output_params(self): + """ACTIVE_3 → 2 Hz, 500 ms T_off (Table A.1).""" + dcc = self._dcc() + # Step up: Relaxed→A1→A2→A3 + for _ in range(3): + dcc.update(cbr=0.55) + out = dcc.update(cbr=0.55) + self.assertEqual(out.state, DccState.ACTIVE_3) + self.assertEqual(out.packet_rate_hz, 2.0) + self.assertEqual(out.t_off_ms, 500.0) + + def 
test_restrictive_output_params(self): + """RESTRICTIVE → 1 Hz, 1000 ms T_off (Table A.1).""" + dcc = self._dcc() + # Step up to Restrictive (4 steps needed from Relaxed) + for _ in range(4): + dcc.update(cbr=0.65) + out = dcc.update(cbr=0.65) + self.assertEqual(out.state, DccState.RESTRICTIVE) + self.assertEqual(out.packet_rate_hz, 1.0) + self.assertEqual(out.t_off_ms, 1000.0) + + # ------------------------------------------------------------------ + # Transition correctness + # ------------------------------------------------------------------ + + def test_low_cbr_stays_relaxed(self): + """Repeated low-CBR evaluations keep the state RELAXED.""" + dcc = self._dcc() + for _ in range(5): + out = dcc.update(cbr=0.10) + self.assertEqual(out.state, DccState.RELAXED) + + def test_cbr_at_lower_threshold_triggers_active1(self): + """CBR exactly at 0.30 transitions RELAXED → ACTIVE_1.""" + dcc = self._dcc() + out = dcc.update(cbr=0.30) + self.assertEqual(out.state, DccState.ACTIVE_1) + + def test_adjacent_only_from_relaxed_to_active1(self): + """ + From RELAXED, even a very high CBR advances only one step to ACTIVE_1 + (adjacency constraint of §5.3). 
+ """ + dcc = self._dcc() + out = dcc.update(cbr=0.99) + self.assertEqual(out.state, DccState.ACTIVE_1) + + def test_two_steps_from_relaxed_to_active2(self): + """Two consecutive high-CBR evaluations from RELAXED reach ACTIVE_2.""" + dcc = self._dcc() + dcc.update(cbr=0.55) # Relaxed → Active1 + out = dcc.update(cbr=0.55) # Active1 → Active2 + self.assertEqual(out.state, DccState.ACTIVE_2) + + def test_step_down_from_active1_to_relaxed(self): + """Low CBR from ACTIVE_1 steps down to RELAXED.""" + dcc = self._dcc() + dcc.update(cbr=0.35) # → Active1 + out = dcc.update(cbr=0.10) # → Relaxed + self.assertEqual(out.state, DccState.RELAXED) + + def test_same_state_when_cbr_in_band(self): + """No transition when CBR stays within the current state's band.""" + dcc = self._dcc() + dcc.update(cbr=0.35) # → Active1 + out = dcc.update(cbr=0.32) # still in Active1 band + self.assertEqual(out.state, DccState.ACTIVE_1) + + def test_upward_then_downward_transition_sequence(self): + """Full round-trip: Relaxed → A1 → A2 → A1 → Relaxed.""" + dcc = self._dcc() + states = [] + for cbr in [0.45, 0.45, 0.10, 0.10]: + states.append(dcc.update(cbr=cbr).state) + self.assertEqual(states, [ + DccState.ACTIVE_1, + DccState.ACTIVE_2, + DccState.ACTIVE_1, + DccState.RELAXED, + ]) + + def test_cannot_skip_states_going_up(self): + """No state is ever skipped regardless of CBR magnitude.""" + dcc = self._dcc() + previous_idx = 0 + for _ in range(6): + out = dcc.update(cbr=1.0) # always maximum CBR + current_idx = list(DccState).index(out.state) + self.assertLessEqual(current_idx - previous_idx, 1) + previous_idx = current_idx + + def test_cannot_skip_states_going_down(self): + """No state is ever skipped when descending.""" + dcc = self._dcc() + # Drive to Restrictive + for _ in range(5): + dcc.update(cbr=1.0) + previous_idx = list(DccState).index(dcc.state) + for _ in range(6): + out = dcc.update(cbr=0.0) # always minimum CBR + current_idx = list(DccState).index(out.state) + 
self.assertGreaterEqual(previous_idx - current_idx, 0) + self.assertLessEqual(previous_idx - current_idx, 1) + previous_idx = current_idx + + def test_cbr_boundary_at_060_table_a1(self): + """CBR = 0.60 (Table A.1) should target RESTRICTIVE (>= 0.60).""" + dcc = self._dcc() + # Drive to Active3 first + for _ in range(3): + dcc.update(cbr=0.65) + out = dcc.update(cbr=0.60) + self.assertEqual(out.state, DccState.RESTRICTIVE) + + +class TestDccReactiveTableA2(unittest.TestCase): + """Tests for Table A.2 parameter set (T_on ≤ 500 µs).""" + + def _dcc(self) -> DccReactive: + return DccReactive(t_on_max_us=500) + + def test_relaxed_output_params_table_a2(self): + """RELAXED → 20 Hz, 50 ms T_off (Table A.2).""" + dcc = self._dcc() + out = dcc.update(cbr=0.10) + self.assertEqual(out.state, DccState.RELAXED) + self.assertEqual(out.packet_rate_hz, 20.0) + self.assertEqual(out.t_off_ms, 50.0) + + def test_active3_output_params_table_a2(self): + """ACTIVE_3 → 4 Hz, 250 ms T_off (Table A.2).""" + dcc = self._dcc() + for _ in range(3): + dcc.update(cbr=0.60) + out = dcc.update(cbr=0.60) + self.assertEqual(out.state, DccState.ACTIVE_3) + self.assertEqual(out.packet_rate_hz, 4.0) + self.assertEqual(out.t_off_ms, 250.0) + + def test_restrictive_threshold_table_a2(self): + """Table A.2 RESTRICTIVE threshold is 0.65 (not 0.60).""" + dcc = self._dcc() + # Drive to Active3 (0.60 is still in A3 band for Table A.2) + for _ in range(3): + dcc.update(cbr=0.70) + out = dcc.update(cbr=0.65) + self.assertEqual(out.state, DccState.RESTRICTIVE) + + +class TestDccReactiveValidation(unittest.TestCase): + """Tests for input validation.""" + + def test_cbr_below_zero_raises(self): + dcc = DccReactive() + with self.assertRaises(ValueError): + dcc.update(cbr=-0.01) + + def test_cbr_above_one_raises(self): + dcc = DccReactive() + with self.assertRaises(ValueError): + dcc.update(cbr=1.01) + + def test_cbr_zero_valid(self): + dcc = DccReactive() + out = dcc.update(cbr=0.0) + self.assertEqual(out.state, 
DccState.RELAXED) + + def test_cbr_one_valid(self): + dcc = DccReactive() + out = dcc.update(cbr=1.0) + self.assertEqual(out.state, DccState.ACTIVE_1) + + +class TestDccReactiveOutput(unittest.TestCase): + """Tests for DccReactiveOutput dataclass.""" + + def test_output_is_frozen(self): + """DccReactiveOutput instances are immutable.""" + out = DccReactiveOutput( + state=DccState.RELAXED, packet_rate_hz=10.0, t_off_ms=100.0 + ) + with self.assertRaises(Exception): + out.state = DccState.ACTIVE_1 # type: ignore[misc] + + def test_output_equality(self): + a = DccReactiveOutput(DccState.ACTIVE_1, 5.0, 200.0) + b = DccReactiveOutput(DccState.ACTIVE_1, 5.0, 200.0) + self.assertEqual(a, b) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/flexstack/security/test_certificate.py b/tests/flexstack/security/test_certificate.py index c579a2c..899478d 100644 --- a/tests/flexstack/security/test_certificate.py +++ b/tests/flexstack/security/test_certificate.py @@ -17,7 +17,7 @@ def test__init__(self): def test_from_dict(self): # Given - certificate_dict = {'version': 3, 'type': 'explicit', 'issuer': ('self', 'sha256'), 'toBeSigned': {'id': ('name', 'root'), 'cracaId': b'\xa4\x95\x99', 'crlSeries': 0, 'validityPeriod': {'start': 0, 'duration': ('seconds', 30)}, 'certIssuePermissions': [{'subjectPermissions': ('all', None), 'minChainLength': 2, 'chainLengthRange': 0, 'eeType': (b'\x00', 1)}], 'verifyKeyIndicator': ('verificationKey', ('ecdsaNistP256', ('uncompressedP256', { + certificate_dict = {'version': 3, 'type': 'explicit', 'issuer': ('self', 'sha256'), 'toBeSigned': {'id': ('name', 'root'), 'cracaId': b'\x00\x00\x00', 'crlSeries': 0, 'validityPeriod': {'start': 0, 'duration': ('seconds', 30)}, 'certIssuePermissions': [{'subjectPermissions': ('all', None), 'minChainLength': 2, 'chainLengthRange': 0, 'eeType': (b'\x00', 1)}], 'verifyKeyIndicator': ('verificationKey', ('ecdsaNistP256', ('uncompressedP256', { 'x': 
b'\xbc\x0b\x0e\xd4\xd1\rRY\xa7\xb9\xff@\x89\xb9\xbc\xf0\x16)\x9b\xed\xa3Ni\x19\x06\xc6\xa3VG\x92\xdd^', 'y': b'\xfd\xd8\xca\x19\xa8xO\xae\xc9\xcd\xcc\xfa2@\x87\x07\x8b\xaf\xb9\x9d\xbdp\xe0\r"E\xd3FEx\xfbj'})))}, 'signature': ('ecdsaNistP256Signature', {'rSig': ('x-only', b"\x89\x03>\x04'\xdd\xd0W\xb5\xf2\xda\x9b\xcbY\x10p\x94\xd1}\xfcD\x15\xb6\xfb\x12\rd\x7f\x9cj\xc4\xb7"), 'sSig': b'8li\n\xa1e\xef\xb8\xa9\n\xb0\x8a\xd4A\x8f\xfb\x10\xb3\x06\x13|_j\x14\xda-\xce\xa9&r\xd9\x9c'})} # When cert = Certificate.from_dict(certificate_dict) @@ -55,7 +55,7 @@ def test_as_clear_certificate(self): "issuer": ("sha256AndDigest", (0xa495991b7852b855).to_bytes(8, byteorder='big')), "toBeSigned": { "id": ("name", "i2cat.net"), - "cracaId": (0xa49599).to_bytes(3, byteorder='big'), + "cracaId": b'\x00\x00\x00', "crlSeries": 0, "validityPeriod": { "start": 0, @@ -200,7 +200,7 @@ def test_verify(self): backend = self.backend to_be_signed = { "id": ("name", "root"), - "cracaId": (0xa49599).to_bytes(3, byteorder='big'), + "cracaId": b'\x00\x00\x00', "crlSeries": 0, "validityPeriod": {"start": 0, "duration": ("seconds", 30)}, "certIssuePermissions": [ @@ -217,6 +217,141 @@ def test_verify(self): backend, to_be_signed) self.assertTrue(root_certificate.verify(backend)) + # ------------------------------------------------------------------ §7.2 + def _make_cert(self, issuer, id_choice, tbs_extras=None): + """Helper: build a Certificate dict for profile tests.""" + tbs = { + "id": id_choice, + "cracaId": b'\x00\x00\x00', + "crlSeries": 0, + "validityPeriod": {"start": 0, "duration": ("seconds", 30)}, + "appPermissions": [{"psid": 36}], + "verifyKeyIndicator": ("verificationKey", ("ecdsaNistP256", ("fill", None))), + } + if tbs_extras: + tbs.update(tbs_extras) + return Certificate(certificate={ + "version": 3, + "type": "explicit", + "issuer": issuer, + "toBeSigned": tbs, + "signature": ("ecdsaNistP256Signature", {"rSig": ("fill", None), "sSig": b'\x00' * 32}), + }) + + def 
test_is_authorization_ticket_valid(self): + """§7.2.1: issued cert, id=none, no certIssuePermissions → True.""" + cert = self._make_cert( + issuer=("sha256AndDigest", b'\x00' * 8), + id_choice=("none", None), + ) + self.assertTrue(cert.is_authorization_ticket()) + + def test_is_authorization_ticket_rejects_self_signed(self): + """§7.2.1: self-signed issuer is not an AT.""" + cert = self._make_cert( + issuer=("self", "sha256"), + id_choice=("none", None), + ) + self.assertFalse(cert.is_authorization_ticket()) + + def test_is_authorization_ticket_rejects_id_name(self): + """§7.2.1: id=name is not an AT (must be none).""" + cert = self._make_cert( + issuer=("sha256AndDigest", b'\x00' * 8), + id_choice=("name", "some-name"), + ) + self.assertFalse(cert.is_authorization_ticket()) + + def test_is_authorization_ticket_rejects_cert_issue_permissions(self): + """§7.2.1: certIssuePermissions SHALL be absent in an AT.""" + cert = self._make_cert( + issuer=("sha256AndDigest", b'\x00' * 8), + id_choice=("none", None), + tbs_extras={"certIssuePermissions": [{"subjectPermissions": ("all", None), "minChainLength": 1, "chainLengthRange": 0, "eeType": (b'\x00', 1)}]}, + ) + self.assertFalse(cert.is_authorization_ticket()) + + def test_is_enrolment_credential_valid(self): + """§7.2.2: explicit, issued, id=name, no certIssuePermissions → True.""" + cert = self._make_cert( + issuer=("sha256AndDigest", b'\x00' * 8), + id_choice=("name", "ec-name"), + ) + self.assertTrue(cert.is_enrolment_credential()) + + def test_is_enrolment_credential_rejects_id_none(self): + """§7.2.2: id=none is not an EC (must be name).""" + cert = self._make_cert( + issuer=("sha256AndDigest", b'\x00' * 8), + id_choice=("none", None), + ) + self.assertFalse(cert.is_enrolment_credential()) + + def test_is_enrolment_credential_rejects_cert_issue_permissions(self): + """§7.2.2: certIssuePermissions SHALL be absent in an EC.""" + cert = self._make_cert( + issuer=("sha256AndDigest", b'\x00' * 8), + id_choice=("name", 
"ec-name"), + tbs_extras={"certIssuePermissions": [{"subjectPermissions": ("all", None), "minChainLength": 1, "chainLengthRange": 0, "eeType": (b'\x00', 1)}]}, + ) + self.assertFalse(cert.is_enrolment_credential()) + + def test_is_root_ca_certificate_valid(self): + """§7.2.3: explicit, self-signed, id=name, certIssuePermissions + appPermissions present → True.""" + cert = self._make_cert( + issuer=("self", "sha256"), + id_choice=("name", "root-ca"), + tbs_extras={"certIssuePermissions": [{"subjectPermissions": ("all", None), "minChainLength": 2, "chainLengthRange": 0, "eeType": (b'\x00', 1)}]}, + ) + self.assertTrue(cert.is_root_ca_certificate()) + + def test_is_root_ca_certificate_rejects_issued(self): + """§7.2.3: issued cert (sha256AndDigest) is not a Root CA.""" + cert = self._make_cert( + issuer=("sha256AndDigest", b'\x00' * 8), + id_choice=("name", "root-ca"), + tbs_extras={"certIssuePermissions": [{"subjectPermissions": ("all", None), "minChainLength": 2, "chainLengthRange": 0, "eeType": (b'\x00', 1)}]}, + ) + self.assertFalse(cert.is_root_ca_certificate()) + + def test_is_root_ca_certificate_rejects_missing_cert_issue_permissions(self): + """§7.2.3: certIssuePermissions SHALL be present in Root CA.""" + cert = self._make_cert( + issuer=("self", "sha256"), + id_choice=("name", "root-ca"), + ) + self.assertFalse(cert.is_root_ca_certificate()) + + def test_is_subordinate_ca_certificate_valid(self): + """§7.2.4: explicit, issued, id=name, encryptionKey + certIssuePermissions present → True.""" + cert = self._make_cert( + issuer=("sha256AndDigest", b'\x00' * 8), + id_choice=("name", "sub-ca"), + tbs_extras={ + "encryptionKey": {"supportedSymmAlg": "aes128Ccm", "publicKey": ("eciesNistP256", ("x-only", b'\x00' * 32))}, + "certIssuePermissions": [{"subjectPermissions": ("all", None), "minChainLength": 1, "chainLengthRange": 0, "eeType": (b'\x00', 1)}], + }, + ) + self.assertTrue(cert.is_subordinate_ca_certificate()) + + def 
test_is_subordinate_ca_certificate_rejects_missing_encryption_key(self): + """§7.2.4: encryptionKey SHALL be present in a subordinate CA.""" + cert = self._make_cert( + issuer=("sha256AndDigest", b'\x00' * 8), + id_choice=("name", "sub-ca"), + tbs_extras={"certIssuePermissions": [{"subjectPermissions": ("all", None), "minChainLength": 1, "chainLengthRange": 0, "eeType": (b'\x00', 1)}]}, + ) + self.assertFalse(cert.is_subordinate_ca_certificate()) + + def test_is_subordinate_ca_certificate_rejects_missing_cert_issue_permissions(self): + """§7.2.4: certIssuePermissions SHALL be present in a subordinate CA.""" + cert = self._make_cert( + issuer=("sha256AndDigest", b'\x00' * 8), + id_choice=("name", "sub-ca"), + tbs_extras={"encryptionKey": {"supportedSymmAlg": "aes128Ccm", "publicKey": ("eciesNistP256", ("x-only", b'\x00' * 32))}}, + ) + self.assertFalse(cert.is_subordinate_ca_certificate()) + class TestOwnCertificate(unittest.TestCase): def setUp(self) -> None: @@ -232,7 +367,7 @@ def test_initialize_certificate(self): # Given to_be_signed = { "id": ("name", "test.com"), - "cracaId": (0xa49599).to_bytes(3, byteorder='big'), + "cracaId": b'\x00\x00\x00', "crlSeries": 0, "validityPeriod": { "start": 0, @@ -265,7 +400,7 @@ def test_verify_to_be_signed_certificate(self): # Given acceptable_to_be_signed = { "id": ("name", "i2cat.net"), - "cracaId": (0xa49599).to_bytes(3, byteorder='big'), + "cracaId": b'\x00\x00\x00', "crlSeries": 0, "validityPeriod": { "start": 0, @@ -285,7 +420,7 @@ def test_verify_to_be_signed_certificate(self): "verifyKeyIndicator": ("verificationKey", ("ecdsaNistP256", ("fill", None))) } bad_to_be_signed = { - "cracaId": (0xa49599).to_bytes(3, byteorder='big'), + "cracaId": b'\x00\x00\x00', "crlSeries": 0, "validityPeriod": { "start": 0, @@ -296,6 +431,38 @@ def test_verify_to_be_signed_certificate(self): }], "verifyKeyIndicator": ("verificationKey", ("ecdsaNistP256", ("fill", None))) } + bad_wrong_cracaid = { + "id": ("name", "test"), + "cracaId": 
b'\xa4\x95\x99', + "crlSeries": 0, + "validityPeriod": {"start": 0, "duration": ("seconds", 30)}, + "appPermissions": [{"psid": 0}], + "verifyKeyIndicator": ("verificationKey", ("ecdsaNistP256", ("fill", None))) + } + bad_wrong_id_choice = { + "id": ("linkageId", {"iCert": b'\x00\x01\x02\x03\x04\x05\x06\x07', "linkage-value": b'\x00\x01\x02\x03\x04\x05\x06\x07\x08'}), + "cracaId": b'\x00\x00\x00', + "crlSeries": 0, + "validityPeriod": {"start": 0, "duration": ("seconds", 30)}, + "appPermissions": [{"psid": 0}], + "verifyKeyIndicator": ("verificationKey", ("ecdsaNistP256", ("fill", None))) + } + bad_no_permissions = { + "id": ("name", "test"), + "cracaId": b'\x00\x00\x00', + "crlSeries": 0, + "validityPeriod": {"start": 0, "duration": ("seconds", 30)}, + "verifyKeyIndicator": ("verificationKey", ("ecdsaNistP256", ("fill", None))) + } + bad_has_cert_request_permissions = { + "id": ("name", "test"), + "cracaId": b'\x00\x00\x00', + "crlSeries": 0, + "validityPeriod": {"start": 0, "duration": ("seconds", 30)}, + "appPermissions": [{"psid": 0}], + "certRequestPermissions": [{"subjectPermissions": ("all", None), "minChainLength": 1, "chainLengthRange": 0, "eeType": (b'\x00', 1)}], + "verifyKeyIndicator": ("verificationKey", ("ecdsaNistP256", ("fill", None))) + } # When validity_good = OwnCertificate.verify_to_be_signed_certificate( acceptable_to_be_signed) @@ -304,12 +471,16 @@ def test_verify_to_be_signed_certificate(self): # Then self.assertTrue(validity_good) self.assertFalse(validity_bad) + self.assertFalse(OwnCertificate.verify_to_be_signed_certificate(bad_wrong_cracaid)) + self.assertFalse(OwnCertificate.verify_to_be_signed_certificate(bad_wrong_id_choice)) + self.assertFalse(OwnCertificate.verify_to_be_signed_certificate(bad_no_permissions)) + self.assertFalse(OwnCertificate.verify_to_be_signed_certificate(bad_has_cert_request_permissions)) def test_issue_certificate(self): # Given to_be_signed_to_issue = { "id": ("name", "root"), - "cracaId": (0xa23).to_bytes(3, 
byteorder='big'), + "cracaId": b'\x00\x00\x00', "crlSeries": 0, "validityPeriod": { "start": 0, @@ -330,7 +501,7 @@ def test_issue_certificate(self): to_be_signed_to_be_issued = { "id": ("name", "issued"), - "cracaId": (0xa49).to_bytes(3, byteorder='big'), + "cracaId": b'\x00\x00\x00', "crlSeries": 0, "validityPeriod": { "start": 0, @@ -350,7 +521,7 @@ def test_sign_message(self): # Given to_be_signed = { "id": ("name", "test"), - "cracaId": (0xa49599).to_bytes(3, byteorder='big'), + "cracaId": b'\x00\x00\x00', "crlSeries": 0, "validityPeriod": {"start": 0, "duration": ("seconds", 30)}, "certIssuePermissions": [ diff --git a/tests/flexstack/security/test_certificate_library.py b/tests/flexstack/security/test_certificate_library.py index 9542f37..7e3b8e6 100644 --- a/tests/flexstack/security/test_certificate_library.py +++ b/tests/flexstack/security/test_certificate_library.py @@ -314,6 +314,28 @@ def test_verify_sequence_of_certificates_three_with_known_root(self, mock_from_d [{"at": "data"}, {"aa": "data"}, {"root": "data"}], self.backend ) + def test_get_ca_certificate_by_hashedid3_found_in_aa(self): + """get_ca_certificate_by_hashedid3 finds AA cert by last 3 bytes of its HashedId8.""" + library = CertificateLibrary( + self.backend, [self.root_cert], [self.aa_cert], [] + ) + # AA hashedid8 = b'\x11\x12\x13\x14\x15\x16\x17\x18' → hashedid3 = b'\x16\x17\x18' + result = library.get_ca_certificate_by_hashedid3(b'\x16\x17\x18') + self.assertEqual(result, self.aa_cert) + + def test_get_ca_certificate_by_hashedid3_found_in_root(self): + """get_ca_certificate_by_hashedid3 finds Root CA cert by last 3 bytes of its HashedId8.""" + library = CertificateLibrary(self.backend, [self.root_cert], [], []) + # Root hashedid8 = b'\x01\x02\x03\x04\x05\x06\x07\x08' → hashedid3 = b'\x06\x07\x08' + result = library.get_ca_certificate_by_hashedid3(b'\x06\x07\x08') + self.assertEqual(result, self.root_cert) + + def test_get_ca_certificate_by_hashedid3_not_found(self): + 
"""get_ca_certificate_by_hashedid3 returns None when no certificate matches.""" + library = CertificateLibrary(self.backend, [], [], []) + result = library.get_ca_certificate_by_hashedid3(b'\xff\xff\xff') + self.assertIsNone(result) + if __name__ == '__main__': unittest.main() diff --git a/tests/flexstack/security/test_ecdsa_backend.py b/tests/flexstack/security/test_ecdsa_backend.py index cf0d4a8..5a6ed84 100644 --- a/tests/flexstack/security/test_ecdsa_backend.py +++ b/tests/flexstack/security/test_ecdsa_backend.py @@ -147,3 +147,31 @@ def test_sign_with_sha256_3(self, sigencode_mock, from_public_point_mock, point_ data=b'whatever', hashfunc=hashlib.sha256 ) + + def test_export_signing_key(self): + """Test that export_signing_key returns a valid PEM-encoded private key.""" + identifier = self.backend.create_key() + + pem = self.backend.export_signing_key(identifier) + + self.assertIsInstance(pem, bytes) + self.assertIn(b"-----BEGIN EC PRIVATE KEY-----", pem) + + def test_import_signing_key(self): + """Test that import_signing_key loads a PEM key and returns a usable identifier.""" + # Create a key, export it, then import it under a fresh backend instance + original_id = self.backend.create_key() + pem = self.backend.export_signing_key(original_id) + + imported_id = self.backend.import_signing_key(pem) + + # The imported key must be a new distinct identifier + self.assertNotEqual(imported_id, original_id) + # Public keys derived from both identifiers must be identical + self.assertEqual( + self.backend.get_public_key(original_id), + self.backend.get_public_key(imported_id), + ) + # The imported key must be able to produce a valid signature + signature = self.backend.sign(b"test_data", imported_id) + self.assertTrue(self.backend.verify(b"test_data", signature, imported_id)) diff --git a/tests/flexstack/security/test_sign_service.py b/tests/flexstack/security/test_sign_service.py index 3b150b2..aa491c0 100644 --- a/tests/flexstack/security/test_sign_service.py +++ 
b/tests/flexstack/security/test_sign_service.py @@ -2,6 +2,7 @@ from unittest.mock import Mock, MagicMock, patch from flexstack.security.sn_sap import SNSIGNRequest, SNSIGNConfirm from flexstack.security.certificate import OwnCertificate +from flexstack.security.certificate_library import CertificateLibrary from flexstack.security.ecdsa_backend import ECDSABackend from flexstack.utils.time_service import TimeService from flexstack.security.sign_service import ( @@ -41,11 +42,12 @@ def test_sign(self, mock_coder): def test_set_up_signer(self): self.certificate.as_hashedid8.return_value = b"hashedid8" - self.certificate.encode.return_value = b"encoded_certificate" + cert_dict = {"version": 3} + self.certificate.certificate = cert_dict TimeService.time = MagicMock(return_value=100) signer = self.handler.set_up_signer(self.certificate) - self.assertEqual(signer, ("certificate", b"encoded_certificate")) + self.assertEqual(signer, ("certificate", [cert_dict])) TimeService.time = MagicMock(return_value=101) self.handler.last_signer_full_certificate_time = 100 @@ -56,24 +58,36 @@ def test_set_up_signer(self): self.handler.requested_own_certificate = True signer = self.handler.set_up_signer(self.certificate) - self.assertEqual(signer, ("certificate", b"encoded_certificate")) + self.assertEqual(signer, ("certificate", [cert_dict])) + self.assertFalse(self.handler.requested_own_certificate) class TestSignService(unittest.TestCase): def setUp(self): self.backend = Mock(spec=ECDSABackend) - self.sign_service = SignService(self.backend) + self.certificate_library = MagicMock(spec=CertificateLibrary) + self.certificate_library.own_certificates = {} + self.sign_service = SignService(self.backend, self.certificate_library) + + def test_init(self): + """Test that SignService initialises correctly with backend and certificate library.""" + self.assertIs(self.sign_service.ecdsa_backend, self.backend) + self.assertIs(self.sign_service.certificate_library, self.certificate_library) + 
self.assertEqual(self.sign_service.unknown_ats, []) + self.assertEqual(self.sign_service.requested_ats, []) def test_sign_request_not_implemented(self): - for aid in [36, 37, 137, 138, 139, 141, 540, 801, 639, 638]: - request = Mock(spec=SNSIGNRequest) - request.its_aid = aid - with self.assertRaises(NotImplementedError): - self.sign_service.sign_request(request) + """Only AID 36 (CAM/PKI) remains unimplemented via sign_request(); everything + else is handled by sign_denm() (§7.1.2) or sign_other() (§7.1.3).""" + request = Mock(spec=SNSIGNRequest) + request.its_aid = 36 + with self.assertRaises(NotImplementedError): + self.sign_service.sign_request(request) @patch('flexstack.security.sign_service.SECURITY_CODER') def test_sign_cam(self, mock_coder): + """Test that sign_cam returns a correct SNSIGNConfirm.""" request = Mock(spec=SNSIGNRequest) request.its_aid = 999 request.tbs_message = b"test_message" @@ -81,6 +95,7 @@ def test_sign_cam(self, mock_coder): present_at = Mock(spec=OwnCertificate) present_at.as_hashedid8.return_value = b"hashedid8" present_at.sign_message.return_value = b"signed_message" + present_at.certificate = {"version": 3} self.sign_service.get_present_at_for_signging = MagicMock( return_value=present_at ) @@ -95,17 +110,310 @@ def test_sign_cam(self, mock_coder): self.assertEqual(confirm.sec_message_length, len(b"encoded_signed_data")) def test_get_present_at_for_signging(self): - self.sign_service.present_ats = {1: Mock(spec=OwnCertificate)} - self.sign_service.present_ats[1].get_list_of_its_aid.return_value = [999] + """Test that get_present_at_for_signging returns the correct AT from the library.""" + mock_cert = Mock(spec=OwnCertificate) + mock_cert.get_list_of_its_aid.return_value = [999] + self.certificate_library.own_certificates = {b"hashid8": mock_cert} cert = self.sign_service.get_present_at_for_signging(999) self.assertIsNotNone(cert) - self.assertEqual(cert, self.sign_service.present_ats[1]) + self.assertEqual(cert, mock_cert) - def 
test_get_known_at_for_request_not_implemented(self): - with self.assertRaises(NotImplementedError): - self.sign_service.get_known_at_for_request(b"hashedid3") + def test_get_present_at_for_signging_not_found(self): + """Test that get_present_at_for_signging returns None when no matching AT exists.""" + mock_cert = Mock(spec=OwnCertificate) + mock_cert.get_list_of_its_aid.return_value = [36] + self.certificate_library.own_certificates = {b"hashid8": mock_cert} + + cert = self.sign_service.get_present_at_for_signging(999) + + self.assertIsNone(cert) + + def test_add_own_certificate(self): + """Test that add_own_certificate delegates to the certificate library.""" + mock_cert = Mock(spec=OwnCertificate) + + self.sign_service.add_own_certificate(mock_cert) + + self.certificate_library.add_own_certificate.assert_called_once_with(mock_cert) + + def test_get_known_at_for_request_not_found_raises(self): + """Test that get_known_at_for_request raises RuntimeError when cert not found.""" + self.certificate_library.get_ca_certificate_by_hashedid3.return_value = None + with self.assertRaises(RuntimeError): + self.sign_service.get_known_at_for_request(b'\x12\x34\x56') + + def test_get_known_at_for_request_found(self): + """Test that get_known_at_for_request returns the certificate dict.""" + mock_ca = MagicMock() + mock_ca.certificate = {"version": 3} + self.certificate_library.get_ca_certificate_by_hashedid3.return_value = mock_ca + result = self.sign_service.get_known_at_for_request(b'\x12\x34\x56') + self.assertEqual(result, {"version": 3}) + + def test_notify_unknown_at_adds_hashedid3(self): + """notify_unknown_at adds the last 3 bytes as HashedId3 to unknown_ats.""" + hashedid8 = b'\xaa\xbb\xcc\xdd\xee\xff\x00\x11' + self.sign_service.notify_unknown_at(hashedid8) + self.assertIn(b'\xff\x00\x11', self.sign_service.unknown_ats) + + def test_notify_unknown_at_sets_requested_flag(self): + """notify_unknown_at sets requested_own_certificate on cam_handler.""" + hashedid8 = 
b'\xaa\xbb\xcc\xdd\xee\xff\x00\x11' + self.sign_service.notify_unknown_at(hashedid8) + self.assertTrue(self.sign_service.cam_handler.requested_own_certificate) + + def test_notify_unknown_at_no_duplicates(self): + """notify_unknown_at does not add duplicate HashedId3 values.""" + hashedid8 = b'\xaa\xbb\xcc\xdd\xee\xff\x00\x11' + self.sign_service.notify_unknown_at(hashedid8) + self.sign_service.notify_unknown_at(hashedid8) + self.assertEqual(len(self.sign_service.unknown_ats), 1) + + def test_notify_inline_p2pcd_request_own_cert_match(self): + """notify_inline_p2pcd_request sets requested_own_certificate when own AT listed.""" + mock_cert = MagicMock(spec=OwnCertificate) + mock_cert.as_hashedid8.return_value = b'\xaa\xbb\xcc\xdd\xee\xff\x00\x11' + self.certificate_library.own_certificates = { + b'\xaa\xbb\xcc\xdd\xee\xff\x00\x11': mock_cert + } + self.sign_service.notify_inline_p2pcd_request([b'\xff\x00\x11']) + self.assertTrue(self.sign_service.cam_handler.requested_own_certificate) + + def test_notify_inline_p2pcd_request_ca_cert_match(self): + """notify_inline_p2pcd_request appends hashedid3 to requested_ats when CA cert found.""" + self.certificate_library.own_certificates = {} + mock_ca = MagicMock() + self.certificate_library.get_ca_certificate_by_hashedid3.return_value = mock_ca + self.sign_service.notify_inline_p2pcd_request([b'\x12\x34\x56']) + self.assertIn(b'\x12\x34\x56', self.sign_service.requested_ats) + + def test_notify_inline_p2pcd_request_no_match(self): + """notify_inline_p2pcd_request does nothing when no match found.""" + self.certificate_library.own_certificates = {} + self.certificate_library.get_ca_certificate_by_hashedid3.return_value = None + self.sign_service.notify_inline_p2pcd_request([b'\x12\x34\x56']) + self.assertFalse(self.sign_service.cam_handler.requested_own_certificate) + self.assertEqual(self.sign_service.requested_ats, []) + + @patch('flexstack.security.sign_service.Certificate') + def 
test_notify_received_ca_certificate_removes_pending_requests(self, mock_cert_cls): + """notify_received_ca_certificate removes cert from requested_ats, unknown_ats and adds to library.""" + mock_cert = MagicMock() + mock_cert.as_hashedid8.return_value = b'\xaa\xbb\xcc\xdd\xee\xff\x00\x11' + mock_cert_cls.from_dict.return_value = mock_cert + self.sign_service.requested_ats = [b'\xff\x00\x11'] + self.sign_service.unknown_ats = [b'\xff\x00\x11'] + self.sign_service.notify_received_ca_certificate({"cert": "data"}) + self.assertEqual(self.sign_service.requested_ats, []) + self.assertEqual(self.sign_service.unknown_ats, []) + self.certificate_library.add_authorization_authority.assert_called_once_with(mock_cert) + + @patch('flexstack.security.sign_service.SECURITY_CODER') + def test_sign_denm_returns_confirm(self, mock_coder): + """sign_denm returns a valid SNSIGNConfirm when location is provided.""" + request = Mock(spec=SNSIGNRequest) + request.its_aid = 37 + request.tbs_message = b"denm_bytes" + request.generation_location = { + "latitude": 473400000, + "longitude": 85500000, + "elevation": 0xF000, + } + mock_coder.encode_to_be_signed_data.return_value = b"encoded_tbsData" + present_at = Mock(spec=OwnCertificate) + present_at.as_hashedid8.return_value = b"hashedid8" + present_at.sign_message.return_value = b"sig" + present_at.certificate = {"version": 3} + self.sign_service.get_present_at_for_signging = MagicMock(return_value=present_at) + mock_coder.encode_etsi_ts_103097_data_signed.return_value = b"signed_denm" + + confirm = self.sign_service.sign_denm(request) + + self.assertIsInstance(confirm, SNSIGNConfirm) + self.assertEqual(confirm.sec_message, b"signed_denm") + + @patch('flexstack.security.sign_service.SECURITY_CODER') + def test_sign_denm_signer_is_always_certificate(self, mock_coder): + """§7.1.2: sign_denm always uses 'certificate' signer, never 'digest'.""" + request = Mock(spec=SNSIGNRequest) + request.its_aid = 37 + request.tbs_message = b"denm_bytes" + 
request.generation_location = {"latitude": 0, "longitude": 0, "elevation": 0xF000} + mock_coder.encode_to_be_signed_data.return_value = b"tbs" + + captured = {} + + def capture_signed(data): + captured["signer"] = data["content"][1]["signer"] + return b"out" + + mock_coder.encode_etsi_ts_103097_data_signed.side_effect = capture_signed + present_at = Mock(spec=OwnCertificate) + present_at.sign_message.return_value = b"sig" + present_at.certificate = {"version": 3} + self.sign_service.get_present_at_for_signging = MagicMock(return_value=present_at) + + self.sign_service.sign_denm(request) + + self.assertEqual(captured["signer"][0], "certificate") + + @patch('flexstack.security.sign_service.SECURITY_CODER') + def test_sign_denm_includes_generation_location(self, mock_coder): + """§7.1.2: sign_denm embeds generationLocation in headerInfo.""" + loc = {"latitude": 473400000, "longitude": 85500000, "elevation": 0xF000} + request = Mock(spec=SNSIGNRequest) + request.its_aid = 37 + request.tbs_message = b"denm_bytes" + request.generation_location = loc + mock_coder.encode_to_be_signed_data.return_value = b"tbs" + + captured = {} + + def capture_signed(data): + captured["header_info"] = data["content"][1]["tbsData"]["headerInfo"] + return b"out" + + mock_coder.encode_etsi_ts_103097_data_signed.side_effect = capture_signed + present_at = Mock(spec=OwnCertificate) + present_at.sign_message.return_value = b"sig" + present_at.certificate = {"version": 3} + self.sign_service.get_present_at_for_signging = MagicMock(return_value=present_at) + + self.sign_service.sign_denm(request) + + self.assertEqual(captured["header_info"]["generationLocation"], loc) + + def test_sign_denm_raises_without_generation_location(self): + """sign_denm raises ValueError when generation_location is None.""" + request = Mock(spec=SNSIGNRequest) + request.its_aid = 37 + request.tbs_message = b"denm_bytes" + request.generation_location = None + + with self.assertRaises(ValueError): + 
self.sign_service.sign_denm(request) + + @patch('flexstack.security.sign_service.SECURITY_CODER') + def test_sign_denm_headerinfo_has_no_extra_fields(self, mock_coder): + """§7.1.2: headerInfo must not contain inlineP2pcdRequest, expiryTime, etc.""" + loc = {"latitude": 0, "longitude": 0, "elevation": 0xF000} + request = Mock(spec=SNSIGNRequest) + request.its_aid = 37 + request.tbs_message = b"d" + request.generation_location = loc + mock_coder.encode_to_be_signed_data.return_value = b"tbs" + + captured = {} + + def capture_signed(data): + captured["header_info"] = data["content"][1]["tbsData"]["headerInfo"] + return b"out" + + mock_coder.encode_etsi_ts_103097_data_signed.side_effect = capture_signed + present_at = Mock(spec=OwnCertificate) + present_at.sign_message.return_value = b"sig" + present_at.certificate = {"version": 3} + self.sign_service.get_present_at_for_signging = MagicMock(return_value=present_at) + + self.sign_service.sign_denm(request) + + hi = captured["header_info"] + for forbidden in ("expiryTime", "encryptionKey", "inlineP2pcdRequest", "requestedCertificate"): + self.assertNotIn(forbidden, hi, f"{forbidden} must not appear in DENM headerInfo") + + def test_sign_request_delegates_denm_to_sign_denm(self): + """sign_request(its_aid=37) delegates to sign_denm.""" + request = Mock(spec=SNSIGNRequest) + request.its_aid = 37 + self.sign_service.sign_denm = MagicMock(return_value=Mock(spec=SNSIGNConfirm)) + + result = self.sign_service.sign_request(request) + + self.sign_service.sign_denm.assert_called_once_with(request) + self.assertIsNotNone(result) + + @patch('flexstack.security.sign_service.SECURITY_CODER') + def test_sign_other_returns_confirm(self, mock_coder): + """sign_other returns a valid SNSIGNConfirm for a generic ITS-AID.""" + request = Mock(spec=SNSIGNRequest) + request.its_aid = 139 + request.tbs_message = b"ivim_bytes" + mock_coder.encode_to_be_signed_data.return_value = b"tbs" + mock_coder.encode_etsi_ts_103097_data_signed.return_value 
= b"signed_other" + present_at = Mock(spec=OwnCertificate) + present_at.as_hashedid8.return_value = b"hashedid8" + present_at.sign_message.return_value = b"sig" + self.sign_service.get_present_at_for_signging = MagicMock(return_value=present_at) + + confirm = self.sign_service.sign_other(request) + + self.assertIsInstance(confirm, SNSIGNConfirm) + self.assertEqual(confirm.sec_message, b"signed_other") + + @patch('flexstack.security.sign_service.SECURITY_CODER') + def test_sign_other_headerinfo_has_psid_and_generation_time(self, mock_coder): + """§7.1.3 / §5.2: headerInfo SHALL contain psid and generationTime.""" + request = Mock(spec=SNSIGNRequest) + request.its_aid = 139 + request.tbs_message = b"ivim_bytes" + mock_coder.encode_to_be_signed_data.return_value = b"tbs" + + captured = {} + + def capture(data): + captured["hi"] = data["content"][1]["tbsData"]["headerInfo"] + return b"out" + + mock_coder.encode_etsi_ts_103097_data_signed.side_effect = capture + present_at = Mock(spec=OwnCertificate) + present_at.as_hashedid8.return_value = b"hashedid8" + present_at.sign_message.return_value = b"sig" + self.sign_service.get_present_at_for_signging = MagicMock(return_value=present_at) + + self.sign_service.sign_other(request) + + hi = captured["hi"] + self.assertEqual(hi["psid"], 139) + self.assertIn("generationTime", hi) + + @patch('flexstack.security.sign_service.SECURITY_CODER') + def test_sign_other_signer_is_digest(self, mock_coder): + """§7.1.3: sign_other uses 'digest' signer (hashedId8 of the AT).""" + request = Mock(spec=SNSIGNRequest) + request.its_aid = 638 + request.tbs_message = b"vru_bytes" + mock_coder.encode_to_be_signed_data.return_value = b"tbs" + + captured = {} + + def capture(data): + captured["signer"] = data["content"][1]["signer"] + return b"out" + + mock_coder.encode_etsi_ts_103097_data_signed.side_effect = capture + present_at = Mock(spec=OwnCertificate) + present_at.as_hashedid8.return_value = b"\xaa" * 8 + present_at.sign_message.return_value 
= b"sig" + self.sign_service.get_present_at_for_signging = MagicMock(return_value=present_at) + + self.sign_service.sign_other(request) + + self.assertEqual(captured["signer"][0], "digest") + self.assertEqual(captured["signer"][1], b"\xaa" * 8) + + def test_sign_request_delegates_other_to_sign_other(self): + """sign_request delegates any AID other than 36/37 to sign_other().""" + for aid in [137, 138, 139, 141, 540, 801, 639, 638, 9999]: + request = Mock(spec=SNSIGNRequest) + request.its_aid = aid + self.sign_service.sign_other = MagicMock(return_value=Mock(spec=SNSIGNConfirm)) + + result = self.sign_service.sign_request(request) + + self.sign_service.sign_other.assert_called_once_with(request) + self.assertIsNotNone(result) if __name__ == "__main__": diff --git a/tests/flexstack/security/test_sn_sap.py b/tests/flexstack/security/test_sn_sap.py index 7e63c4c..b70fd3a 100644 --- a/tests/flexstack/security/test_sn_sap.py +++ b/tests/flexstack/security/test_sn_sap.py @@ -238,7 +238,7 @@ def test_repr(self): its_aid=self.its_aid, permissions=self.permissions, ) - expected_repr = f"SNVERIFYConfirm(report={self.report}, certificate_id={self.certificate_id}, its_aid_length={self.its_aid_length}, its_aid={self.its_aid}, permissions={self.permissions})" + expected_repr = f"SNVERIFYConfirm(report={self.report}, certificate_id={self.certificate_id}, its_aid_length={self.its_aid_length}, its_aid={self.its_aid}, permissions={self.permissions}, plain_message={b''})" self.assertEqual(repr(confirm), expected_repr) def test_str(self): @@ -249,9 +249,33 @@ def test_str(self): its_aid=self.its_aid, permissions=self.permissions, ) - expected_str = f"SNVERIFYConfirm(report={self.report}, certificate_id={self.certificate_id}, its_aid_length={self.its_aid_length}, its_aid={self.its_aid}, permissions={self.permissions})" + expected_str = f"SNVERIFYConfirm(report={self.report}, certificate_id={self.certificate_id}, its_aid_length={self.its_aid_length}, its_aid={self.its_aid}, 
permissions={self.permissions}, plain_message={b''})" self.assertEqual(str(confirm), expected_str) + def test_plain_message_defaults_to_empty(self): + """Test that plain_message defaults to empty bytes when not provided.""" + confirm = SNVERIFYConfirm( + report=self.report, + certificate_id=self.certificate_id, + its_aid_length=self.its_aid_length, + its_aid=self.its_aid, + permissions=self.permissions, + ) + self.assertEqual(confirm.plain_message, b"") + + def test_plain_message_can_be_set(self): + """Test that plain_message is stored correctly when explicitly provided.""" + payload = b"inner_gn_payload" + confirm = SNVERIFYConfirm( + report=self.report, + certificate_id=self.certificate_id, + its_aid_length=self.its_aid_length, + its_aid=self.its_aid, + permissions=self.permissions, + plain_message=payload, + ) + self.assertEqual(confirm.plain_message, payload) + class TestSNENCRYPTRequest(unittest.TestCase): diff --git a/tests/flexstack/security/test_verify_service.py b/tests/flexstack/security/test_verify_service.py index 406d87e..84c822c 100644 --- a/tests/flexstack/security/test_verify_service.py +++ b/tests/flexstack/security/test_verify_service.py @@ -2,6 +2,7 @@ from unittest.mock import MagicMock, patch from flexstack.security.verify_service import VerifyService +from flexstack.security.sign_service import SignService from flexstack.security.sn_sap import SNVERIFYRequest, ReportVerify from flexstack.security.certificate import Certificate @@ -20,6 +21,7 @@ def test_init(self): self.assertIs(self.verify_service.backend, self.backend) self.assertIs(self.verify_service.certificate_library, self.certificate_library) + self.assertIsNone(self.verify_service.sign_service) @patch('flexstack.security.verify_service.SECURITY_CODER') def test_verify_with_certificate_signer_success(self, mock_coder): @@ -34,12 +36,26 @@ def test_verify_with_certificate_signer_success(self, mock_coder): } } + inner_payload = b"cam_payload_bytes" 
mock_coder.decode_etsi_ts_103097_data_signed.return_value = { - "toBeSigned": {"some": "data"}, + "protocolVersion": 3, "content": ("signedData", { - "signer": ("certificate", [{"cert": "data"}]) - }), - "signature": {"rSig": ("x-only", b"r"), "sSig": b"s"} + "hashId": "sha256", + "tbsData": { + "payload": { + "data": { + "protocolVersion": 3, + "content": ("unsecuredData", inner_payload) + } + }, + "headerInfo": { + "psid": 36, + "generationTime": 123456789000, + }, + }, + "signer": ("certificate", [{"cert": "data"}]), + "signature": {"rSig": ("x-only", b"r"), "sSig": b"s"} + }) } mock_coder.encode_to_be_signed_data.return_value = b"encoded_tbs_data" @@ -60,6 +76,9 @@ def test_verify_with_certificate_signer_success(self, mock_coder): self.assertEqual(result.report, ReportVerify.SUCCESS) self.assertEqual(result.certificate_id, b'\x12\x34\x56\x78\x9a\xbc\xde\xf0') + self.assertEqual(result.plain_message, inner_payload) + self.assertEqual(result.its_aid, (36).to_bytes(1, 'big')) + self.assertEqual(result.its_aid_length, 1) self.certificate_library.verify_sequence_of_certificates.assert_called_once() self.backend.verify_with_pk.assert_called_once() @@ -68,11 +87,13 @@ def test_verify_with_certificate_signer_inconsistent_chain(self, mock_coder): """Test verification fails with inconsistent certificate chain""" # Given mock_coder.decode_etsi_ts_103097_data_signed.return_value = { - "toBeSigned": {"some": "data"}, + "protocolVersion": 3, "content": ("signedData", { - "signer": ("certificate", [{"cert": "data"}]) - }), - "signature": {"rSig": ("x-only", b"r"), "sSig": b"s"} + "hashId": "sha256", + "tbsData": {"some": "data"}, + "signer": ("certificate", [{"cert": "data"}]), + "signature": {"rSig": ("x-only", b"r"), "sSig": b"s"} + }) } mock_coder.encode_to_be_signed_data.return_value = b"encoded_tbs_data" @@ -105,13 +126,27 @@ def test_verify_with_digest_signer_success(self, mock_coder): } } + inner_payload = b"cam_digest_payload" digest = 
b'\xaa\xbb\xcc\xdd\xee\xff\x00\x11' mock_coder.decode_etsi_ts_103097_data_signed.return_value = { - "toBeSigned": {"some": "data"}, + "protocolVersion": 3, "content": ("signedData", { - "signer": ("digest", digest) - }), - "signature": {"rSig": ("x-only", b"r"), "sSig": b"s"} + "hashId": "sha256", + "tbsData": { + "payload": { + "data": { + "protocolVersion": 3, + "content": ("unsecuredData", inner_payload) + } + }, + "headerInfo": { + "psid": 36, + "generationTime": 123456789000, + }, + }, + "signer": ("digest", digest), + "signature": {"rSig": ("x-only", b"r"), "sSig": b"s"} + }) } mock_coder.encode_to_be_signed_data.return_value = b"encoded_tbs_data" @@ -132,6 +167,7 @@ def test_verify_with_digest_signer_success(self, mock_coder): self.assertEqual(result.report, ReportVerify.SUCCESS) self.assertEqual(result.certificate_id, b'\x12\x34\x56\x78\x9a\xbc\xde\xf0') + self.assertEqual(result.plain_message, inner_payload) self.certificate_library.get_authorization_ticket_by_hashedid8.assert_called_once_with( digest) @@ -141,11 +177,13 @@ def test_verify_with_digest_signer_invalid_certificate(self, mock_coder): # Given digest = b'\xaa\xbb\xcc\xdd\xee\xff\x00\x11' mock_coder.decode_etsi_ts_103097_data_signed.return_value = { - "toBeSigned": {"some": "data"}, + "protocolVersion": 3, "content": ("signedData", { - "signer": ("digest", digest) - }), - "signature": {"rSig": ("x-only", b"r"), "sSig": b"s"} + "hashId": "sha256", + "tbsData": {"some": "data"}, + "signer": ("digest", digest), + "signature": {"rSig": ("x-only", b"r"), "sSig": b"s"} + }) } mock_coder.encode_to_be_signed_data.return_value = b"encoded_tbs_data" @@ -162,7 +200,7 @@ def test_verify_with_digest_signer_invalid_certificate(self, mock_coder): result = self.verify_service.verify(request) # Then - self.assertEqual(result.report, ReportVerify.INVALID_CERTIFICATE) + self.assertEqual(result.report, ReportVerify.SIGNER_CERTIFICATE_NOT_FOUND) self.assertEqual(result.certificate_id, b'') 
@patch('flexstack.security.verify_service.SECURITY_CODER') @@ -170,11 +208,13 @@ def test_verify_with_unknown_signer_type_raises_exception(self, mock_coder): """Test verification raises exception with unknown signer type""" # Given mock_coder.decode_etsi_ts_103097_data_signed.return_value = { - "toBeSigned": {"some": "data"}, + "protocolVersion": 3, "content": ("signedData", { - "signer": ("unknown_type", b"data") - }), - "signature": {"rSig": ("x-only", b"r"), "sSig": b"s"} + "hashId": "sha256", + "tbsData": {"some": "data"}, + "signer": ("unknown_type", b"data"), + "signature": {"rSig": ("x-only", b"r"), "sSig": b"s"} + }) } mock_coder.encode_to_be_signed_data.return_value = b"encoded_tbs_data" @@ -204,11 +244,18 @@ def test_verify_with_false_signature(self, mock_coder): } mock_coder.decode_etsi_ts_103097_data_signed.return_value = { - "toBeSigned": {"some": "data"}, + "protocolVersion": 3, "content": ("signedData", { - "signer": ("certificate", [{"cert": "data"}]) - }), - "signature": {"rSig": ("x-only", b"r"), "sSig": b"s"} + "hashId": "sha256", + "tbsData": { + "headerInfo": { + "psid": 36, + "generationTime": 123456789000, + }, + }, + "signer": ("certificate", [{"cert": "data"}]), + "signature": {"rSig": ("x-only", b"r"), "sSig": b"s"} + }) } mock_coder.encode_to_be_signed_data.return_value = b"encoded_tbs_data" @@ -243,11 +290,13 @@ def test_verify_with_invalid_certificate_verification(self, mock_coder): } mock_coder.decode_etsi_ts_103097_data_signed.return_value = { - "toBeSigned": {"some": "data"}, + "protocolVersion": 3, "content": ("signedData", { - "signer": ("certificate", [{"cert": "data"}]) - }), - "signature": {"rSig": ("x-only", b"r"), "sSig": b"s"} + "hashId": "sha256", + "tbsData": {"some": "data"}, + "signer": ("certificate", [{"cert": "data"}]), + "signature": {"rSig": ("x-only", b"r"), "sSig": b"s"} + }) } mock_coder.encode_to_be_signed_data.return_value = b"encoded_tbs_data" @@ -280,11 +329,13 @@ def 
test_verify_with_non_verification_key_indicator(self, mock_coder): } mock_coder.decode_etsi_ts_103097_data_signed.return_value = { - "toBeSigned": {"some": "data"}, + "protocolVersion": 3, "content": ("signedData", { - "signer": ("certificate", [{"cert": "data"}]) - }), - "signature": {"rSig": ("x-only", b"r"), "sSig": b"s"} + "hashId": "sha256", + "tbsData": {"some": "data"}, + "signer": ("certificate", [{"cert": "data"}]), + "signature": {"rSig": ("x-only", b"r"), "sSig": b"s"} + }) } mock_coder.encode_to_be_signed_data.return_value = b"encoded_tbs_data" @@ -303,6 +354,459 @@ def test_verify_with_non_verification_key_indicator(self, mock_coder): # Then self.assertEqual(result.report, ReportVerify.INVALID_CERTIFICATE) + @patch('flexstack.security.verify_service.SECURITY_CODER') + def test_verify_missing_generation_time_returns_invalid_timestamp(self, mock_coder): + """Test §5.2: generationTime MUST be present; absent → INVALID_TIMESTAMP""" + # Given + mock_certificate = MagicMock(spec=Certificate) + mock_certificate.verify.return_value = True + mock_certificate.as_hashedid8.return_value = b'\x12\x34\x56\x78\x9a\xbc\xde\xf0' + mock_certificate.certificate = { + "toBeSigned": { + "verifyKeyIndicator": ("verificationKey", ("ecdsaNistP256", {"x": b"x", "y": b"y"})) + } + } + + mock_coder.decode_etsi_ts_103097_data_signed.return_value = { + "protocolVersion": 3, + "content": ("signedData", { + "hashId": "sha256", + "tbsData": { + "payload": {"data": {"protocolVersion": 3, "content": ("unsecuredData", b"payload")}}, + "headerInfo": { + "psid": 36, + # generationTime intentionally absent + }, + }, + "signer": ("certificate", [{"cert": "data"}]), + "signature": {"rSig": ("x-only", b"r"), "sSig": b"s"} + }) + } + mock_coder.encode_to_be_signed_data.return_value = b"encoded_tbs_data" + + self.certificate_library.verify_sequence_of_certificates.return_value = mock_certificate + + request = SNVERIFYRequest( + sec_header_length=10, + sec_header=b"header", + message_length=20, 
+ message=b"signed_message" + ) + + # When + result = self.verify_service.verify(request) + + # Then + self.assertEqual(result.report, ReportVerify.INVALID_TIMESTAMP) + self.backend.verify_with_pk.assert_not_called() + + @patch('flexstack.security.verify_service.SECURITY_CODER') + def test_verify_forbidden_fields_present_returns_incompatible_protocol(self, mock_coder): + """Test §5.2: p2pcdLearningRequest and missingCrlIdentifier SHALL be absent""" + # Given + mock_certificate = MagicMock(spec=Certificate) + mock_certificate.verify.return_value = True + mock_certificate.as_hashedid8.return_value = b'\x12\x34\x56\x78\x9a\xbc\xde\xf0' + mock_certificate.certificate = { + "toBeSigned": { + "verifyKeyIndicator": ("verificationKey", ("ecdsaNistP256", {"x": b"x", "y": b"y"})) + } + } + + mock_coder.decode_etsi_ts_103097_data_signed.return_value = { + "protocolVersion": 3, + "content": ("signedData", { + "hashId": "sha256", + "tbsData": { + "payload": {"data": {"protocolVersion": 3, "content": ("unsecuredData", b"payload")}}, + "headerInfo": { + "psid": 36, + "generationTime": 123456789000, + "p2pcdLearningRequest": b'\x00\x00\x00\x00\x00\x00\x00\x00', + }, + }, + "signer": ("certificate", [{"cert": "data"}]), + "signature": {"rSig": ("x-only", b"r"), "sSig": b"s"} + }) + } + mock_coder.encode_to_be_signed_data.return_value = b"encoded_tbs_data" + + self.certificate_library.verify_sequence_of_certificates.return_value = mock_certificate + + request = SNVERIFYRequest( + sec_header_length=10, + sec_header=b"header", + message_length=20, + message=b"signed_message" + ) + + # When + result = self.verify_service.verify(request) + + # Then + self.assertEqual(result.report, ReportVerify.INCOMPATIBLE_PROTOCOL) + self.backend.verify_with_pk.assert_not_called() + + @patch('flexstack.security.verify_service.SECURITY_CODER') + def test_verify_with_multiple_certificates_in_signer(self, mock_coder): + """Test §5.2: signer.certificate SHALL contain exactly one entry; >1 → 
UNSUPPORTED_SIGNER_IDENTIFIER_TYPE""" + # Given + mock_coder.decode_etsi_ts_103097_data_signed.return_value = { + "protocolVersion": 3, + "content": ("signedData", { + "hashId": "sha256", + "tbsData": { + "payload": {"data": {"protocolVersion": 3, "content": ("unsecuredData", b"payload")}}, + "headerInfo": {"psid": 36, "generationTime": 123456789000}, + }, + "signer": ("certificate", [{"cert": "data1"}, {"cert": "data2"}]), + "signature": {"rSig": ("x-only", b"r"), "sSig": b"s"} + }) + } + mock_coder.encode_to_be_signed_data.return_value = b"encoded_tbs_data" + + request = SNVERIFYRequest( + sec_header_length=10, sec_header=b"header", + message_length=20, message=b"signed_message" + ) + + # When + result = self.verify_service.verify(request) + + # Then + self.assertEqual(result.report, ReportVerify.UNSUPPORTED_SIGNER_IDENTIFIER_TYPE) + self.backend.verify_with_pk.assert_not_called() + + @patch('flexstack.security.verify_service.SECURITY_CODER') + def test_verify_unknown_digest_notifies_sign_service(self, mock_coder): + """§7.1.1: unknown AT digest triggers sign_service.notify_unknown_at""" + sign_service = MagicMock(spec=SignService) + vs = VerifyService( + backend=self.backend, + certificate_library=self.certificate_library, + sign_service=sign_service, + ) + digest = b'\xaa\xbb\xcc\xdd\xee\xff\x00\x11' + mock_coder.decode_etsi_ts_103097_data_signed.return_value = { + "protocolVersion": 3, + "content": ("signedData", { + "hashId": "sha256", + "tbsData": {"some": "data"}, + "signer": ("digest", digest), + "signature": {} + }) + } + mock_coder.encode_to_be_signed_data.return_value = b"encoded_tbs_data" + self.certificate_library.get_authorization_ticket_by_hashedid8.return_value = None + + result = vs.verify(SNVERIFYRequest( + sec_header_length=0, sec_header=b"", + message_length=0, message=b"" + )) + + self.assertEqual(result.report, ReportVerify.SIGNER_CERTIFICATE_NOT_FOUND) + sign_service.notify_unknown_at.assert_called_once_with(digest) + + 
@patch('flexstack.security.verify_service.SECURITY_CODER') + def test_verify_inconsistent_chain_notifies_sign_service(self, mock_coder): + """§7.1.1: inconsistent chain triggers sign_service.notify_unknown_at for the AA issuer""" + sign_service = MagicMock(spec=SignService) + vs = VerifyService( + backend=self.backend, + certificate_library=self.certificate_library, + sign_service=sign_service, + ) + aa_hashedid8 = b'\x11\x12\x13\x14\x15\x16\x17\x18' + mock_coder.decode_etsi_ts_103097_data_signed.return_value = { + "protocolVersion": 3, + "content": ("signedData", { + "hashId": "sha256", + "tbsData": {"some": "data"}, + "signer": ("certificate", [ + {"issuer": ("sha256AndDigest", aa_hashedid8)} + ]), + "signature": {} + }) + } + mock_coder.encode_to_be_signed_data.return_value = b"encoded_tbs_data" + self.certificate_library.verify_sequence_of_certificates.return_value = None + + result = vs.verify(SNVERIFYRequest( + sec_header_length=0, sec_header=b"", + message_length=0, message=b"" + )) + + self.assertEqual(result.report, ReportVerify.INCONSISTENT_CHAIN) + sign_service.notify_unknown_at.assert_called_once_with(aa_hashedid8) + + @patch('flexstack.security.verify_service.SECURITY_CODER') + def test_verify_success_processes_inline_p2pcd_request(self, mock_coder): + """§7.1.1: on success, inlineP2pcdRequest in headerInfo is forwarded to sign_service""" + sign_service = MagicMock(spec=SignService) + vs = VerifyService( + backend=self.backend, + certificate_library=self.certificate_library, + sign_service=sign_service, + ) + mock_certificate = MagicMock(spec=Certificate) + mock_certificate.verify.return_value = True + mock_certificate.as_hashedid8.return_value = b'\x12\x34\x56\x78\x9a\xbc\xde\xf0' + mock_certificate.certificate = { + "toBeSigned": { + "verifyKeyIndicator": ("verificationKey", ("ecdsaNistP256", {"x": b"x", "y": b"y"})) + } + } + p2pcd_list = [b'\xaa\xbb\xcc'] + mock_coder.decode_etsi_ts_103097_data_signed.return_value = { + "protocolVersion": 3, + 
"content": ("signedData", { + "hashId": "sha256", + "tbsData": { + "payload": {"data": {"protocolVersion": 3, "content": ("unsecuredData", b"payload")}}, + "headerInfo": { + "psid": 36, + "generationTime": 123456789000, + "inlineP2pcdRequest": p2pcd_list, + }, + }, + "signer": ("certificate", [{"cert": "data"}]), + "signature": {} + }) + } + mock_coder.encode_to_be_signed_data.return_value = b"encoded_tbs_data" + self.certificate_library.verify_sequence_of_certificates.return_value = mock_certificate + self.backend.verify_with_pk.return_value = True + + result = vs.verify(SNVERIFYRequest( + sec_header_length=0, sec_header=b"", + message_length=0, message=b"" + )) + + self.assertEqual(result.report, ReportVerify.SUCCESS) + sign_service.notify_inline_p2pcd_request.assert_called_once_with(p2pcd_list) + + @patch('flexstack.security.verify_service.SECURITY_CODER') + def test_verify_success_processes_requested_certificate(self, mock_coder): + """§7.1.1: on success, requestedCertificate in headerInfo is forwarded to sign_service""" + sign_service = MagicMock(spec=SignService) + vs = VerifyService( + backend=self.backend, + certificate_library=self.certificate_library, + sign_service=sign_service, + ) + mock_certificate = MagicMock(spec=Certificate) + mock_certificate.verify.return_value = True + mock_certificate.as_hashedid8.return_value = b'\x12\x34\x56\x78\x9a\xbc\xde\xf0' + mock_certificate.certificate = { + "toBeSigned": { + "verifyKeyIndicator": ("verificationKey", ("ecdsaNistP256", {"x": b"x", "y": b"y"})) + } + } + requested_cert = {"version": 3, "toBeSigned": {"id": ("name", "aa")}} + mock_coder.decode_etsi_ts_103097_data_signed.return_value = { + "protocolVersion": 3, + "content": ("signedData", { + "hashId": "sha256", + "tbsData": { + "payload": {"data": {"protocolVersion": 3, "content": ("unsecuredData", b"payload")}}, + "headerInfo": { + "psid": 36, + "generationTime": 123456789000, + "requestedCertificate": requested_cert, + }, + }, + "signer": 
("certificate", [{"cert": "data"}]), + "signature": {} + }) + } + mock_coder.encode_to_be_signed_data.return_value = b"encoded_tbs_data" + self.certificate_library.verify_sequence_of_certificates.return_value = mock_certificate + self.backend.verify_with_pk.return_value = True + + result = vs.verify(SNVERIFYRequest( + sec_header_length=0, sec_header=b"", + message_length=0, message=b"" + )) + + self.assertEqual(result.report, ReportVerify.SUCCESS) + sign_service.notify_received_ca_certificate.assert_called_once_with(requested_cert) + + @patch('flexstack.security.verify_service.SECURITY_CODER') + def test_verify_denm_with_digest_signer_returns_unsupported(self, mock_coder): + """§7.1.2: DENM (psid=37) signed with digest signer → UNSUPPORTED_SIGNER_IDENTIFIER_TYPE.""" + mock_coder.decode_etsi_ts_103097_data_signed.return_value = { + "protocolVersion": 3, + "content": ("signedData", { + "hashId": "sha256", + "tbsData": { + "headerInfo": {"psid": 37, "generationTime": 123456789000}, + "payload": {}, + }, + "signer": ("digest", b'\xaa\xbb\xcc\xdd\xee\xff\x00\x11'), + "signature": {} + }) + } + mock_coder.encode_to_be_signed_data.return_value = b"tbs" + + result = self.verify_service.verify(SNVERIFYRequest( + sec_header_length=0, sec_header=b"", + message_length=0, message=b"" + )) + + self.assertEqual(result.report, ReportVerify.UNSUPPORTED_SIGNER_IDENTIFIER_TYPE) + self.backend.verify_with_pk.assert_not_called() + + @patch('flexstack.security.verify_service.SECURITY_CODER') + def test_verify_denm_missing_generation_location_returns_incompatible_protocol(self, mock_coder): + """§7.1.2: DENM without generationLocation → INCOMPATIBLE_PROTOCOL.""" + mock_certificate = MagicMock(spec=Certificate) + mock_certificate.verify.return_value = True + mock_certificate.as_hashedid8.return_value = b'\x12\x34\x56\x78\x9a\xbc\xde\xf0' + mock_certificate.certificate = { + "toBeSigned": { + "verifyKeyIndicator": ("verificationKey", ("ecdsaNistP256", {})) + } + } + 
mock_coder.decode_etsi_ts_103097_data_signed.return_value = { + "protocolVersion": 3, + "content": ("signedData", { + "hashId": "sha256", + "tbsData": { + "payload": {"data": {"protocolVersion": 3, "content": ("unsecuredData", b"d")}}, + "headerInfo": { + "psid": 37, + "generationTime": 123456789000, + # generationLocation intentionally absent + }, + }, + "signer": ("certificate", [{"cert": "data"}]), + "signature": {} + }) + } + mock_coder.encode_to_be_signed_data.return_value = b"tbs" + self.certificate_library.verify_sequence_of_certificates.return_value = mock_certificate + + result = self.verify_service.verify(SNVERIFYRequest( + sec_header_length=0, sec_header=b"", + message_length=0, message=b"" + )) + + self.assertEqual(result.report, ReportVerify.INCOMPATIBLE_PROTOCOL) + self.backend.verify_with_pk.assert_not_called() + + @patch('flexstack.security.verify_service.SECURITY_CODER') + def test_verify_denm_with_forbidden_field_returns_incompatible_protocol(self, mock_coder): + """§7.1.2: DENM with inlineP2pcdRequest present → INCOMPATIBLE_PROTOCOL.""" + mock_certificate = MagicMock(spec=Certificate) + mock_certificate.verify.return_value = True + mock_certificate.as_hashedid8.return_value = b'\x12\x34\x56\x78\x9a\xbc\xde\xf0' + mock_certificate.certificate = { + "toBeSigned": { + "verifyKeyIndicator": ("verificationKey", ("ecdsaNistP256", {})) + } + } + mock_coder.decode_etsi_ts_103097_data_signed.return_value = { + "protocolVersion": 3, + "content": ("signedData", { + "hashId": "sha256", + "tbsData": { + "payload": {"data": {"protocolVersion": 3, "content": ("unsecuredData", b"d")}}, + "headerInfo": { + "psid": 37, + "generationTime": 123456789000, + "generationLocation": {"latitude": 0, "longitude": 0, "elevation": 0xF000}, + "inlineP2pcdRequest": [b'\xaa\xbb\xcc'], # forbidden in DENM + }, + }, + "signer": ("certificate", [{"cert": "data"}]), + "signature": {} + }) + } + mock_coder.encode_to_be_signed_data.return_value = b"tbs" + 
self.certificate_library.verify_sequence_of_certificates.return_value = mock_certificate + + result = self.verify_service.verify(SNVERIFYRequest( + sec_header_length=0, sec_header=b"", + message_length=0, message=b"" + )) + + self.assertEqual(result.report, ReportVerify.INCOMPATIBLE_PROTOCOL) + self.backend.verify_with_pk.assert_not_called() + + @patch('flexstack.security.verify_service.SECURITY_CODER') + def test_verify_denm_valid_succeeds(self, mock_coder): + """§7.1.2: valid DENM with certificate signer and generationLocation → SUCCESS.""" + mock_certificate = MagicMock(spec=Certificate) + mock_certificate.verify.return_value = True + mock_certificate.as_hashedid8.return_value = b'\x12\x34\x56\x78\x9a\xbc\xde\xf0' + mock_certificate.certificate = { + "toBeSigned": { + "verifyKeyIndicator": ("verificationKey", ("ecdsaNistP256", {"x": b"x", "y": b"y"})) + } + } + payload = b"denm_payload" + mock_coder.decode_etsi_ts_103097_data_signed.return_value = { + "protocolVersion": 3, + "content": ("signedData", { + "hashId": "sha256", + "tbsData": { + "payload": {"data": {"protocolVersion": 3, "content": ("unsecuredData", payload)}}, + "headerInfo": { + "psid": 37, + "generationTime": 123456789000, + "generationLocation": {"latitude": 473400000, "longitude": 85500000, "elevation": 0xF000}, + }, + }, + "signer": ("certificate", [{"cert": "data"}]), + "signature": {} + }) + } + mock_coder.encode_to_be_signed_data.return_value = b"tbs" + self.certificate_library.verify_sequence_of_certificates.return_value = mock_certificate + self.backend.verify_with_pk.return_value = True + + result = self.verify_service.verify(SNVERIFYRequest( + sec_header_length=0, sec_header=b"", + message_length=0, message=b"" + )) + + self.assertEqual(result.report, ReportVerify.SUCCESS) + self.assertEqual(result.plain_message, payload) + + @patch('flexstack.security.verify_service.SECURITY_CODER') + def test_verify_non_at_profile_returns_invalid_certificate(self, mock_coder): + """§7.2.1: cert that 
fails the AT profile check → INVALID_CERTIFICATE.""" + mock_certificate = MagicMock(spec=Certificate) + mock_certificate.verify.return_value = True + mock_certificate.is_authorization_ticket.return_value = False + mock_certificate.as_hashedid8.return_value = b'\x12\x34\x56\x78\x9a\xbc\xde\xf0' + mock_certificate.certificate = { + "toBeSigned": { + "verifyKeyIndicator": ("verificationKey", ("ecdsaNistP256", {"x": b"x", "y": b"y"})) + } + } + mock_coder.decode_etsi_ts_103097_data_signed.return_value = { + "protocolVersion": 3, + "content": ("signedData", { + "hashId": "sha256", + "tbsData": { + "payload": {"data": {"protocolVersion": 3, "content": ("unsecuredData", b"data")}}, + "headerInfo": {"psid": 36, "generationTime": 123456789000}, + }, + "signer": ("digest", b'\x12\x34\x56\x78\x9a\xbc\xde\xf0'), + "signature": {} + }) + } + mock_coder.encode_to_be_signed_data.return_value = b"tbs" + self.certificate_library.get_authorization_ticket_by_hashedid8.return_value = mock_certificate + + result = self.verify_service.verify(SNVERIFYRequest( + sec_header_length=0, sec_header=b"", + message_length=0, message=b"" + )) + + self.assertEqual(result.report, ReportVerify.INVALID_CERTIFICATE) + if __name__ == '__main__': unittest.main()