diff --git a/scripts/audit-validity-dates.py b/scripts/audit-validity-dates.py new file mode 100755 index 000000000..c55571369 --- /dev/null +++ b/scripts/audit-validity-dates.py @@ -0,0 +1,468 @@ +#!/usr/bin/env python3 +# +# Copyright The Mbed TLS Contributors +# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later + +"""Audit validity date of X509 crt/crl/csr. + +This script is used to audit the validity date of crt/crl/csr used for testing. +It prints the information about X.509 objects excluding the objects that +are valid throughout the desired validity period. The data are collected +from tests/data_files/ and tests/suites/*.data files by default. +""" + +import os +import re +import typing +import argparse +import datetime +import glob +import logging +import hashlib +from enum import Enum + +# The script requires cryptography >= 35.0.0 which is only available +# for Python >= 3.6. +import cryptography +from cryptography import x509 + +from generate_test_code import FileWrapper + +from framework_dev import build_tree +from framework_dev import logging_util + +def check_cryptography_version(): + match = re.match(r'^[0-9]+', cryptography.__version__) + if match is None or int(match.group(0)) < 35: + raise Exception("audit-validity-dates requires cryptography >= 35.0.0" + + "({} is too old)".format(cryptography.__version__)) + +class DataType(Enum): + CRT = 1 # Certificate + CRL = 2 # Certificate Revocation List + CSR = 3 # Certificate Signing Request + + +class DataFormat(Enum): + PEM = 1 # Privacy-Enhanced Mail + DER = 2 # Distinguished Encoding Rules + + +class AuditData: + """Store data location, type and validity period of X.509 objects.""" + #pylint: disable=too-few-public-methods + def __init__(self, data_type: DataType, x509_obj): + self.data_type = data_type + # the locations that the x509 object could be found + self.locations = [] # type: typing.List[str] + self.fill_validity_duration(x509_obj) + self._obj = x509_obj + encoding = 
cryptography.hazmat.primitives.serialization.Encoding.DER + self._identifier = hashlib.sha1(self._obj.public_bytes(encoding)).hexdigest() + + @property + def identifier(self): + """ + Identifier of the underlying X.509 object, which is consistent across + different runs. + """ + return self._identifier + + def fill_validity_duration(self, x509_obj): + """Read validity period from an X.509 object.""" + # Certificate expires after "not_valid_after" + # Certificate is invalid before "not_valid_before" + if self.data_type == DataType.CRT: + self.not_valid_after = x509_obj.not_valid_after + self.not_valid_before = x509_obj.not_valid_before + # CertificateRevocationList expires after "next_update" + # CertificateRevocationList is invalid before "last_update" + elif self.data_type == DataType.CRL: + self.not_valid_after = x509_obj.next_update + self.not_valid_before = x509_obj.last_update + # CertificateSigningRequest is always valid. + elif self.data_type == DataType.CSR: + self.not_valid_after = datetime.datetime.max + self.not_valid_before = datetime.datetime.min + else: + raise ValueError("Unsupported file_type: {}".format(self.data_type)) + + +class X509Parser: + """A parser class to parse crt/crl/csr file or data in PEM/DER format.""" + PEM_REGEX = br'-{5}BEGIN (?P.*?)-{5}(?P.*?)-{5}END (?P=type)-{5}' + PEM_TAG_REGEX = br'-{5}BEGIN (?P.*?)-{5}\n' + PEM_TAGS = { + DataType.CRT: 'CERTIFICATE', + DataType.CRL: 'X509 CRL', + DataType.CSR: 'CERTIFICATE REQUEST' + } + + def __init__(self, + backends: + typing.Dict[DataType, + typing.Dict[DataFormat, + typing.Callable[[bytes], object]]]) \ + -> None: + self.backends = backends + self.__generate_parsers() + + def __generate_parser(self, data_type: DataType): + """Parser generator for a specific DataType""" + tag = self.PEM_TAGS[data_type] + pem_loader = self.backends[data_type][DataFormat.PEM] + der_loader = self.backends[data_type][DataFormat.DER] + def wrapper(data: bytes): + pem_type = X509Parser.pem_data_type(data) + # 
It is in PEM format with target tag + if pem_type == tag: + return pem_loader(data) + # It is in PEM format without target tag + if pem_type: + return None + # It might be in DER format + try: + result = der_loader(data) + except ValueError: + result = None + return result + wrapper.__name__ = "{}.parser[{}]".format(type(self).__name__, tag) + return wrapper + + def __generate_parsers(self): + """Generate parsers for all support DataType""" + self.parsers = {} + for data_type, _ in self.PEM_TAGS.items(): + self.parsers[data_type] = self.__generate_parser(data_type) + + def __getitem__(self, item): + return self.parsers[item] + + @staticmethod + def pem_data_type(data: bytes) -> typing.Optional[str]: + """Get the tag from the data in PEM format + + :param data: data to be checked in binary mode. + :return: PEM tag or "" when no tag detected. + """ + m = re.search(X509Parser.PEM_TAG_REGEX, data) + if m is not None: + return m.group('type').decode('UTF-8') + else: + return None + + @staticmethod + def check_hex_string(hex_str: str) -> bool: + """Check if the hex string is possibly DER data.""" + hex_len = len(hex_str) + # At least 6 hex char for 3 bytes: Type + Length + Content + if hex_len < 6: + return False + # Check if Type (1 byte) is SEQUENCE. + if hex_str[0:2] != '30': + return False + # Check LENGTH (1 byte) value + content_len = int(hex_str[2:4], base=16) + consumed = 4 + if content_len in (128, 255): + # Indefinite or Reserved + return False + elif content_len > 127: + # Definite, Long + length_len = (content_len - 128) * 2 + content_len = int(hex_str[consumed:consumed+length_len], base=16) + consumed += length_len + # Check LENGTH + if hex_len != content_len * 2 + consumed: + return False + return True + + +class Auditor: + """ + A base class that uses X509Parser to parse files to a list of AuditData. + + A subclass must implement the following methods: + - collect_default_files: Return a list of file names that are defaultly + used for parsing (auditing). 
The list will be stored in + Auditor.default_files. + - parse_file: Method that parses a single file to a list of AuditData. + + A subclass may override the following methods: + - parse_bytes: Defaultly, it parses `bytes` that contains only one valid + X.509 data(DER/PEM format) to an X.509 object. + - walk_all: Defaultly, it iterates over all the files in the provided + file name list, calls `parse_file` for each file and stores the results + by extending the `results` passed to the function. + """ + def __init__(self, logger): + self.logger = logger + self.default_files = self.collect_default_files() + self.parser = X509Parser({ + DataType.CRT: { + DataFormat.PEM: x509.load_pem_x509_certificate, + DataFormat.DER: x509.load_der_x509_certificate + }, + DataType.CRL: { + DataFormat.PEM: x509.load_pem_x509_crl, + DataFormat.DER: x509.load_der_x509_crl + }, + DataType.CSR: { + DataFormat.PEM: x509.load_pem_x509_csr, + DataFormat.DER: x509.load_der_x509_csr + }, + }) + + def collect_default_files(self) -> typing.List[str]: + """Collect the default files for parsing.""" + raise NotImplementedError + + def parse_file(self, filename: str) -> typing.List[AuditData]: + """ + Parse a list of AuditData from file. + + :param filename: name of the file to parse. + :return list of AuditData parsed from the file. + """ + raise NotImplementedError + + def parse_bytes(self, data: bytes): + """Parse AuditData from bytes.""" + for data_type in list(DataType): + try: + result = self.parser[data_type](data) + except ValueError as val_error: + result = None + self.logger.warning(val_error) + if result is not None: + audit_data = AuditData(data_type, result) + return audit_data + return None + + def walk_all(self, + results: typing.Dict[str, AuditData], + file_list: typing.Optional[typing.List[str]] = None) \ + -> None: + """ + Iterate over all the files in the list and get audit data. The + results will be written to `results` passed to this function. 
+ + :param results: The dictionary used to store the parsed + AuditData. The keys of this dictionary should + be the identifier of the AuditData. + """ + if file_list is None: + file_list = self.default_files + for filename in file_list: + data_list = self.parse_file(filename) + for d in data_list: + if d.identifier in results: + results[d.identifier].locations.extend(d.locations) + else: + results[d.identifier] = d + + @staticmethod + def find_test_dir(): + """Get the relative path for the Mbed TLS test directory.""" + return os.path.relpath(build_tree.guess_mbedtls_root() + '/tests') + + +class TestDataAuditor(Auditor): + """Class for auditing files in `tests/data_files/`""" + + def collect_default_files(self): + """Collect all files in `tests/data_files/`""" + test_dir = self.find_test_dir() + test_data_glob = os.path.join(test_dir, 'data_files/**') + data_files = [f for f in glob.glob(test_data_glob, recursive=True) + if os.path.isfile(f)] + return data_files + + def parse_file(self, filename: str) -> typing.List[AuditData]: + """ + Parse a list of AuditData from data file. + + :param filename: name of the file to parse. + :return list of AuditData parsed from the file. + """ + with open(filename, 'rb') as f: + data = f.read() + + results = [] + # Try to parse all PEM blocks. + is_pem = False + for idx, m in enumerate(re.finditer(X509Parser.PEM_REGEX, data, flags=re.S), 1): + is_pem = True + result = self.parse_bytes(data[m.start():m.end()]) + if result is not None: + result.locations.append("{}#{}".format(filename, idx)) + results.append(result) + + # Might be DER format. + if not is_pem: + result = self.parse_bytes(data) + if result is not None: + result.locations.append("{}".format(filename)) + results.append(result) + + return results + + +def parse_suite_data(data_f): + """ + Parses .data file for test arguments that possiblly have a + valid X.509 data. If you need a more precise parser, please + use generate_test_code.parse_test_data instead. 
+ + :param data_f: file object of the data file. + :return: Generator that yields test function argument list. + """ + for line in data_f: + line = line.strip() + # Skip comments + if line.startswith('#'): + continue + + # Check parameters line + match = re.search(r'\A\w+(.*:)?\"', line) + if match: + # Read test vectors + parts = re.split(r'(?[0-9a-fA-F]+)"', test_arg) + if not match: + continue + if not X509Parser.check_hex_string(match.group('data')): + continue + audit_data = self.parse_bytes(bytes.fromhex(match.group('data'))) + if audit_data is None: + continue + audit_data.locations.append("{}:{}:#{}".format(filename, + data_f.line_no, + idx + 1)) + audit_data_list.append(audit_data) + + return audit_data_list + + +def list_all(audit_data: AuditData): + for loc in audit_data.locations: + print("{}\t{:20}\t{:20}\t{:3}\t{}".format( + audit_data.identifier, + audit_data.not_valid_before.isoformat(timespec='seconds'), + audit_data.not_valid_after.isoformat(timespec='seconds'), + audit_data.data_type.name, + loc)) + + +def main(): + """ + Perform argument parsing. + """ + parser = argparse.ArgumentParser(description=__doc__) + + parser.add_argument('-a', '--all', + action='store_true', + help='list the information of all the files') + parser.add_argument('-v', '--verbose', + action='store_true', dest='verbose', + help='show logs') + parser.add_argument('--from', dest='start_date', + help=('Start of desired validity period (UTC, YYYY-MM-DD). ' + 'Default: today'), + metavar='DATE') + parser.add_argument('--to', dest='end_date', + help=('End of desired validity period (UTC, YYYY-MM-DD). 
' + 'Default: --from'), + metavar='DATE') + parser.add_argument('--data-files', action='append', nargs='*', + help='data files to audit', + metavar='FILE') + parser.add_argument('--suite-data-files', action='append', nargs='*', + help='suite data files to audit', + metavar='FILE') + + args = parser.parse_args() + + # start main routine + # setup logger + logger = logging.getLogger() + logging_util.configure_logger(logger) + logger.setLevel(logging.DEBUG if args.verbose else logging.ERROR) + + td_auditor = TestDataAuditor(logger) + sd_auditor = SuiteDataAuditor(logger) + + data_files = [] + suite_data_files = [] + if args.data_files is None and args.suite_data_files is None: + data_files = td_auditor.default_files + suite_data_files = sd_auditor.default_files + else: + if args.data_files is not None: + data_files = [x for l in args.data_files for x in l] + if args.suite_data_files is not None: + suite_data_files = [x for l in args.suite_data_files for x in l] + + # validity period start date + if args.start_date: + start_date = datetime.datetime.fromisoformat(args.start_date) + else: + start_date = datetime.datetime.today() + # validity period end date + if args.end_date: + end_date = datetime.datetime.fromisoformat(args.end_date) + else: + end_date = start_date + + # go through all the files + audit_results = {} + td_auditor.walk_all(audit_results, data_files) + sd_auditor.walk_all(audit_results, suite_data_files) + + logger.info("Total: {} objects found!".format(len(audit_results))) + + # we filter out the files whose validity duration covers the provided + # duration. 
+ filter_func = lambda d: (start_date < d.not_valid_before) or \ + (d.not_valid_after < end_date) + + sortby_end = lambda d: d.not_valid_after + + if args.all: + filter_func = None + + # filter and output the results + for d in sorted(filter(filter_func, audit_results.values()), key=sortby_end): + list_all(d) + + logger.debug("Done!") + +check_cryptography_version() +if __name__ == "__main__": + main() diff --git a/scripts/code_style.py b/scripts/code_style.py new file mode 100755 index 000000000..07952b6cb --- /dev/null +++ b/scripts/code_style.py @@ -0,0 +1,230 @@ +#!/usr/bin/env python3 +"""Check or fix the code style by running Uncrustify. + +This script must be run from the root of a Git work tree containing Mbed TLS. +""" +# Copyright The Mbed TLS Contributors +# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later +import argparse +import os +import re +import subprocess +import sys +from typing import FrozenSet, List, Optional + +UNCRUSTIFY_SUPPORTED_VERSION = "0.75.1" +CONFIG_FILE = ".uncrustify.cfg" +UNCRUSTIFY_EXE = "uncrustify" +UNCRUSTIFY_ARGS = ["-c", CONFIG_FILE] +CHECK_GENERATED_FILES = "tests/scripts/check-generated-files.sh" + +def print_err(*args): + print("Error: ", *args, file=sys.stderr) + +# Print the file names that will be skipped and the help message +def print_skip(files_to_skip): + print() + print(*files_to_skip, sep=", SKIP\n", end=", SKIP\n") + print("Warning: The listed files will be skipped because\n" + "they are not known to git.") + print() + +# Match FILENAME(s) in "check SCRIPT (FILENAME...)" +CHECK_CALL_RE = re.compile(r"\n\s*check\s+[^\s#$&*?;|]+([^\n#$&*?;|]+)", + re.ASCII) +def list_generated_files() -> FrozenSet[str]: + """Return the names of generated files. + + We don't reformat generated files, since the result might be different + from the output of the generator. 
Ideally the result of the generator + would conform to the code style, but this would be difficult, especially + with respect to the placement of line breaks in long logical lines. + """ + # Parse check-generated-files.sh to get an up-to-date list of + # generated files. Read the file rather than calling it so that + # this script only depends on Git, Python and uncrustify, and not other + # tools such as sh or grep which might not be available on Windows. + # This introduces a limitation: check-generated-files.sh must have + # the expected format and must list the files explicitly, not through + # wildcards or command substitution. + content = open(CHECK_GENERATED_FILES, encoding="utf-8").read() + checks = re.findall(CHECK_CALL_RE, content) + return frozenset(word for s in checks for word in s.split()) + +# Check for comment string indicating an auto-generated file +AUTOGEN_RE = re.compile(r"Warning[ :-]+This file is (now )?auto[ -]?generated", + re.ASCII | re.IGNORECASE) +def is_file_autogenerated(filename): + content = open(filename, encoding="utf-8").read() + return AUTOGEN_RE.search(content) is not None + +def get_src_files(since: Optional[str]) -> List[str]: + """ + Use git to get a list of the source files. + + The optional argument since is a commit, indicating to only list files + that have changed since that commit. Without this argument, list all + files known to git. + + Only C files are included, and certain files (generated, or 3rdparty) + are excluded. 
+ """ + file_patterns = ["*.[hc]", + "tests/suites/*.function", + "scripts/data_files/*.fmt"] + output = subprocess.check_output(["git", "ls-files"] + file_patterns, + universal_newlines=True) + src_files = output.split() + if since: + # get all files changed in commits since the starting point + cmd = ["git", "log", since + "..HEAD", "--name-only", "--pretty=", "--"] + src_files + output = subprocess.check_output(cmd, universal_newlines=True) + committed_changed_files = output.split() + # and also get all files with uncommitted changes + cmd = ["git", "diff", "--name-only", "--"] + src_files + output = subprocess.check_output(cmd, universal_newlines=True) + uncommitted_changed_files = output.split() + src_files = list(set(committed_changed_files + uncommitted_changed_files)) + + generated_files = list_generated_files() + # Don't correct style for third-party files (and, for simplicity, + # companion files in the same subtree), or for automatically + # generated files (we're correcting the templates instead). + src_files = [filename for filename in src_files + if not (filename.startswith("3rdparty/") or + filename in generated_files or + is_file_autogenerated(filename))] + return src_files + +def get_uncrustify_version() -> str: + """ + Get the version string from Uncrustify + """ + result = subprocess.run([UNCRUSTIFY_EXE, "--version"], + stdout=subprocess.PIPE, stderr=subprocess.PIPE, + check=False) + if result.returncode != 0: + print_err("Could not get Uncrustify version:", str(result.stderr, "utf-8")) + return "" + else: + return str(result.stdout, "utf-8") + +def check_style_is_correct(src_file_list: List[str]) -> bool: + """ + Check the code style and output a diff for each file whose style is + incorrect. 
+ """ + style_correct = True + for src_file in src_file_list: + uncrustify_cmd = [UNCRUSTIFY_EXE] + UNCRUSTIFY_ARGS + [src_file] + result = subprocess.run(uncrustify_cmd, stdout=subprocess.PIPE, + stderr=subprocess.PIPE, check=False) + if result.returncode != 0: + print_err("Uncrustify returned " + str(result.returncode) + + " correcting file " + src_file) + return False + + # Uncrustify makes changes to the code and places the result in a new + # file with the extension ".uncrustify". To get the changes (if any) + # simply diff the 2 files. + diff_cmd = ["diff", "-u", src_file, src_file + ".uncrustify"] + cp = subprocess.run(diff_cmd, check=False) + + if cp.returncode == 1: + print(src_file + " changed - code style is incorrect.") + style_correct = False + elif cp.returncode != 0: + raise subprocess.CalledProcessError(cp.returncode, cp.args, + cp.stdout, cp.stderr) + + # Tidy up artifact + os.remove(src_file + ".uncrustify") + + return style_correct + +def fix_style_single_pass(src_file_list: List[str]) -> bool: + """ + Run Uncrustify once over the source files. + """ + code_change_args = UNCRUSTIFY_ARGS + ["--no-backup"] + for src_file in src_file_list: + uncrustify_cmd = [UNCRUSTIFY_EXE] + code_change_args + [src_file] + result = subprocess.run(uncrustify_cmd, check=False) + if result.returncode != 0: + print_err("Uncrustify with file returned: " + + str(result.returncode) + " correcting file " + + src_file) + return False + return True + +def fix_style(src_file_list: List[str]) -> int: + """ + Fix the code style. This takes 2 passes of Uncrustify. + """ + if not fix_style_single_pass(src_file_list): + return 1 + if not fix_style_single_pass(src_file_list): + return 1 + + # Guard against future changes that cause the codebase to require + # more passes. 
+ if not check_style_is_correct(src_file_list): + print_err("Code style still incorrect after second run of Uncrustify.") + return 1 + else: + return 0 + +def main() -> int: + """ + Main with command line arguments. + """ + uncrustify_version = get_uncrustify_version().strip() + if UNCRUSTIFY_SUPPORTED_VERSION not in uncrustify_version: + print("Warning: Using unsupported Uncrustify version '" + + uncrustify_version + "'") + print("Note: The only supported version is " + + UNCRUSTIFY_SUPPORTED_VERSION) + + parser = argparse.ArgumentParser() + parser.add_argument('-f', '--fix', action='store_true', + help=('modify source files to fix the code style ' + '(default: print diff, do not modify files)')) + parser.add_argument('-s', '--since', metavar='COMMIT', const='development', nargs='?', + help=('only check files modified since the specified commit' + ' (e.g. --since=HEAD~3 or --since=development). If no' + ' commit is specified, default to development.')) + # --subset is almost useless: it only matters if there are no files + # ('code_style.py' without arguments checks all files known to Git, + # 'code_style.py --subset' does nothing). In particular, + # 'code_style.py --fix --subset ...' is intended as a stable ("porcelain") + # way to restyle a possibly empty set of files. 
+ parser.add_argument('--subset', action='store_true', + help='only check the specified files (default with non-option arguments)') + parser.add_argument('operands', nargs='*', metavar='FILE', + help='files to check (files MUST be known to git, if none: check all)') + + args = parser.parse_args() + + covered = frozenset(get_src_files(args.since)) + # We only check files that are known to git + if args.subset or args.operands: + src_files = [f for f in args.operands if f in covered] + skip_src_files = [f for f in args.operands if f not in covered] + if skip_src_files: + print_skip(skip_src_files) + else: + src_files = list(covered) + + if args.fix: + # Fix mode + return fix_style(src_files) + else: + # Check mode + if check_style_is_correct(src_files): + print("Checked {} files, style ok.".format(len(src_files))) + return 0 + else: + return 1 + +if __name__ == '__main__': + sys.exit(main()) diff --git a/scripts/framework_dev/__init__.py b/scripts/framework_dev/__init__.py new file mode 100644 index 000000000..df48c282c --- /dev/null +++ b/scripts/framework_dev/__init__.py @@ -0,0 +1,3 @@ +# This file needs to exist to make framework_dev a package. +# Among other things, this allows modules in this directory to make +# relative imports. diff --git a/scripts/framework_dev/asymmetric_key_data.py b/scripts/framework_dev/asymmetric_key_data.py new file mode 100644 index 000000000..8ca675878 --- /dev/null +++ b/scripts/framework_dev/asymmetric_key_data.py @@ -0,0 +1,157 @@ +"""Sample key material for asymmetric key types. + +Meant for use in crypto_knowledge.py. 
+""" + +# Copyright The Mbed TLS Contributors +# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later +# + +import binascii +import re +from typing import Dict + +STR_TRANS_REMOVE_BLANKS = str.maketrans('', '', ' \t\n\r') + +def unhexlify(text: str) -> bytes: + return binascii.unhexlify(text.translate(STR_TRANS_REMOVE_BLANKS)) + +def construct_asymmetric_key_data(src) -> Dict[str, Dict[int, bytes]]: + """Split key pairs into separate table entries and convert hex to bytes. + + Input format: src[abbreviated_type][size] = (private_key_hex, public_key_hex) + Output format: dst['PSA_KEY_TYPE_xxx'][size] = key_bytes + """ + dst = {} #type: Dict[str, Dict[int, bytes]] + for typ in src: + private = 'PSA_KEY_TYPE_' + re.sub(r'(\(|\Z)', r'_KEY_PAIR\1', typ, 1) + public = 'PSA_KEY_TYPE_' + re.sub(r'(\(|\Z)', r'_PUBLIC_KEY\1', typ, 1) + dst[private] = {} + dst[public] = {} + for size in src[typ]: + dst[private][size] = unhexlify(src[typ][size][0]) + dst[public][size] = unhexlify(src[typ][size][1]) + return dst + +## These are valid keys that don't try to exercise any edge cases. They're +## either test vectors from some specification, or randomly generated. All +## pairs consist of a private key and its public key. 
+#pylint: disable=line-too-long +ASYMMETRIC_KEY_DATA = construct_asymmetric_key_data({ + 'ECC(PSA_ECC_FAMILY_SECP_K1)': { + 192: ("297ac1722ccac7589ecb240dc719842538ca974beb79f228", + "0426b7bb38da649ac2138fc050c6548b32553dab68afebc36105d325b75538c12323cb0764789ecb992671beb2b6bef2f5"), + 225: ("0024122bf020fa113f6c0ac978dfbd41f749257a9468febdbe0dc9f7e8", + "042cc7335f4b76042bed44ef45959a62aa215f7a5ff0c8111b8c44ed654ee71c1918326ad485b2d599fe2a6eab096ee26d977334d2bac6d61d"), + 256: ("7fa06fa02d0e911b9a47fdc17d2d962ca01e2f31d60c6212d0ed7e3bba23a7b9", + "045c39154579efd667adc73a81015a797d2c8682cdfbd3c3553c4a185d481cdc50e42a0e1cbc3ca29a32a645e927f54beaed14c9dbbf8279d725f5495ca924b24d"), + }, + 'ECC(PSA_ECC_FAMILY_SECP_R1)': { + 192: ("d83b57a59c51358d9c8bbb898aff507f44dd14cf16917190", + "04e35fcbee11cec3154f80a1a61df7d7612de4f2fd70c5608d0ee3a4a1a5719471adb33966dd9b035fdb774feeba94b04c"), + 224: ("872f203b3ad35b7f2ecc803c3a0e1e0b1ed61cc1afe71b189cd4c995", + "046f00eadaa949fee3e9e1c7fa1247eecec86a0dce46418b9bd3117b981d4bd0ae7a990de912f9d060d6cb531a42d22e394ac29e81804bf160"), + 256: ("49c9a8c18c4b885638c431cf1df1c994131609b580d4fd43a0cab17db2f13eee", + "047772656f814b399279d5e1f1781fac6f099a3c5ca1b0e35351834b08b65e0b572590cdaf8f769361bcf34acfc11e5e074e8426bdde04be6e653945449617de45"), + 384: ("3f5d8d9be280b5696cc5cc9f94cf8af7e6b61dd6592b2ab2b3a4c607450417ec327dcdcaed7c10053d719a0574f0a76a", + "04d9c662b50ba29ca47990450e043aeaf4f0c69b15676d112f622a71c93059af999691c5680d2b44d111579db12f4a413a2ed5c45fcfb67b5b63e00b91ebe59d09a6b1ac2c0c4282aa12317ed5914f999bc488bb132e8342cc36f2ca5e3379c747"), + 521: ("01b1b6ad07bb79e7320da59860ea28e055284f6058f279de666e06d435d2af7bda28d99fa47b7dd0963e16b0073078ee8b8a38d966a582f46d19ff95df3ad9685aae", + 
"04001de142d54f69eb038ee4b7af9d3ca07736fd9cf719eb354d69879ee7f3c136fb0fbf9f08f86be5fa128ec1a051d3e6c643e85ada8ffacf3663c260bd2c844b6f5600cee8e48a9e65d09cadd89f235dee05f3b8a646be715f1f67d5b434e0ff23a1fc07ef7740193e40eeff6f3bcdfd765aa9155033524fe4f205f5444e292c4c2f6ac1"), + }, + 'ECC(PSA_ECC_FAMILY_SECP_R2)': { + 160: ("00bf539a1cdda0d7f71a50a3f98aec0a2e8e4ced1e", + "049570d541398665adb5cfa16f5af73b3196926bbd4b876bdb80f8eab20d0f540c22f4de9c140f6d7b"), + }, + 'ECC(PSA_ECC_FAMILY_SECT_K1)': { + 163: ("03ebc8fcded2d6ab72ec0f75bdb4fd080481273e71", + "0406f88f90b4b65950f06ce433afdb097e320f433dc2062b8a65db8fafd3c110f46bc45663fbf021ee7eb9"), + 233: ("41f08485ce587b06061c087e76e247c359de2ba9927ee013b2f1ed9ca8", + "0401e9d7189189f773bd8f71be2c10774ba18842434dfa9312595ea545104400f45a9d5675647513ba75b079fe66a29daac2ec86a6a5d4e75c5f290c1f"), + 239: ("1a8069ce2c2c8bdd7087f2a6ab49588797e6294e979495602ab9650b9c61", + "04068d76b9f4508762c2379db9ee8b87ad8d86d9535132ffba3b5680440cfa28eb133d4232faf1c9aba96af11aefe634a551440800d5f8185105d3072d"), + 283: ("006d627885dd48b9ec6facb5b3865377d755b75a5d51440e45211c1f600e15eff8a881a0", + "0405f48374debceaadb46ba385fd92048fcc5b9af1a1c90408bf94a68b9378df1cbfdfb6fb026a96bea06d8f181bf10c020adbcc88b6ecff96bdc564a9649c247cede601c4be63afc3"), + 409: ("3ff5e74d932fa77db139b7c948c81e4069c72c24845574064beea8976b70267f1c6f9a503e3892ea1dcbb71fcea423faa370a8", + "04012c587f69f68b308ba6dcb238797f4e22290ca939ae806604e2b5ab4d9caef5a74a98fd87c4f88d292dd39d92e556e16c6ecc3c019a105826eef507cd9a04119f54d5d850b3720b3792d5d03410e9105610f7e4b420166ed45604a7a1f229d80975ba6be2060e8b"), + 571: ("005008c97b4a161c0db1bac6452c72846d57337aa92d8ecb4a66eb01d2f29555ffb61a5317225dcc8ca6917d91789e227efc0bfe9eeda7ee21998cd11c3c9885056b0e55b4f75d51", + 
"04050172a7fd7adf98e4e2ed2742faa5cd12731a15fb0dbbdf75b1c3cc771a4369af6f2fa00e802735650881735759ea9c79961ded18e0daa0ac59afb1d513b5bbda9962e435f454fc020b4afe1445c2302ada07d295ec2580f8849b2dfa7f956b09b4cbe4c88d3b1c217049f75d3900d36df0fa12689256b58dd2ef784ebbeb0564600cf47a841485f8cf897a68accd5a"), + }, + 'ECC(PSA_ECC_FAMILY_SECT_R1)': { + 163: ("009b05dc82d46d64a04a22e6e5ca70ca1231e68c50", + "0400465eeb9e7258b11e33c02266bfe834b20bcb118700772796ee4704ec67651bd447e3011959a79a04cb"), + 233: ("00e5e42834e3c78758088b905deea975f28dc20ef6173e481f96e88afe7f", + "0400cd68c8af4430c92ec7a7048becfdf00a6bae8d1b4c37286f2d336f2a0e017eca3748f4ad6d435c85867aa014eea1bd6d9d005bbd8319cab629001d"), + 283: ("004cecad915f6f3c9bbbd92d1eb101eda23f16c7dad60a57c87c7e1fd2b29b22f6d666ad", + "04052f9ff887254c2d1440ba9e30f13e2185ba53c373b2c410dae21cf8c167f796c08134f601cbc4c570bffbc2433082cf4d9eb5ba173ecb8caec15d66a02673f60807b2daa729b765"), + 409: ("00c22422d265721a3ae2b3b2baeb77bee50416e19877af97b5fc1c700a0a88916ecb9050135883accb5e64edc77a3703f4f67a64", + "0401aa25466b1d291846db365957b25431591e50d9c109fe2106e93bb369775896925b15a7bfec397406ab4fe6f6b1a13bf8fdcb9300fa5500a813228676b0a6c572ed96b0f4aec7e87832e7e20f17ca98ecdfd36f59c82bddb8665f1f357a73900e827885ec9e1f22"), + 571: ("026ac1cdf92a13a1b8d282da9725847908745138f5c6706b52d164e3675fcfbf86fc3e6ab2de732193267db029dd35a0599a94a118f480231cfc6ccca2ebfc1d8f54176e0f5656a1", + "040708f3403ee9948114855c17572152a08f8054d486defef5f29cbffcfb7cfd9280746a1ac5f751a6ad902ec1e0525120e9be56f03437af196fbe60ee7856e3542ab2cf87880632d80290e39b1a2bd03c6bbf6225511c567bd2ff41d2325dc58346f2b60b1feee4dc8b2af2296c2dc52b153e0556b5d24152b07f690c3fa24e4d1d19efbdeb1037833a733654d2366c74"), + }, + 'ECC(PSA_ECC_FAMILY_SECT_R2)': { + 163: ("0210b482a458b4822d0cb21daa96819a67c8062d34", + "0403692601144c32a6cfa369ae20ae5d43c1c764678c037bafe80c6fd2e42b7ced96171d9c5367fd3dca6f"), + }, + 'ECC(PSA_ECC_FAMILY_BRAINPOOL_P_R1)': { + 160: ("69502c4fdaf48d4fa617bdd24498b0406d0eeaac", + 
"04d4b9186816358e2f9c59cf70748cb70641b22fbab65473db4b4e22a361ed7e3de7e8a8ddc4130c5c"), + 192: ("1688a2c5fbf4a3c851d76a98c3ec88f445a97996283db59f", + "043fdd168c179ff5363dd71dcd58de9617caad791ae0c37328be9ca0bfc79cebabf6a95d1c52df5b5f3c8b1a2441cf6c88"), + 224: ("a69835dafeb5da5ab89c59860dddebcfd80b529a99f59b880882923c", + "045fbea378fc8583b3837e3f21a457c31eaf20a54e18eb11d104b3adc47f9d1c97eb9ea4ac21740d70d88514b98bf0bc31addac1d19c4ab3cc"), + 256: ("2161d6f2db76526fa62c16f356a80f01f32f776784b36aa99799a8b7662080ff", + "04768c8cae4abca6306db0ed81b0c4a6215c378066ec6d616c146e13f1c7df809b96ab6911c27d8a02339f0926840e55236d3d1efbe2669d090e4c4c660fada91d"), + 320: ("61b8daa7a6e5aa9fccf1ef504220b2e5a5b8c6dc7475d16d3172d7db0b2778414e4f6e8fa2032ead", + "049caed8fb4742956cc2ad12a9a1c995e21759ef26a07bc2054136d3d2f28bb331a70e26c4c687275ab1f434be7871e115d2350c0c5f61d4d06d2bcdb67f5cb63fdb794e5947c87dc6849a58694e37e6cd"), + 384: ("3dd92e750d90d7d39fc1885cd8ad12ea9441f22b9334b4d965202adb1448ce24c5808a85dd9afc229af0a3124f755bcb", + "04719f9d093a627e0d350385c661cebf00c61923566fe9006a3107af1d871bc6bb68985fd722ea32be316f8e783b7cd1957785f66cfc0cb195dd5c99a8e7abaa848553a584dfd2b48e76d445fe00dd8be59096d877d4696d23b4bc8db14724e66a"), + 512: ("372c9778f69f726cbca3f4a268f16b4d617d10280d79a6a029cd51879fe1012934dfe5395455337df6906dc7d6d2eea4dbb2065c0228f73b3ed716480e7d71d2", + "0438b7ec92b61c5c6c7fbc28a4ec759d48fcd4e2e374defd5c4968a54dbef7510e517886fbfc38ea39aa529359d70a7156c35d3cbac7ce776bdb251dd64bce71234424ee7049eed072f0dbc4d79996e175d557e263763ae97095c081e73e7db2e38adc3d4c9a0487b1ede876dc1fca61c902e9a1d8722b8612928f18a24845591a"), + }, + 'ECC(PSA_ECC_FAMILY_MONTGOMERY)': { + 255: ("70076d0a7318a57d3c16c17251b26645df4c2f87ebc0992ab177fba51db92c6a", + "8520f0098930a754748b7ddcb43ef75a0dbf3a0d26381af4eba4a98eaa9b4e6a"), + 448: ("e4e49f52686f9ee3b638528f721f1596196ffd0a1cddb64c3f216f06541805cfeb1a286dc78018095cdfec050e8007b5f4908962ba20d6c1", + 
"c0d3a5a2b416a573dc9909f92f134ac01323ab8f8e36804e578588ba2d09fe7c3e737f771ca112825b548a0ffded6d6a2fd09a3e77dec30e"), + }, + 'ECC(PSA_ECC_FAMILY_TWISTED_EDWARDS)': { + 255: ("9d61b19deffd5a60ba844af492ec2cc44449c5697b326919703bac031cae7f60", + "d75a980182b10ab7d54bfed3c964073a0ee172f3daa62325af021a68f707511a"), + 448: ("6c82a562cb808d10d632be89c8513ebf6c929f34ddfa8c9f63c9960ef6e348a3528c8a3fcc2f044e39a3fc5b94492f8f032e7549a20098f95b", + "5fd7449b59b461fd2ce787ec616ad46a1da1342485a70e1f8a0ea75d80e96778edf124769b46c7061bd6783df1e50f6cd1fa1abeafe8256180"), + }, + 'RSA': { + 1024: (""" +3082025e + 020100 + 02818100af057d396ee84fb75fdbb5c2b13c7fe5a654aa8aa2470b541ee1feb0b12d25c79711531249e1129628042dbbb6c120d1443524ef4c0e6e1d8956eeb2077af12349ddeee54483bc06c2c61948cd02b202e796aebd94d3a7cbf859c2c1819c324cb82b9cd34ede263a2abffe4733f077869e8660f7d6834da53d690ef7985f6bc3 + 0203010001 + 02818100874bf0ffc2f2a71d14671ddd0171c954d7fdbf50281e4f6d99ea0e1ebcf82faa58e7b595ffb293d1abe17f110b37c48cc0f36c37e84d876621d327f64bbe08457d3ec4098ba2fa0a319fba411c2841ed7be83196a8cdf9daa5d00694bc335fc4c32217fe0488bce9cb7202e59468b1ead119000477db2ca797fac19eda3f58c1 + 024100e2ab760841bb9d30a81d222de1eb7381d82214407f1b975cbbfe4e1a9467fd98adbd78f607836ca5be1928b9d160d97fd45c12d6b52e2c9871a174c66b488113 + 024100c5ab27602159ae7d6f20c3c2ee851e46dc112e689e28d5fcbbf990a99ef8a90b8bb44fd36467e7fc1789ceb663abda338652c3c73f111774902e840565927091 + 024100b6cdbd354f7df579a63b48b3643e353b84898777b48b15f94e0bfc0567a6ae5911d57ad6409cf7647bf96264e9bd87eb95e263b7110b9a1f9f94acced0fafa4d + 024071195eec37e8d257decfc672b07ae639f10cbb9b0c739d0c809968d644a94e3fd6ed9287077a14583f379058f76a8aecd43c62dc8c0f41766650d725275ac4a1 + 024100bb32d133edc2e048d463388b7be9cb4be29f4b6250be603e70e3647501c97ddde20a4e71be95fd5e71784e25aca4baf25be5738aae59bbfe1c997781447a2b24 +""", """ + 308189 + 
02818100af057d396ee84fb75fdbb5c2b13c7fe5a654aa8aa2470b541ee1feb0b12d25c79711531249e1129628042dbbb6c120d1443524ef4c0e6e1d8956eeb2077af12349ddeee54483bc06c2c61948cd02b202e796aebd94d3a7cbf859c2c1819c324cb82b9cd34ede263a2abffe4733f077869e8660f7d6834da53d690ef7985f6bc3 + 0203010001 +"""), + 1536: (""" +3082037b + 020100 + 0281c100c870feb6ca6b1d2bd9f2dd99e20f1fe2d7e5192de662229dbe162bd1ba66336a7182903ca0b72796cd441c83d24bcdc3e9a2f5e4399c8a043f1c3ddf04754a66d4cfe7b3671a37dd31a9b4c13bfe06ee90f9d94ddaa06de67a52ac863e68f756736ceb014405a6160579640f831dddccc34ad0b05070e3f9954a58d1815813e1b83bcadba814789c87f1ef2ba5d738b793ec456a67360eea1b5faf1c7cc7bf24f3b2a9d0f8958b1096e0f0c335f8888d0c63a51c3c0337214fa3f5efdf6dcc35 + 0203010001 + 0281c06d2d670047973a87752a9d5bc14f3dae00acb01f593aa0e24cf4a49f932931de4bbfb332e2d38083da80bc0b6d538edba479f7f77d0deffb4a28e6e67ff6273585bb4cd862535c946605ab0809d65f0e38f76e4ec2c3d9b8cd6e14bcf667943892cd4b34cc6420a439abbf3d7d35ef73976dd6f9cbde35a51fa5213f0107f83e3425835d16d3c9146fc9e36ce75a09bb66cdff21dd5a776899f1cb07e282cca27be46510e9c799f0d8db275a6be085d9f3f803218ee3384265bfb1a3640e8ca1 + 026100e6848c31d466fffefc547e3a3b0d3785de6f78b0dd12610843512e495611a0675509b1650b27415009838dd8e68eec6e7530553b637d602424643b33e8bc5b762e1799bc79d56b13251d36d4f201da2182416ce13574e88278ff04467ad602d9 + 026100de994fdf181f02be2bf9e5f5e4e517a94993b827d1eaf609033e3a6a6f2396ae7c44e9eb594cf1044cb3ad32ea258f0c82963b27bb650ed200cde82cb993374be34be5b1c7ead5446a2b82a4486e8c1810a0b01551609fb0841d474bada802bd + 026076ddae751b73a959d0bfb8ff49e7fcd378e9be30652ecefe35c82cb8003bc29cc60ae3809909baf20c95db9516fe680865417111d8b193dbcf30281f1249de57c858bf1ba32f5bb1599800e8398a9ef25c7a642c95261da6f9c17670e97265b1 + 0260732482b837d5f2a9443e23c1aa0106d83e82f6c3424673b5fdc3769c0f992d1c5c93991c7038e882fcda04414df4d7a5f4f698ead87851ce37344b60b72d7b70f9c60cae8566e7a257f8e1bef0e89df6e4c2f9d24d21d9f8889e4c7eccf91751 + 
026009050d94493da8f00a4ddbe9c800afe3d44b43f78a48941a79b2814a1f0b81a18a8b2347642a03b27998f5a18de9abc9ae0e54ab8294feac66dc87e854cce6f7278ac2710cb5878b592ffeb1f4f0a1853e4e8d1d0561b6efcc831a296cf7eeaf +""", """ +3081c9 + 0281c100c870feb6ca6b1d2bd9f2dd99e20f1fe2d7e5192de662229dbe162bd1ba66336a7182903ca0b72796cd441c83d24bcdc3e9a2f5e4399c8a043f1c3ddf04754a66d4cfe7b3671a37dd31a9b4c13bfe06ee90f9d94ddaa06de67a52ac863e68f756736ceb014405a6160579640f831dddccc34ad0b05070e3f9954a58d1815813e1b83bcadba814789c87f1ef2ba5d738b793ec456a67360eea1b5faf1c7cc7bf24f3b2a9d0f8958b1096e0f0c335f8888d0c63a51c3c0337214fa3f5efdf6dcc35 + 0203010001 +"""), + }, +}) diff --git a/scripts/framework_dev/bignum_common.py b/scripts/framework_dev/bignum_common.py new file mode 100644 index 000000000..eebc858b2 --- /dev/null +++ b/scripts/framework_dev/bignum_common.py @@ -0,0 +1,406 @@ +"""Common features for bignum in test generation framework.""" +# Copyright The Mbed TLS Contributors +# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later +# + +from abc import abstractmethod +import enum +from typing import Iterator, List, Tuple, TypeVar, Any +from copy import deepcopy +from itertools import chain +from math import ceil + +from . import test_case +from . import test_data_generation +from .bignum_data import INPUTS_DEFAULT, MODULI_DEFAULT + +T = TypeVar('T') #pylint: disable=invalid-name + +def invmod(a: int, n: int) -> int: + """Return inverse of a to modulo n. + + Equivalent to pow(a, -1, n) in Python 3.8+. Implementation is equivalent + to long_invmod() in CPython. 
+ """ + b, c = 1, 0 + while n: + q, r = divmod(a, n) + a, b, c, n = n, c, b - q*c, r + # at this point a is the gcd of the original inputs + if a == 1: + return b + raise ValueError("Not invertible") + +def invmod_positive(a: int, n: int) -> int: + """Return a non-negative inverse of a to modulo n.""" + inv = invmod(a, n) + return inv if inv >= 0 else inv + n + +def hex_to_int(val: str) -> int: + """Implement the syntax accepted by mbedtls_test_read_mpi(). + + This is a superset of what is accepted by mbedtls_test_read_mpi_core(). + """ + if val in ['', '-']: + return 0 + return int(val, 16) + +def quote_str(val: str) -> str: + return "\"{}\"".format(val) + +def bound_mpi(val: int, bits_in_limb: int) -> int: + """First number exceeding number of limbs needed for given input value.""" + return bound_mpi_limbs(limbs_mpi(val, bits_in_limb), bits_in_limb) + +def bound_mpi_limbs(limbs: int, bits_in_limb: int) -> int: + """First number exceeding maximum of given number of limbs.""" + bits = bits_in_limb * limbs + return 1 << bits + +def limbs_mpi(val: int, bits_in_limb: int) -> int: + """Return the number of limbs required to store value.""" + bit_length = max(val.bit_length(), 1) + return (bit_length + bits_in_limb - 1) // bits_in_limb + +def combination_pairs(values: List[T]) -> List[Tuple[T, T]]: + """Return all pair combinations from input values.""" + return [(x, y) for x in values for y in values] + +def bits_to_limbs(bits: int, bits_in_limb: int) -> int: + """ Return the appropriate ammount of limbs needed to store + a number contained in input bits""" + return ceil(bits / bits_in_limb) + +def hex_digits_for_limb(limbs: int, bits_in_limb: int) -> int: + """ Return the hex digits need for a number of limbs. """ + return 2 * ((limbs * bits_in_limb) // 8) + +def hex_digits_max_int(val: str, bits_in_limb: int) -> int: + """ Return the first number exceeding maximum the limb space + required to store the input hex-string value. 
This method + weights on the input str_len rather than numerical value + and works with zero-padded inputs""" + n = ((1 << (len(val) * 4)) - 1) + l = limbs_mpi(n, bits_in_limb) + return bound_mpi_limbs(l, bits_in_limb) + +def zfill_match(reference: str, target: str) -> str: + """ Zero pad target hex-string to match the limb size of + the reference input """ + lt = len(target) + lr = len(reference) + target_len = lr if lt < lr else lt + return "{:x}".format(int(target, 16)).zfill(target_len) + +class OperationCommon(test_data_generation.BaseTest): + """Common features for bignum binary operations. + + This adds functionality common in binary operation tests. + + Attributes: + symbol: Symbol to use for the operation in case description. + input_values: List of values to use as test case inputs. These are + combined to produce pairs of values. + input_cases: List of tuples containing pairs of test case inputs. This + can be used to implement specific pairs of inputs. + unique_combinations_only: Boolean to select if test case combinations + must be unique. If True, only A,B or B,A would be included as a test + case. If False, both A,B and B,A would be included. + input_style: Controls the way how test data is passed to the functions + in the generated test cases. "variable" passes them as they are + defined in the python source. "arch_split" pads the values with + zeroes depending on the architecture/limb size. If this is set, + test cases are generated for all architectures. + arity: the number of operands for the operation. Currently supported + values are 1 and 2. 
+ """ + symbol = "" + input_values = INPUTS_DEFAULT # type: List[str] + input_cases = [] # type: List[Any] + dependencies = [] # type: List[Any] + unique_combinations_only = False + input_styles = ["variable", "fixed", "arch_split"] # type: List[str] + input_style = "variable" # type: str + limb_sizes = [32, 64] # type: List[int] + arities = [1, 2] + arity = 2 + suffix = False # for arity = 1, symbol can be prefix (default) or suffix + + def __init__(self, val_a: str, val_b: str = "0", bits_in_limb: int = 32) -> None: + self.val_a = val_a + self.val_b = val_b + # Setting the int versions here as opposed to making them @properties + # provides earlier/more robust input validation. + self.int_a = hex_to_int(val_a) + self.int_b = hex_to_int(val_b) + self.dependencies = deepcopy(self.dependencies) + if bits_in_limb not in self.limb_sizes: + raise ValueError("Invalid number of bits in limb!") + if self.input_style == "arch_split": + self.dependencies.append("MBEDTLS_HAVE_INT{:d}".format(bits_in_limb)) + self.bits_in_limb = bits_in_limb + + @property + def boundary(self) -> int: + if self.arity == 1: + return self.int_a + elif self.arity == 2: + return max(self.int_a, self.int_b) + raise ValueError("Unsupported number of operands!") + + @property + def limb_boundary(self) -> int: + return bound_mpi(self.boundary, self.bits_in_limb) + + @property + def limbs(self) -> int: + return limbs_mpi(self.boundary, self.bits_in_limb) + + @property + def hex_digits(self) -> int: + return hex_digits_for_limb(self.limbs, self.bits_in_limb) + + def format_arg(self, val: str) -> str: + if self.input_style not in self.input_styles: + raise ValueError("Unknown input style!") + if self.input_style == "variable": + return val + else: + return val.zfill(self.hex_digits) + + def format_result(self, res: int) -> str: + res_str = '{:x}'.format(res) + return quote_str(self.format_arg(res_str)) + + @property + def arg_a(self) -> str: + return self.format_arg(self.val_a) + + @property + def 
arg_b(self) -> str: + if self.arity == 1: + raise AttributeError("Operation is unary and doesn't have arg_b!") + return self.format_arg(self.val_b) + + def arguments(self) -> List[str]: + args = [quote_str(self.arg_a)] + if self.arity == 2: + args.append(quote_str(self.arg_b)) + return args + self.result() + + def description(self) -> str: + """Generate a description for the test case. + + If not set, case_description uses the form A `symbol` B, where symbol + is used to represent the operation. Descriptions of each value are + generated to provide some context to the test case. + """ + if not self.case_description: + if self.arity == 1: + format_string = "{1:x} {0}" if self.suffix else "{0} {1:x}" + self.case_description = format_string.format( + self.symbol, self.int_a + ) + elif self.arity == 2: + self.case_description = "{:x} {} {:x}".format( + self.int_a, self.symbol, self.int_b + ) + return super().description() + + @property + def is_valid(self) -> bool: + return True + + @abstractmethod + def result(self) -> List[str]: + """Get the result of the operation. + + This could be calculated during initialization and stored as `_result` + and then returned, or calculated when the method is called. + """ + raise NotImplementedError + + @classmethod + def get_value_pairs(cls) -> Iterator[Tuple[str, str]]: + """Generator to yield pairs of inputs. + + Combinations are first generated from all input values, and then + specific cases provided. 
+ """ + if cls.arity == 1: + yield from ((a, "0") for a in cls.input_values) + elif cls.arity == 2: + if cls.unique_combinations_only: + yield from combination_pairs(cls.input_values) + else: + yield from ( + (a, b) + for a in cls.input_values + for b in cls.input_values + ) + else: + raise ValueError("Unsupported number of operands!") + + @classmethod + def generate_function_tests(cls) -> Iterator[test_case.TestCase]: + if cls.input_style not in cls.input_styles: + raise ValueError("Unknown input style!") + if cls.arity not in cls.arities: + raise ValueError("Unsupported number of operands!") + if cls.input_style == "arch_split": + test_objects = (cls(a, b, bits_in_limb=bil) + for a, b in cls.get_value_pairs() + for bil in cls.limb_sizes) + special_cases = (cls(*args, bits_in_limb=bil) # type: ignore + for args in cls.input_cases + for bil in cls.limb_sizes) + else: + test_objects = (cls(a, b) + for a, b in cls.get_value_pairs()) + special_cases = (cls(*args) for args in cls.input_cases) + yield from (valid_test_object.create_test_case() + for valid_test_object in filter( + lambda test_object: test_object.is_valid, + chain(test_objects, special_cases) + ) + ) + + +class ModulusRepresentation(enum.Enum): + """Representation selector of a modulus.""" + # Numerical values aligned with the type mbedtls_mpi_mod_rep_selector + INVALID = 0 + MONTGOMERY = 2 + OPT_RED = 3 + + def symbol(self) -> str: + """The C symbol for this representation selector.""" + return 'MBEDTLS_MPI_MOD_REP_' + self.name + + @classmethod + def supported_representations(cls) -> List['ModulusRepresentation']: + """Return all representations that are supported in positive test cases.""" + return [cls.MONTGOMERY, cls.OPT_RED] + + +class ModOperationCommon(OperationCommon): + #pylint: disable=abstract-method + """Target for bignum mod_raw test case generation.""" + moduli = MODULI_DEFAULT # type: List[str] + montgomery_form_a = False + disallow_zero_a = False + + def __init__(self, val_n: str, val_a: 
str, val_b: str = "0", + bits_in_limb: int = 64) -> None: + super().__init__(val_a=val_a, val_b=val_b, bits_in_limb=bits_in_limb) + self.val_n = val_n + # Setting the int versions here as opposed to making them @properties + # provides earlier/more robust input validation. + self.int_n = hex_to_int(val_n) + + def to_montgomery(self, val: int) -> int: + return (val * self.r) % self.int_n + + def from_montgomery(self, val: int) -> int: + return (val * self.r_inv) % self.int_n + + def convert_from_canonical(self, canonical: int, + rep: ModulusRepresentation) -> int: + """Convert values from canonical representation to the given representation.""" + if rep is ModulusRepresentation.MONTGOMERY: + return self.to_montgomery(canonical) + elif rep is ModulusRepresentation.OPT_RED: + return canonical + else: + raise ValueError('Modulus representation not supported: {}' + .format(rep.name)) + + @property + def boundary(self) -> int: + return self.int_n + + @property + def arg_a(self) -> str: + if self.montgomery_form_a: + value_a = self.to_montgomery(self.int_a) + else: + value_a = self.int_a + return self.format_arg('{:x}'.format(value_a)) + + @property + def arg_n(self) -> str: + return self.format_arg(self.val_n) + + def format_arg(self, val: str) -> str: + return super().format_arg(val).zfill(self.hex_digits) + + def arguments(self) -> List[str]: + return [quote_str(self.arg_n)] + super().arguments() + + @property + def r(self) -> int: # pylint: disable=invalid-name + l = limbs_mpi(self.int_n, self.bits_in_limb) + return bound_mpi_limbs(l, self.bits_in_limb) + + @property + def r_inv(self) -> int: + return invmod(self.r, self.int_n) + + @property + def r2(self) -> int: # pylint: disable=invalid-name + return pow(self.r, 2) + + @property + def is_valid(self) -> bool: + if self.int_a >= self.int_n: + return False + if self.disallow_zero_a and self.int_a == 0: + return False + if self.arity == 2 and self.int_b >= self.int_n: + return False + return True + + def 
description(self) -> str: + """Generate a description for the test case. + + It uses the form A `symbol` B mod N, where symbol is used to represent + the operation. + """ + + if not self.case_description: + return super().description() + " mod {:x}".format(self.int_n) + return super().description() + + @classmethod + def input_cases_args(cls) -> Iterator[Tuple[Any, Any, Any]]: + if cls.arity == 1: + yield from ((n, a, "0") for a, n in cls.input_cases) + elif cls.arity == 2: + yield from ((n, a, b) for a, b, n in cls.input_cases) + else: + raise ValueError("Unsupported number of operands!") + + @classmethod + def generate_function_tests(cls) -> Iterator[test_case.TestCase]: + if cls.input_style not in cls.input_styles: + raise ValueError("Unknown input style!") + if cls.arity not in cls.arities: + raise ValueError("Unsupported number of operands!") + if cls.input_style == "arch_split": + test_objects = (cls(n, a, b, bits_in_limb=bil) + for n in cls.moduli + for a, b in cls.get_value_pairs() + for bil in cls.limb_sizes) + special_cases = (cls(*args, bits_in_limb=bil) + for args in cls.input_cases_args() + for bil in cls.limb_sizes) + else: + test_objects = (cls(n, a, b) + for n in cls.moduli + for a, b in cls.get_value_pairs()) + special_cases = (cls(*args) for args in cls.input_cases_args()) + yield from (valid_test_object.create_test_case() + for valid_test_object in filter( + lambda test_object: test_object.is_valid, + chain(test_objects, special_cases) + )) diff --git a/scripts/framework_dev/bignum_core.py b/scripts/framework_dev/bignum_core.py new file mode 100644 index 000000000..909f6a306 --- /dev/null +++ b/scripts/framework_dev/bignum_core.py @@ -0,0 +1,896 @@ +"""Framework classes for generation of bignum core test cases.""" +# Copyright The Mbed TLS Contributors +# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later +# + +import random + +from typing import Dict, Iterator, List, Tuple + +from . import test_case +from . 
import test_data_generation +from . import bignum_common +from .bignum_data import ADD_SUB_DATA + +class BignumCoreTarget(test_data_generation.BaseTarget): + #pylint: disable=abstract-method, too-few-public-methods + """Target for bignum core test case generation.""" + target_basename = 'test_suite_bignum_core.generated' + + +class BignumCoreShiftR(BignumCoreTarget, test_data_generation.BaseTest): + """Test cases for mbedtls_bignum_core_shift_r().""" + count = 0 + test_function = "mpi_core_shift_r" + test_name = "Core shift right" + + DATA = [ + ('00', '0', [0, 1, 8]), + ('01', '1', [0, 1, 2, 8, 64]), + ('dee5ca1a7ef10a75', '64-bit', + list(range(11)) + [31, 32, 33, 63, 64, 65, 71, 72]), + ('002e7ab0070ad57001', '[leading 0 limb]', + [0, 1, 8, 63, 64]), + ('a1055eb0bb1efa1150ff', '80-bit', + [0, 1, 8, 63, 64, 65, 72, 79, 80, 81, 88, 128, 129, 136]), + ('020100000000000000001011121314151617', '138-bit', + [0, 1, 8, 9, 16, 72, 73, 136, 137, 138, 144]), + ] + + def __init__(self, input_hex: str, descr: str, count: int) -> None: + self.input_hex = input_hex + self.number_description = descr + self.shift_count = count + self.result = bignum_common.hex_to_int(input_hex) >> count + + def arguments(self) -> List[str]: + return ['"{}"'.format(self.input_hex), + str(self.shift_count), + '"{:0{}x}"'.format(self.result, len(self.input_hex))] + + def description(self) -> str: + return 'Core shift {} >> {}'.format(self.number_description, + self.shift_count) + + @classmethod + def generate_function_tests(cls) -> Iterator[test_case.TestCase]: + for input_hex, descr, counts in cls.DATA: + for count in counts: + yield cls(input_hex, descr, count).create_test_case() + + +class BignumCoreShiftL(BignumCoreTarget, bignum_common.ModOperationCommon): + """Test cases for mbedtls_bignum_core_shift_l().""" + + BIT_SHIFT_VALUES = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', + '1f', '20', '21', '3f', '40', '41', '47', '48', '4f', + '50', '51', '58', '80', '81', '88'] + DATA = ["0", 
"1", "40", "dee5ca1a7ef10a75", "a1055eb0bb1efa1150ff", + "002e7ab0070ad57001", "020100000000000000001011121314151617", + "1946e2958a85d8863ae21f4904fcc49478412534ed53eaf321f63f2a222" + "7a3c63acbf50b6305595f90cfa8327f6db80d986fe96080bcbb5df1bdbe" + "9b74fb8dedf2bddb3f8215b54dffd66409323bcc473e45a8fe9d08e77a51" + "1698b5dad0416305db7fcf"] + arity = 1 + test_function = "mpi_core_shift_l" + test_name = "Core shift(L)" + input_style = "arch_split" + symbol = "<<" + input_values = BIT_SHIFT_VALUES + moduli = DATA + + @property + def val_n_max_limbs(self) -> int: + """ Return the limb count required to store the maximum number that can + fit in a the number of digits used by val_n """ + m = bignum_common.hex_digits_max_int(self.val_n, self.bits_in_limb) - 1 + return bignum_common.limbs_mpi(m, self.bits_in_limb) + + def arguments(self) -> List[str]: + return [bignum_common.quote_str(self.val_n), + str(self.int_a) + ] + self.result() + + def description(self) -> str: + """ Format the output as: + #{count} {hex input} ({input bits} {limbs capacity}) << {bit shift} """ + bits = "({} bits in {} limbs)".format(self.int_n.bit_length(), self.val_n_max_limbs) + return "{} #{} {} {} {} {}".format(self.test_name, + self.count, + self.val_n, + bits, + self.symbol, + self.int_a) + + def format_result(self, res: int) -> str: + # Override to match zero-pading for leading digits between the output and input. 
+ res_str = bignum_common.zfill_match(self.val_n, "{:x}".format(res)) + return bignum_common.quote_str(res_str) + + def result(self) -> List[str]: + result = (self.int_n << self.int_a) + # Calculate if there is space for shifting to the left(leading zero limbs) + mx = bignum_common.hex_digits_max_int(self.val_n, self.bits_in_limb) + # If there are empty limbs ahead, adjust the bitmask accordingly + result = result & (mx - 1) + return [self.format_result(result)] + + @property + def is_valid(self) -> bool: + return True + + +class BignumCoreCTLookup(BignumCoreTarget, test_data_generation.BaseTest): + """Test cases for mbedtls_mpi_core_ct_uint_table_lookup().""" + test_function = "mpi_core_ct_uint_table_lookup" + test_name = "Constant time MPI table lookup" + + bitsizes = [ + (32, "One limb"), + (192, "Smallest curve sized"), + (512, "Largest curve sized"), + (2048, "Small FF/RSA sized"), + (4096, "Large FF/RSA sized"), + ] + + window_sizes = [0, 1, 2, 3, 4, 5, 6] + + def __init__(self, + bitsize: int, descr: str, window_size: int) -> None: + self.bitsize = bitsize + self.bitsize_description = descr + self.window_size = window_size + + def arguments(self) -> List[str]: + return [str(self.bitsize), str(self.window_size)] + + def description(self) -> str: + return '{} - {} MPI with {} bit window'.format( + BignumCoreCTLookup.test_name, + self.bitsize_description, + self.window_size + ) + + @classmethod + def generate_function_tests(cls) -> Iterator[test_case.TestCase]: + for bitsize, bitsize_description in cls.bitsizes: + for window_size in cls.window_sizes: + yield (cls(bitsize, bitsize_description, window_size) + .create_test_case()) + + +class BignumCoreAddAndAddIf(BignumCoreTarget, bignum_common.OperationCommon): + """Test cases for bignum core add and add-if.""" + count = 0 + symbol = "+" + test_function = "mpi_core_add_and_add_if" + test_name = "mpi_core_add_and_add_if" + input_style = "arch_split" + input_values = ADD_SUB_DATA + unique_combinations_only = True + 
+ def result(self) -> List[str]: + result = self.int_a + self.int_b + + carry, result = divmod(result, self.limb_boundary) + + return [ + self.format_result(result), + str(carry) + ] + + +class BignumCoreSub(BignumCoreTarget, bignum_common.OperationCommon): + """Test cases for bignum core sub.""" + count = 0 + input_style = "arch_split" + symbol = "-" + test_function = "mpi_core_sub" + test_name = "mbedtls_mpi_core_sub" + input_values = ADD_SUB_DATA + + def result(self) -> List[str]: + if self.int_a >= self.int_b: + result = self.int_a - self.int_b + carry = 0 + else: + result = self.limb_boundary + self.int_a - self.int_b + carry = 1 + return [ + self.format_result(result), + str(carry) + ] + + +class BignumCoreMLA(BignumCoreTarget, bignum_common.OperationCommon): + """Test cases for fixed-size multiply accumulate.""" + count = 0 + test_function = "mpi_core_mla" + test_name = "mbedtls_mpi_core_mla" + + input_values = [ + "0", "1", "fffe", "ffffffff", "100000000", "20000000000000", + "ffffffffffffffff", "10000000000000000", "1234567890abcdef0", + "fffffffffffffffffefefefefefefefe", + "100000000000000000000000000000000", + "1234567890abcdef01234567890abcdef0", + "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + "1234567890abcdef01234567890abcdef01234567890abcdef01234567890abcdef0", + ( + "4df72d07b4b71c8dacb6cffa954f8d88254b6277099308baf003fab73227f" + "34029643b5a263f66e0d3c3fa297ef71755efd53b8fb6cb812c6bbf7bcf17" + "9298bd9947c4c8b14324140a2c0f5fad7958a69050a987a6096e9f055fb38" + "edf0c5889eca4a0cfa99b45fbdeee4c696b328ddceae4723945901ec02507" + "6b12b" + ) + ] # type: List[str] + input_scalars = [ + "0", "3", "fe", "ff", "ffff", "10000", "ffffffff", "100000000", + "7f7f7f7f7f7f7f7f", "8000000000000000", "fffffffffffffffe" + ] # type: List[str] + + def __init__(self, val_a: str, val_b: str, val_s: str) -> None: + super().__init__(val_a, val_b) + self.arg_scalar = val_s + self.int_scalar = bignum_common.hex_to_int(val_s) + if 
bignum_common.limbs_mpi(self.int_scalar, 32) > 1: + self.dependencies = ["MBEDTLS_HAVE_INT64"] + + def arguments(self) -> List[str]: + return [ + bignum_common.quote_str(self.arg_a), + bignum_common.quote_str(self.arg_b), + bignum_common.quote_str(self.arg_scalar) + ] + self.result() + + def description(self) -> str: + """Override and add the additional scalar.""" + if not self.case_description: + self.case_description = "0x{} + 0x{} * 0x{}".format( + self.arg_a, self.arg_b, self.arg_scalar + ) + return super().description() + + def result(self) -> List[str]: + result = self.int_a + (self.int_b * self.int_scalar) + bound_val = max(self.int_a, self.int_b) + bound_4 = bignum_common.bound_mpi(bound_val, 32) + bound_8 = bignum_common.bound_mpi(bound_val, 64) + carry_4, remainder_4 = divmod(result, bound_4) + carry_8, remainder_8 = divmod(result, bound_8) + return [ + "\"{:x}\"".format(remainder_4), + "\"{:x}\"".format(carry_4), + "\"{:x}\"".format(remainder_8), + "\"{:x}\"".format(carry_8) + ] + + @classmethod + def get_value_pairs(cls) -> Iterator[Tuple[str, str]]: + """Generator to yield pairs of inputs. + + Combinations are first generated from all input values, and then + specific cases provided. 
+ """ + yield from super().get_value_pairs() + yield from cls.input_cases + + @classmethod + def generate_function_tests(cls) -> Iterator[test_case.TestCase]: + """Override for additional scalar input.""" + for a_value, b_value in cls.get_value_pairs(): + for s_value in cls.input_scalars: + cur_op = cls(a_value, b_value, s_value) + yield cur_op.create_test_case() + + +class BignumCoreMul(BignumCoreTarget, bignum_common.OperationCommon): + """Test cases for bignum core multiplication.""" + count = 0 + input_style = "arch_split" + symbol = "*" + test_function = "mpi_core_mul" + test_name = "mbedtls_mpi_core_mul" + arity = 2 + unique_combinations_only = True + + def format_arg(self, val: str) -> str: + return val + + def format_result(self, res: int) -> str: + res_str = '{:x}'.format(res) + a_limbs = bignum_common.limbs_mpi(self.int_a, self.bits_in_limb) + b_limbs = bignum_common.limbs_mpi(self.int_b, self.bits_in_limb) + hex_digits = bignum_common.hex_digits_for_limb(a_limbs + b_limbs, self.bits_in_limb) + return bignum_common.quote_str(self.format_arg(res_str).zfill(hex_digits)) + + def result(self) -> List[str]: + result = self.int_a * self.int_b + return [self.format_result(result)] + + +class BignumCoreMontmul(BignumCoreTarget, test_data_generation.BaseTest): + """Test cases for Montgomery multiplication.""" + count = 0 + test_function = "mpi_core_montmul" + test_name = "mbedtls_mpi_core_montmul" + + start_2_mpi4 = False + start_2_mpi8 = False + + replay_test_cases = [ + (2, 1, 1, 1, "19", "1", "1D"), (2, 1, 1, 1, "7", "1", "9"), + (2, 1, 1, 1, "4", "1", "9"), + ( + 12, 1, 6, 1, ( + "3C246D0E059A93A266288A7718419EC741661B474C58C032C5EDAF92709402" + "B07CC8C7CE0B781C641A1EA8DB2F4343" + ), "1", ( + "66A198186C18C10B2F5ED9B522752A9830B69916E535C8F047518A889A43A5" + "94B6BED27A168D31D4A52F88925AA8F5" + ) + ), ( + 8, 1, 4, 1, + "1E442976B0E63D64FCCE74B999E470CA9888165CB75BFA1F340E918CE03C6211", + "1", "B3A119602EE213CDE28581ECD892E0F592A338655DCE4CA88054B3D124D0E561" 
+ ), ( + 22, 1, 11, 1, ( + "7CF5AC97304E0B63C65413F57249F59994B0FED1D2A8D3D83ED5FA38560FFB" + "82392870D6D08F87D711917FD7537E13B7E125BE407E74157776839B0AC9DB" + "23CBDFC696104353E4D2780B2B4968F8D8542306BCA7A2366E" + ), "1", ( + "284139EA19C139EBE09A8111926AAA39A2C2BE12ED487A809D3CB5BC558547" + "25B4CDCB5734C58F90B2F60D99CC1950CDBC8D651793E93C9C6F0EAD752500" + "A32C56C62082912B66132B2A6AA42ADA923E1AD22CEB7BA0123" + ) + ) + ] # type: List[Tuple[int, int, int, int, str, str, str]] + + random_test_cases = [ + ("2", "2", "3", ""), ("1", "2", "3", ""), ("2", "1", "3", ""), + ("6", "5", "7", ""), ("3", "4", "7", ""), ("1", "6", "7", ""), ("5", "6", "7", ""), + ("3", "4", "B", ""), ("7", "4", "B", ""), ("9", "7", "B", ""), ("2", "a", "B", ""), + ("25", "16", "29", "(0x29 is prime)"), ("8", "28", "29", ""), + ("18", "21", "29", ""), ("15", "f", "29", ""), + ("e2", "ea", "FF", ""), ("43", "72", "FF", ""), + ("d8", "70", "FF", ""), ("3c", "7c", "FF", ""), + ("99", "b9", "101", "(0x101 is prime)"), ("65", "b2", "101", ""), + ("81", "32", "101", ""), ("51", "dd", "101", ""), + ("d5", "143", "38B", "(0x38B is prime)"), ("3d", "387", "38B", ""), + ("160", "2e5", "38B", ""), ("10f", "137", "38B", ""), + ("7dac", "25a", "8003", "(0x8003 is prime)"), ("6f1c", "3286", "8003", ""), + ("59ed", "2f3f", "8003", ""), ("6893", "736d", "8003", ""), + ("d199", "2832", "10001", "(0x10001 is prime)"), ("c3b2", "3e5b", "10001", ""), + ("abe4", "214e", "10001", ""), ("4360", "a05d", "10001", ""), + ("3f5a1", "165b2", "7F7F7", ""), ("3bd29", "37863", "7F7F7", ""), + ("60c47", "64819", "7F7F7", ""), ("16584", "12c49", "7F7F7", ""), + ("1ff03f", "610347", "800009", "(0x800009 is prime)"), ("340fd5", "19812e", "800009", ""), + ("3fe2e8", "4d0dc7", "800009", ""), ("40356", "e6392", "800009", ""), + ("dd8a1d", "266c0e", "100002B", "(0x100002B is prime)"), + ("3fa1cb", "847fd6", "100002B", ""), ("5f439d", "5c3196", "100002B", ""), + ("18d645", "f72dc6", "100002B", ""), + ("20051ad", "37def6e", 
"37EEE9D", "(0x37EEE9D is prime)"), + ("2ec140b", "3580dbf", "37EEE9D", ""), ("1d91b46", "190d4fc", "37EEE9D", ""), + ("34e488d", "1224d24", "37EEE9D", ""), + ("2a4fe2cb", "263466a9", "8000000B", "(0x8000000B is prime)"), + ("5643fe94", "29a1aefa", "8000000B", ""), ("29633513", "7b007ac4", "8000000B", ""), + ("2439cef5", "5c9d5a47", "8000000B", ""), + ("4de3cfaa", "50dea178", "8CD626B9", "(0x8CD626B9 is prime)"), + ("b8b8563", "10dbbbac", "8CD626B9", ""), ("4e8a6151", "5574ec19", "8CD626B9", ""), + ("69224878", "309cfc23", "8CD626B9", ""), + ("fb6f7fb6", "afb05423", "10000000F", "(0x10000000F is prime)"), + ("8391a243", "26034dcd", "10000000F", ""), ("d26b98c", "14b2d6aa", "10000000F", ""), + ("6b9f1371", "a21daf1d", "10000000F", ""), + ( + "9f49435ad", "c8264ade8", "174876E7E9", + "0x174876E7E9 is prime (dec) 99999999977" + ), + ("c402da434", "1fb427acf", "174876E7E9", ""), + ("f6ebc2bb1", "1096d39f2a", "174876E7E9", ""), + ("153b7f7b6b", "878fda8ff", "174876E7E9", ""), + ("2c1adbb8d6", "4384d2d3c6", "8000000017", "(0x8000000017 is prime)"), + ("2e4f9cf5fb", "794f3443d9", "8000000017", ""), + ("149e495582", "3802b8f7b7", "8000000017", ""), + ("7b9d49df82", "69c68a442a", "8000000017", ""), + ("683a134600", "6dd80ea9f6", "864CB9076D", "(0x864CB9076D is prime)"), + ("13a870ff0d", "59b099694a", "864CB9076D", ""), + ("37d06b0e63", "4d2147e46f", "864CB9076D", ""), + ("661714f8f4", "22e55df507", "864CB9076D", ""), + ("2f0a96363", "52693307b4", "F7F7F7F7F7", ""), + ("3c85078e64", "f2275ecb6d", "F7F7F7F7F7", ""), + ("352dae68d1", "707775b4c6", "F7F7F7F7F7", ""), + ("37ae0f3e0b", "912113040f", "F7F7F7F7F7", ""), + ("6dada15e31", "f58ed9eff7", "1000000000F", "(0x1000000000F is prime)"), + ("69627a7c89", "cfb5ebd13d", "1000000000F", ""), + ("a5e1ad239b", "afc030c731", "1000000000F", ""), + ("f1cc45f4c5", "c64ad607c8", "1000000000F", ""), + ("2ebad87d2e31", "4c72d90bca78", "800000000005", "(0x800000000005 is prime)"), + ("a30b3cc50d", "29ac4fe59490", "800000000005", ""), + 
("33674e9647b4", "5ec7ee7e72d3", "800000000005", ""), + ("3d956f474f61", "74070040257d", "800000000005", ""), + ("48348e3717d6", "43fcb4399571", "800795D9BA47", "(0x800795D9BA47 is prime)"), + ("5234c03cc99b", "2f3cccb87803", "800795D9BA47", ""), + ("3ed13db194ab", "44b8f4ba7030", "800795D9BA47", ""), + ("1c11e843bfdb", "95bd1b47b08", "800795D9BA47", ""), + ("a81d11cb81fd", "1e5753a3f33d", "1000000000015", "(0x1000000000015 is prime)"), + ("688c4db99232", "36fc0cf7ed", "1000000000015", ""), + ("f0720cc07e07", "fc76140ed903", "1000000000015", ""), + ("2ec61f8d17d1", "d270c85e36d2", "1000000000015", ""), + ( + "6a24cd3ab63820", "ed4aad55e5e348", "100000000000051", + "(0x100000000000051 is prime)" + ), + ("e680c160d3b248", "31e0d8840ed510", "100000000000051", ""), + ("a80637e9aebc38", "bb81decc4e1738", "100000000000051", ""), + ("9afa5a59e9d630", "be9e65a6d42938", "100000000000051", ""), + ("ab5e104eeb71c000", "2cffbd639e9fea00", "ABCDEF0123456789", ""), + ("197b867547f68a00", "44b796cf94654800", "ABCDEF0123456789", ""), + ("329f9483a04f2c00", "9892f76961d0f000", "ABCDEF0123456789", ""), + ("4a2e12dfb4545000", "1aa3e89a69794500", "ABCDEF0123456789", ""), + ( + "8b9acdf013d140f000", "12e4ceaefabdf2b2f00", "25A55A46E5DA99C71C7", + "0x25A55A46E5DA99C71C7 is the 3rd repunit prime(dec) 11111111111111111111111" + ), + ("1b8d960ea277e3f5500", "14418aa980e37dd000", "25A55A46E5DA99C71C7", ""), + ("7314524977e8075980", "8172fa45618ccd0d80", "25A55A46E5DA99C71C7", ""), + ("ca14f031769be63580", "147a2f3cf2964ca9400", "25A55A46E5DA99C71C7", ""), + ( + "18532ba119d5cd0cf39735c0000", "25f9838e31634844924733000000", + "314DC643FB763F2B8C0E2DE00879", + "0x314DC643FB763F2B8C0E2DE00879 is (dec)99999999977^3" + ), + ( + "a56e2d2517519e3970e70c40000", "ec27428d4bb380458588fa80000", + "314DC643FB763F2B8C0E2DE00879", "" + ), + ( + "1cb5e8257710e8653fff33a00000", "15fdd42fe440fd3a1d121380000", + "314DC643FB763F2B8C0E2DE00879", "" + ), + ( + "e50d07a65fc6f93e538ce040000", 
"1f4b059ca609f3ce597f61240000", + "314DC643FB763F2B8C0E2DE00879", "" + ), + ( + "1ea3ade786a095d978d387f30df9f20000000", + "127c448575f04af5a367a7be06c7da0000000", + "47BF19662275FA2F6845C74942ED1D852E521", + "0x47BF19662275FA2F6845C74942ED1D852E521 is (dec) 99999999977^4" + ), + ( + "16e15b0ca82764e72e38357b1f10a20000000", + "43e2355d8514bbe22b0838fdc3983a0000000", + "47BF19662275FA2F6845C74942ED1D852E521", "" + ), + ( + "be39332529d93f25c3d116c004c620000000", + "5cccec42370a0a2c89c6772da801a0000000", + "47BF19662275FA2F6845C74942ED1D852E521", "" + ), + ( + "ecaa468d90de0eeda474d39b3e1fc0000000", + "1e714554018de6dc0fe576bfd3b5660000000", + "47BF19662275FA2F6845C74942ED1D852E521", "" + ), + ( + "32298816711c5dce46f9ba06e775c4bedfc770e6700000000000000", + "8ee751fd5fb24f0b4a653cb3a0c8b7d9e724574d168000000000000", + "97EDD86E4B5C4592C6D32064AC55C888A7245F07CA3CC455E07C931", + ( + "0x97EDD86E4B5C4592C6D32064AC55C888A7245F07CA3CC455E07C931" + " is (dec) 99999999977^6" + ) + ), + ( + "29213b9df3cfd15f4b428645b67b677c29d1378d810000000000000", + "6cbb732c65e10a28872394dfdd1936d5171c3c3aac0000000000000", + "97EDD86E4B5C4592C6D32064AC55C888A7245F07CA3CC455E07C931", "" + ), + ( + "6f18db06ad4abc52c0c50643dd13098abccd4a232f0000000000000", + "7e6bf41f2a86098ad51f98dfc10490ba3e8081bc830000000000000", + "97EDD86E4B5C4592C6D32064AC55C888A7245F07CA3CC455E07C931", "" + ), + ( + "62d3286cd706ad9d73caff63f1722775d7e8c731208000000000000", + "530f7ba02ae2b04c2fe3e3d27ec095925631a6c2528000000000000", + "97EDD86E4B5C4592C6D32064AC55C888A7245F07CA3CC455E07C931", "" + ), + ( + "a6c6503e3c031fdbf6009a89ed60582b7233c5a85de28b16000000000000000", + "75c8ed18270b583f16d442a467d32bf95c5e491e9b8523798000000000000000", + "DD15FE80B731872AC104DB37832F7E75A244AA2631BC87885B861E8F20375499", + ( + "0xDD15FE80B731872AC104DB37832F7E75A244AA2631BC87885B861E8F20375499" + " is (dec) 99999999977^7" + ) + ), + ( + "bf84d1f85cf6b51e04d2c8f4ffd03532d852053cf99b387d4000000000000000", + 
"397ba5a743c349f4f28bc583ecd5f06e0a25f9c6d98f09134000000000000000", + "DD15FE80B731872AC104DB37832F7E75A244AA2631BC87885B861E8F20375499", "" + ), + ( + "6db11c3a4152ed1a2aa6fa34b0903ec82ea1b88908dcb482000000000000000", + "ac8ac576a74ad6ca48f201bf89f77350ce86e821358d85920000000000000000", + "DD15FE80B731872AC104DB37832F7E75A244AA2631BC87885B861E8F20375499", "" + ), + ( + "3001d96d7fe8b733f33687646fc3017e3ac417eb32e0ec708000000000000000", + "925ddbdac4174e8321a48a32f79640e8cf7ec6f46ea235a80000000000000000", + "DD15FE80B731872AC104DB37832F7E75A244AA2631BC87885B861E8F20375499", "" + ), + ( + "1029048755f2e60dd98c8de6d9989226b6bb4f0db8e46bd1939de560000000000000000000", + "51bb7270b2e25cec0301a03e8275213bb6c2f6e6ec93d4d46d36ca0000000000000000000", + "141B8EBD9009F84C241879A1F680FACCED355DA36C498F73E96E880CF78EA5F96146380E41", + ( + "0x141B8EBD9009F84C241879A1F680FACCED355DA36C498F73E96E880CF78EA5F96146" + "380E41 is 99999999977^8" + ) + ), + ( + "1c5337ff982b3ad6611257dbff5bbd7a9920ba2d4f5838a0cc681ce000000000000000000", + "520c5d049ca4702031ba728591b665c4d4ccd3b2b86864d4c160fd2000000000000000000", + "141B8EBD9009F84C241879A1F680FACCED355DA36C498F73E96E880CF78EA5F96146380E41", + "" + ), + ( + "57074dfa00e42f6555bae624b7f0209f218adf57f73ed34ab0ff90c000000000000000000", + "41eb14b6c07bfd3d1fe4f4a610c17cc44fcfcda695db040e011065000000000000000000", + "141B8EBD9009F84C241879A1F680FACCED355DA36C498F73E96E880CF78EA5F96146380E41", + "" + ), + ( + "d8ed7feed2fe855e6997ad6397f776158573d425031bf085a615784000000000000000000", + "6f121dcd18c578ab5e229881006007bb6d319b179f11015fe958b9c000000000000000000", + "141B8EBD9009F84C241879A1F680FACCED355DA36C498F73E96E880CF78EA5F96146380E41", + "" + ), + ( + ( + "2a462b156180ea5fe550d3758c764e06fae54e626b5f503265a09df76edbdfbf" + "a1e6000000000000000000000000" + ), ( + "1136f41d1879fd4fb9e49e0943a46b6704d77c068ee237c3121f9071cfd3e6a0" + "0315800000000000000000000000" + ), ( + "2A94608DE88B6D5E9F8920F5ABB06B24CC35AE1FBACC87D075C621C3E2833EC90" 
+ "2713E40F51E3B3C214EDFABC451" + ), ( + "0x2A94608DE88B6D5E9F8920F5ABB06B24CC35AE1FBACC87D075C621C3E2833EC" + "902713E40F51E3B3C214EDFABC451 is (dec) 99999999977^10" + ) + ), + ( + ( + "c1ac3800dfb3c6954dea391d206200cf3c47f795bf4a5603b4cb88ae7e574de47" + "40800000000000000000000000" + ), ( + "c0d16eda0549ede42fa0deb4635f7b7ce061fadea02ee4d85cba4c4f709603419" + "3c800000000000000000000000" + ), ( + "2A94608DE88B6D5E9F8920F5ABB06B24CC35AE1FBACC87D075C621C3E2833EC90" + "2713E40F51E3B3C214EDFABC451" + ), "" + ), + ( + ( + "19e45bb7633094d272588ad2e43bcb3ee341991c6731b6fa9d47c4018d7ce7bba" + "5ee800000000000000000000000" + ), ( + "1e4f83166ae59f6b9cc8fd3e7677ed8bfc01bb99c98bd3eb084246b64c1e18c33" + "65b800000000000000000000000" + ), ( + "2A94608DE88B6D5E9F8920F5ABB06B24CC35AE1FBACC87D075C621C3E2833EC90" + "2713E40F51E3B3C214EDFABC451" + ), "" + ), + ( + ( + "1aa93395fad5f9b7f20b8f9028a054c0bb7c11bb8520e6a95e5a34f06cb70bcdd" + "01a800000000000000000000000" + ), ( + "54b45afa5d4310192f8d224634242dd7dcfb342318df3d9bd37b4c614788ba13b" + "8b000000000000000000000000" + ), ( + "2A94608DE88B6D5E9F8920F5ABB06B24CC35AE1FBACC87D075C621C3E2833EC90" + "2713E40F51E3B3C214EDFABC451" + ), "" + ), + ( + ( + "544f2628a28cfb5ce0a1b7180ee66b49716f1d9476c466c57f0c4b23089917843" + "06d48f78686115ee19e25400000000000000000000000000000000" + ), ( + "677eb31ef8d66c120fa872a60cd47f6e10cbfdf94f90501bd7883cba03d185be0" + "a0148d1625745e9c4c827300000000000000000000000000000000" + ), ( + "8335616AED761F1F7F44E6BD49E807B82E3BF2BF11BFA6AF813C808DBF33DBFA1" + "1DABD6E6144BEF37C6800000000000000000000000000000000051" + ), ( + "0x8335616AED761F1F7F44E6BD49E807B82E3BF2BF11BFA6AF813C808DBF33DBF" + "A11DABD6E6144BEF37C6800000000000000000000000000000000051 is prime," + " (dec) 10^143 + 3^4" + ) + ), + ( + ( + "76bb3470985174915e9993522aec989666908f9e8cf5cb9f037bf4aee33d8865c" + "b6464174795d07e30015b80000000000000000000000000000000" + ), ( + "6aaaf60d5784dcef612d133613b179a317532ecca0eed40b8ad0c01e6d4a6d8c7" 
+ "9a52af190abd51739009a900000000000000000000000000000000" + ), ( + "8335616AED761F1F7F44E6BD49E807B82E3BF2BF11BFA6AF813C808DBF33DBFA1" + "1DABD6E6144BEF37C6800000000000000000000000000000000051" + ), "" + ), + ( + ( + "6cfdd6e60912e441d2d1fc88f421b533f0103a5322ccd3f4db84861643ad63fd6" + "3d1d8cfbc1d498162786ba00000000000000000000000000000000" + ), ( + "1177246ec5e93814816465e7f8f248b350d954439d35b2b5d75d917218e7fd5fb" + "4c2f6d0667f9467fdcf33400000000000000000000000000000000" + ), ( + "8335616AED761F1F7F44E6BD49E807B82E3BF2BF11BFA6AF813C808DBF33DBFA1" + "1DABD6E6144BEF37C6800000000000000000000000000000000051" + ), "" + ), + ( + ( + "7a09a0b0f8bbf8057116fb0277a9bdf3a91b5eaa8830d448081510d8973888be5" + "a9f0ad04facb69aa3715f00000000000000000000000000000000" + ), ( + "764dec6c05a1c0d87b649efa5fd94c91ea28bffb4725d4ab4b33f1a3e8e3b314d" + "799020e244a835a145ec9800000000000000000000000000000000" + ), ( + "8335616AED761F1F7F44E6BD49E807B82E3BF2BF11BFA6AF813C808DBF33DBFA1" + "1DABD6E6144BEF37C6800000000000000000000000000000000051" + ), "" + ) + ] # type: List[Tuple[str, str, str, str]] + + def __init__( + self, val_a: str, val_b: str, val_n: str, case_description: str = "" + ): + self.case_description = case_description + self.arg_a = val_a + self.int_a = bignum_common.hex_to_int(val_a) + self.arg_b = val_b + self.int_b = bignum_common.hex_to_int(val_b) + self.arg_n = val_n + self.int_n = bignum_common.hex_to_int(val_n) + + limbs_a4 = bignum_common.limbs_mpi(self.int_a, 32) + limbs_a8 = bignum_common.limbs_mpi(self.int_a, 64) + self.limbs_b4 = bignum_common.limbs_mpi(self.int_b, 32) + self.limbs_b8 = bignum_common.limbs_mpi(self.int_b, 64) + self.limbs_an4 = bignum_common.limbs_mpi(self.int_n, 32) + self.limbs_an8 = bignum_common.limbs_mpi(self.int_n, 64) + + if limbs_a4 > self.limbs_an4 or limbs_a8 > self.limbs_an8: + raise Exception("Limbs of input A ({}) exceeds N ({})".format( + self.arg_a, self.arg_n + )) + + def arguments(self) -> List[str]: + return [ + 
str(self.limbs_an4), str(self.limbs_b4), + str(self.limbs_an8), str(self.limbs_b8), + bignum_common.quote_str(self.arg_a), + bignum_common.quote_str(self.arg_b), + bignum_common.quote_str(self.arg_n) + ] + self.result() + + def description(self) -> str: + if self.case_description != "replay": + if not self.start_2_mpi4 and self.limbs_an4 > 1: + tmp = "(start of 2-MPI 4-byte bignums) " + self.__class__.start_2_mpi4 = True + elif not self.start_2_mpi8 and self.limbs_an8 > 1: + tmp = "(start of 2-MPI 8-byte bignums) " + self.__class__.start_2_mpi8 = True + else: + tmp = "(gen) " + self.case_description = tmp + self.case_description + return super().description() + + def result(self) -> List[str]: + """Get the result of the operation.""" + r4 = bignum_common.bound_mpi_limbs(self.limbs_an4, 32) + i4 = bignum_common.invmod(r4, self.int_n) + x4 = self.int_a * self.int_b * i4 + x4 = x4 % self.int_n + + r8 = bignum_common.bound_mpi_limbs(self.limbs_an8, 64) + i8 = bignum_common.invmod(r8, self.int_n) + x8 = self.int_a * self.int_b * i8 + x8 = x8 % self.int_n + return [ + "\"{:x}\"".format(x4), + "\"{:x}\"".format(x8) + ] + + def set_limbs( + self, limbs_an4: int, limbs_b4: int, limbs_an8: int, limbs_b8: int + ) -> None: + """Set number of limbs for each input. + + Replaces default values set during initialization. + """ + self.limbs_an4 = limbs_an4 + self.limbs_b4 = limbs_b4 + self.limbs_an8 = limbs_an8 + self.limbs_b8 = limbs_b8 + + @classmethod + def generate_function_tests(cls) -> Iterator[test_case.TestCase]: + """Generate replay and randomly generated test cases.""" + # Test cases which replay captured invocations during unit test runs. 
+ for limbs_an4, limbs_b4, limbs_an8, limbs_b8, a, b, n in cls.replay_test_cases: + cur_op = cls(a, b, n, case_description="replay") + cur_op.set_limbs(limbs_an4, limbs_b4, limbs_an8, limbs_b8) + yield cur_op.create_test_case() + # Random test cases can be generated using mpi_modmul_case_generate() + # Uses a mixture of primes and odd numbers as N, with four randomly + # generated cases for each N. + for a, b, n, description in cls.random_test_cases: + cur_op = cls(a, b, n, case_description=description) + yield cur_op.create_test_case() + + +def mpi_modmul_case_generate() -> None: + """Generate valid inputs for montmul tests using moduli. + + For each modulus, generates random values for A and B and simple descriptions + for the test case. + """ + moduli = [ + ("3", ""), ("7", ""), ("B", ""), ("29", ""), ("FF", ""), + ("101", ""), ("38B", ""), ("8003", ""), ("10001", ""), + ("7F7F7", ""), ("800009", ""), ("100002B", ""), ("37EEE9D", ""), + ("8000000B", ""), ("8CD626B9", ""), ("10000000F", ""), + ("174876E7E9", "is prime (dec) 99999999977"), + ("8000000017", ""), ("864CB9076D", ""), ("F7F7F7F7F7", ""), + ("1000000000F", ""), ("800000000005", ""), ("800795D9BA47", ""), + ("1000000000015", ""), ("100000000000051", ""), ("ABCDEF0123456789", ""), + ( + "25A55A46E5DA99C71C7", + "is the 3rd repunit prime (dec) 11111111111111111111111" + ), + ("314DC643FB763F2B8C0E2DE00879", "is (dec)99999999977^3"), + ("47BF19662275FA2F6845C74942ED1D852E521", "is (dec) 99999999977^4"), + ( + "97EDD86E4B5C4592C6D32064AC55C888A7245F07CA3CC455E07C931", + "is (dec) 99999999977^6" + ), + ( + "DD15FE80B731872AC104DB37832F7E75A244AA2631BC87885B861E8F20375499", + "is (dec) 99999999977^7" + ), + ( + "141B8EBD9009F84C241879A1F680FACCED355DA36C498F73E96E880CF78EA5F96146380E41", + "is (dec) 99999999977^8" + ), + ( + ( + "2A94608DE88B6D5E9F8920F5ABB06B24CC35AE1FBACC87D075C621C3E283" + "3EC902713E40F51E3B3C214EDFABC451" + ), + "is (dec) 99999999977^10" + ), + ( + 
"8335616AED761F1F7F44E6BD49E807B82E3BF2BF11BFA6AF813C808DBF33DBFA11" + "DABD6E6144BEF37C6800000000000000000000000000000000051", + "is prime, (dec) 10^143 + 3^4" + ) + ] # type: List[Tuple[str, str]] + primes = [ + "3", "7", "B", "29", "101", "38B", "8003", "10001", "800009", + "100002B", "37EEE9D", "8000000B", "8CD626B9", + # From here they require > 1 4-byte MPI + "10000000F", "174876E7E9", "8000000017", "864CB9076D", "1000000000F", + "800000000005", "800795D9BA47", "1000000000015", "100000000000051", + # From here they require > 1 8-byte MPI + "25A55A46E5DA99C71C7", # this is 11111111111111111111111 decimal + # 10^143 + 3^4: (which is prime) + # 100000000000000000000000000000000000000000000000000000000000000000000000000000 + # 000000000000000000000000000000000000000000000000000000000000000081 + ( + "8335616AED761F1F7F44E6BD49E807B82E3BF2BF11BFA6AF813C808DBF33DBFA11" + "DABD6E6144BEF37C6800000000000000000000000000000000051" + ) + ] # type: List[str] + generated_inputs = [] + for mod, description in moduli: + n = bignum_common.hex_to_int(mod) + mod_read = "{:x}".format(n) + case_count = 3 if n < 5 else 4 + cases = {} # type: Dict[int, int] + i = 0 + while i < case_count: + a = random.randint(1, n) + b = random.randint(1, n) + if cases.get(a) == b: + continue + cases[a] = b + if description: + out_description = "0x{} {}".format(mod_read, description) + elif i == 0 and len(mod) > 1 and mod in primes: + out_description = "(0x{} is prime)" + else: + out_description = "" + generated_inputs.append( + ("{:x}".format(a), "{:x}".format(b), mod, out_description) + ) + i += 1 + print(generated_inputs) + + +class BignumCoreExpMod(BignumCoreTarget, bignum_common.ModOperationCommon): + """Test cases for bignum core exponentiation.""" + symbol = "^" + test_function = "mpi_core_exp_mod" + test_name = "Core modular exponentiation (Mongtomery form only)" + input_style = "fixed" + montgomery_form_a = True + + def result(self) -> List[str]: + # Result has to be given in Montgomery 
form too + result = pow(self.int_a, self.int_b, self.int_n) + mont_result = self.to_montgomery(result) + return [self.format_result(mont_result)] + + @property + def is_valid(self) -> bool: + # The base needs to be canonical, but the exponent can be larger than + # the modulus (see for example exponent blinding) + return bool(self.int_a < self.int_n) + + +class BignumCoreSubInt(BignumCoreTarget, bignum_common.OperationCommon): + """Test cases for bignum core sub int.""" + count = 0 + symbol = "-" + test_function = "mpi_core_sub_int" + test_name = "mpi_core_sub_int" + input_style = "arch_split" + + @property + def is_valid(self) -> bool: + # This is "sub int", so b is only one limb + if bignum_common.limbs_mpi(self.int_b, self.bits_in_limb) > 1: + return False + return True + + # Overriding because we don't want leading zeros on b + @property + def arg_b(self) -> str: + return self.val_b + + def result(self) -> List[str]: + result = self.int_a - self.int_b + + borrow, result = divmod(result, self.limb_boundary) + + # Borrow will be -1 if non-zero, but we want it to be 1 in the test data + return [ + self.format_result(result), + str(-borrow) + ] + +class BignumCoreZeroCheckCT(BignumCoreTarget, bignum_common.OperationCommon): + """Test cases for bignum core zero check (constant flow).""" + count = 0 + symbol = "== 0" + test_function = "mpi_core_check_zero_ct" + test_name = "mpi_core_check_zero_ct" + input_style = "variable" + arity = 1 + suffix = True + + def result(self) -> List[str]: + result = 1 if self.int_a == 0 else 0 + return [str(result)] diff --git a/scripts/framework_dev/bignum_data.py b/scripts/framework_dev/bignum_data.py new file mode 100644 index 000000000..5c6c2c81e --- /dev/null +++ b/scripts/framework_dev/bignum_data.py @@ -0,0 +1,159 @@ +"""Base values and datasets for bignum generated tests and helper functions that +produced them.""" +# Copyright The Mbed TLS Contributors +# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later +# + +import 
random + +# Functions calling these were used to produce test data and are here only for +# reproducibility, they are not used by the test generation framework/classes +try: + from Cryptodome.Util.number import isPrime, getPrime #type: ignore #pylint: disable=import-error +except ImportError: + pass + +# Generated by bignum_common.gen_safe_prime(192,1) +SAFE_PRIME_192_BIT_SEED_1 = "d1c127a667786703830500038ebaef20e5a3e2dc378fb75b" + +# First number generated by random.getrandbits(192) - seed(2,2), not a prime +RANDOM_192_BIT_SEED_2_NO1 = "177219d30e7a269fd95bafc8f2a4d27bdcf4bb99f4bea973" + +# Second number generated by random.getrandbits(192) - seed(2,2), not a prime +RANDOM_192_BIT_SEED_2_NO2 = "cf1822ffbc6887782b491044d5e341245c6e433715ba2bdd" + +# Third number generated by random.getrandbits(192) - seed(2,2), not a prime +RANDOM_192_BIT_SEED_2_NO3 = "3653f8dd9b1f282e4067c3584ee207f8da94e3e8ab73738f" + +# Fourth number generated by random.getrandbits(192) - seed(2,2), not a prime +RANDOM_192_BIT_SEED_2_NO4 = "ffed9235288bc781ae66267594c9c9500925e4749b575bd1" + +# Ninth number generated by random.getrandbits(192) - seed(2,2), not a prime +RANDOM_192_BIT_SEED_2_NO9 = "2a1be9cd8697bbd0e2520e33e44c50556c71c4a66148a86f" + +# Generated by bignum_common.gen_safe_prime(1024,3) +SAFE_PRIME_1024_BIT_SEED_3 = ("c93ba7ec74d96f411ba008bdb78e63ff11bb5df46a51e16b" + "2c9d156f8e4e18abf5e052cb01f47d0d1925a77f60991577" + "e128fb6f52f34a27950a594baadd3d8057abeb222cf3cca9" + "62db16abf79f2ada5bd29ab2f51244bf295eff9f6aaba130" + "2efc449b128be75eeaca04bc3c1a155d11d14e8be32a2c82" + "87b3996cf6ad5223") + +# First number generated by random.getrandbits(1024) - seed(4,2), not a prime +RANDOM_1024_BIT_SEED_4_NO1 = ("6905269ed6f0b09f165c8ce36e2f24b43000de01b2ed40ed" + "3addccb2c33be0ac79d679346d4ac7a5c3902b38963dc6e8" + "534f45738d048ec0f1099c6c3e1b258fd724452ccea71ff4" + "a14876aeaff1a098ca5996666ceab360512bd13110722311" + "710cf5327ac435a7a97c643656412a9b8a1abcd1a6916c74" + 
"da4f9fc3c6da5d7") + +# Second number generated by random.getrandbits(1024) - seed(4,2), not a prime +RANDOM_1024_BIT_SEED_4_NO2 = ("f1cfd99216df648647adec26793d0e453f5082492d83a823" + "3fb62d2c81862fc9634f806fabf4a07c566002249b191bf4" + "d8441b5616332aca5f552773e14b0190d93936e1daca3c06" + "f5ff0c03bb5d7385de08caa1a08179104a25e4664f5253a0" + "2a3187853184ff27459142deccea264542a00403ce80c4b0" + "a4042bb3d4341aad") + +# Third number generated by random.getrandbits(1024) - seed(4,2), not a prime +RANDOM_1024_BIT_SEED_4_NO3 = ("14c15c910b11ad28cc21ce88d0060cc54278c2614e1bcb38" + "3bb4a570294c4ea3738d243a6e58d5ca49c7b59b995253fd" + "6c79a3de69f85e3131f3b9238224b122c3e4a892d9196ada" + "4fcfa583e1df8af9b474c7e89286a1754abcb06ae8abb93f" + "01d89a024cdce7a6d7288ff68c320f89f1347e0cdd905ecf" + "d160c5d0ef412ed6") + +# Fourth number generated by random.getrandbits(1024) - seed(4,2), not a prime +RANDOM_1024_BIT_SEED_4_NO4 = ("32decd6b8efbc170a26a25c852175b7a96b98b5fbf37a2be" + "6f98bca35b17b9662f0733c846bbe9e870ef55b1a1f65507" + "a2909cb633e238b4e9dd38b869ace91311021c9e32111ac1" + "ac7cc4a4ff4dab102522d53857c49391b36cc9aa78a330a1" + "a5e333cb88dcf94384d4cd1f47ca7883ff5a52f1a05885ac" + "7671863c0bdbc23a") + +# Fifth number generated by random.getrandbits(1024) - seed(4,2), not a prime +RANDOM_1024_BIT_SEED_4_NO5 = ("53be4721f5b9e1f5acdac615bc20f6264922b9ccf469aef8" + "f6e7d078e55b85dd1525f363b281b8885b69dc230af5ac87" + "0692b534758240df4a7a03052d733dcdef40af2e54c0ce68" + "1f44ebd13cc75f3edcb285f89d8cf4d4950b16ffc3e1ac3b" + "4708d9893a973000b54a23020fc5b043d6e4a51519d9c9cc" + "52d32377e78131c1") + +# Adding 192 bit and 1024 bit numbers because these are the shortest required +# for ECC and RSA respectively. 
+INPUTS_DEFAULT = [
+    "0", "1", # corner cases
+    "2", "3", # small primes
+    "4", # non-prime even
+    "38", # small random
+    SAFE_PRIME_192_BIT_SEED_1, # prime
+    RANDOM_192_BIT_SEED_2_NO1, # not a prime
+    RANDOM_192_BIT_SEED_2_NO2, # not a prime
+    SAFE_PRIME_1024_BIT_SEED_3, # prime
+    RANDOM_1024_BIT_SEED_4_NO1, # not a prime
+    RANDOM_1024_BIT_SEED_4_NO3, # not a prime
+    RANDOM_1024_BIT_SEED_4_NO2, # largest (not a prime)
+    ]
+
+ADD_SUB_DATA = [
+    "0", "1", "3", "f", "fe", "ff", "100", "ff00",
+    "fffe", "ffff", "10000", # 2^16 - 2, 2^16 - 1, 2^16
+    "fffffffe", "ffffffff", "100000000", # 2^32 - 2, 2^32 - 1, 2^32
+    "1f7f7f7f7f7f7f",
+    "8000000000000000", "fefefefefefefefe",
+    "fffffffffffffffe", "ffffffffffffffff", "10000000000000000", # 2^64 - 2, 2^64 - 1, 2^64
+    "1234567890abcdef0",
+    "fffffffffffffffffffffffe",
+    "ffffffffffffffffffffffff",
+    "1000000000000000000000000",
+    "fffffffffffffffffefefefefefefefe",
+    "fffffffffffffffffffffffffffffffe",
+    "ffffffffffffffffffffffffffffffff",
+    "100000000000000000000000000000000",
+    "1234567890abcdef01234567890abcdef0",
+    "fffffffffffffffffffffffffffffffffffffffffffffffffefefefefefefefe",
+    "fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe",
+    "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
+    "10000000000000000000000000000000000000000000000000000000000000000",
+    "1234567890abcdef01234567890abcdef01234567890abcdef01234567890abcdef0",
+    ]
+
+# Only odd moduli are present as in the new bignum code only odd moduli are
+# supported for now.
+MODULI_DEFAULT = [
+    "53", # safe prime
+    "45", # non-prime
+    SAFE_PRIME_192_BIT_SEED_1, # safe prime
+    RANDOM_192_BIT_SEED_2_NO4, # not a prime
+    SAFE_PRIME_1024_BIT_SEED_3, # safe prime
+    RANDOM_1024_BIT_SEED_4_NO5, # not a prime
+    ]
+
+# Some functions, e.g. mbedtls_mpi_mod_raw_inv_prime(), only support prime moduli.
+ONLY_PRIME_MODULI = [ + "53", # safe prime + "8ac72304057392b5", # 9999999997777777333 (longer, not safe, prime) + # The next prime has a different R in Montgomery form depending on + # whether 32- or 64-bit MPIs are used. + "152d02c7e14af67fe0bf", # 99999999999999999991999 + SAFE_PRIME_192_BIT_SEED_1, # safe prime + SAFE_PRIME_1024_BIT_SEED_3, # safe prime + ] + +def __gen_safe_prime(bits, seed): + ''' + Generate a safe prime. + + This function is intended for generating constants offline and shouldn't be + used in test generation classes. + + Requires pycryptodomex for getPrime and isPrime and python 3.9 or later for + randbytes. + ''' + rng = random.Random() + # We want reproducibility across python versions + rng.seed(seed, version=2) + while True: + prime = 2*getPrime(bits-1, rng.randbytes)+1 #pylint: disable=no-member + if isPrime(prime, 1e-30): + return prime diff --git a/scripts/framework_dev/bignum_mod.py b/scripts/framework_dev/bignum_mod.py new file mode 100644 index 000000000..f554001ec --- /dev/null +++ b/scripts/framework_dev/bignum_mod.py @@ -0,0 +1,102 @@ +"""Framework classes for generation of bignum mod test cases.""" +# Copyright The Mbed TLS Contributors +# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later +# + +from typing import Dict, List + +from . import test_data_generation +from . 
import bignum_common +from .bignum_data import ONLY_PRIME_MODULI + +class BignumModTarget(test_data_generation.BaseTarget): + #pylint: disable=abstract-method, too-few-public-methods + """Target for bignum mod test case generation.""" + target_basename = 'test_suite_bignum_mod.generated' + + +class BignumModMul(bignum_common.ModOperationCommon, + BignumModTarget): + # pylint:disable=duplicate-code + """Test cases for bignum mpi_mod_mul().""" + symbol = "*" + test_function = "mpi_mod_mul" + test_name = "mbedtls_mpi_mod_mul" + input_style = "arch_split" + arity = 2 + + def arguments(self) -> List[str]: + return [self.format_result(self.to_montgomery(self.int_a)), + self.format_result(self.to_montgomery(self.int_b)), + bignum_common.quote_str(self.arg_n) + ] + self.result() + + def result(self) -> List[str]: + result = (self.int_a * self.int_b) % self.int_n + return [self.format_result(self.to_montgomery(result))] + + +class BignumModSub(bignum_common.ModOperationCommon, BignumModTarget): + """Test cases for bignum mpi_mod_sub().""" + symbol = "-" + test_function = "mpi_mod_sub" + test_name = "mbedtls_mpi_mod_sub" + input_style = "fixed" + arity = 2 + + def result(self) -> List[str]: + result = (self.int_a - self.int_b) % self.int_n + # To make negative tests easier, append 0 for success to the + # generated cases + return [self.format_result(result), "0"] + +class BignumModInvNonMont(bignum_common.ModOperationCommon, BignumModTarget): + """Test cases for bignum mpi_mod_inv() - not in Montgomery form.""" + moduli = ONLY_PRIME_MODULI # for now only prime moduli supported + symbol = "^ -1" + test_function = "mpi_mod_inv_non_mont" + test_name = "mbedtls_mpi_mod_inv non-Mont. 
form" + input_style = "fixed" + arity = 1 + suffix = True + disallow_zero_a = True + + def result(self) -> List[str]: + result = bignum_common.invmod_positive(self.int_a, self.int_n) + # To make negative tests easier, append 0 for success to the + # generated cases + return [self.format_result(result), "0"] + +class BignumModInvMont(bignum_common.ModOperationCommon, BignumModTarget): + """Test cases for bignum mpi_mod_inv() - Montgomery form.""" + moduli = ONLY_PRIME_MODULI # for now only prime moduli supported + symbol = "^ -1" + test_function = "mpi_mod_inv_mont" + test_name = "mbedtls_mpi_mod_inv Mont. form" + input_style = "arch_split" # Mont. form requires arch_split + arity = 1 + suffix = True + disallow_zero_a = True + montgomery_form_a = True + + def result(self) -> List[str]: + result = bignum_common.invmod_positive(self.int_a, self.int_n) + mont_result = self.to_montgomery(result) + # To make negative tests easier, append 0 for success to the + # generated cases + return [self.format_result(mont_result), "0"] + + +class BignumModAdd(bignum_common.ModOperationCommon, BignumModTarget): + """Test cases for bignum mpi_mod_add().""" + count = 0 + symbol = "+" + test_function = "mpi_mod_add" + test_name = "mbedtls_mpi_mod_add" + input_style = "fixed" + + def result(self) -> List[str]: + result = (self.int_a + self.int_b) % self.int_n + # To make negative tests easier, append "0" for success to the + # generated cases + return [self.format_result(result), "0"] diff --git a/scripts/framework_dev/bignum_mod_raw.py b/scripts/framework_dev/bignum_mod_raw.py new file mode 100644 index 000000000..37ad27a11 --- /dev/null +++ b/scripts/framework_dev/bignum_mod_raw.py @@ -0,0 +1,242 @@ +"""Framework classes for generation of bignum mod_raw test cases.""" +# Copyright The Mbed TLS Contributors +# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later +# + +from typing import Iterator, List + +from . import test_case +from . import test_data_generation +from . 
import bignum_common +from .bignum_data import ONLY_PRIME_MODULI + +class BignumModRawTarget(test_data_generation.BaseTarget): + #pylint: disable=abstract-method, too-few-public-methods + """Target for bignum mod_raw test case generation.""" + target_basename = 'test_suite_bignum_mod_raw.generated' + + +class BignumModRawSub(bignum_common.ModOperationCommon, + BignumModRawTarget): + """Test cases for bignum mpi_mod_raw_sub().""" + symbol = "-" + test_function = "mpi_mod_raw_sub" + test_name = "mbedtls_mpi_mod_raw_sub" + input_style = "fixed" + arity = 2 + + def arguments(self) -> List[str]: + return [bignum_common.quote_str(n) for n in [self.arg_a, + self.arg_b, + self.arg_n] + ] + self.result() + + def result(self) -> List[str]: + result = (self.int_a - self.int_b) % self.int_n + return [self.format_result(result)] + +class BignumModRawFixQuasiReduction(bignum_common.ModOperationCommon, + BignumModRawTarget): + """Test cases for ecp quasi_reduction().""" + symbol = "-" + test_function = "mpi_mod_raw_fix_quasi_reduction" + test_name = "fix_quasi_reduction" + input_style = "fixed" + arity = 1 + + # Extend the default values with n < x < 2n + input_values = bignum_common.ModOperationCommon.input_values + [ + "73", + + # First number generated by random.getrandbits(1024) - seed(3,2) + "ea7b5bf55eb561a4216363698b529b4a97b750923ceb3ffd", + + # First number generated by random.getrandbits(1024) - seed(1,2) + ("cd447e35b8b6d8fe442e3d437204e52db2221a58008a05a6c4647159c324c985" + "9b810e766ec9d28663ca828dd5f4b3b2e4b06ce60741c7a87ce42c8218072e8c" + "35bf992dc9e9c616612e7696a6cecc1b78e510617311d8a3c2ce6f447ed4d57b" + "1e2feb89414c343c1027c4d1c386bbc4cd613e30d8f16adf91b7584a2265b1f5") + ] # type: List[str] + + def result(self) -> List[str]: + result = self.int_a % self.int_n + return [self.format_result(result)] + + @property + def is_valid(self) -> bool: + return bool(self.int_a < 2 * self.int_n) + +class BignumModRawMul(bignum_common.ModOperationCommon, + 
BignumModRawTarget): + """Test cases for bignum mpi_mod_raw_mul().""" + symbol = "*" + test_function = "mpi_mod_raw_mul" + test_name = "mbedtls_mpi_mod_raw_mul" + input_style = "arch_split" + arity = 2 + + def arguments(self) -> List[str]: + return [self.format_result(self.to_montgomery(self.int_a)), + self.format_result(self.to_montgomery(self.int_b)), + bignum_common.quote_str(self.arg_n) + ] + self.result() + + def result(self) -> List[str]: + result = (self.int_a * self.int_b) % self.int_n + return [self.format_result(self.to_montgomery(result))] + + +class BignumModRawInvPrime(bignum_common.ModOperationCommon, + BignumModRawTarget): + """Test cases for bignum mpi_mod_raw_inv_prime().""" + moduli = ONLY_PRIME_MODULI + symbol = "^ -1" + test_function = "mpi_mod_raw_inv_prime" + test_name = "mbedtls_mpi_mod_raw_inv_prime (Montgomery form only)" + input_style = "arch_split" + arity = 1 + suffix = True + montgomery_form_a = True + disallow_zero_a = True + + def result(self) -> List[str]: + result = bignum_common.invmod_positive(self.int_a, self.int_n) + mont_result = self.to_montgomery(result) + return [self.format_result(mont_result)] + + +class BignumModRawAdd(bignum_common.ModOperationCommon, + BignumModRawTarget): + """Test cases for bignum mpi_mod_raw_add().""" + symbol = "+" + test_function = "mpi_mod_raw_add" + test_name = "mbedtls_mpi_mod_raw_add" + input_style = "fixed" + arity = 2 + + def result(self) -> List[str]: + result = (self.int_a + self.int_b) % self.int_n + return [self.format_result(result)] + + +class BignumModRawConvertRep(bignum_common.ModOperationCommon, + BignumModRawTarget): + # This is an abstract class, it's ok to have unimplemented methods. 
+ #pylint: disable=abstract-method + """Test cases for representation conversion.""" + symbol = "" + input_style = "arch_split" + arity = 1 + rep = bignum_common.ModulusRepresentation.INVALID + + def set_representation(self, r: bignum_common.ModulusRepresentation) -> None: + self.rep = r + + def arguments(self) -> List[str]: + return ([bignum_common.quote_str(self.arg_n), self.rep.symbol(), + bignum_common.quote_str(self.arg_a)] + + self.result()) + + def description(self) -> str: + base = super().description() + mod_with_rep = 'mod({})'.format(self.rep.name) + return base.replace('mod', mod_with_rep, 1) + + @classmethod + def test_cases_for_values(cls, rep: bignum_common.ModulusRepresentation, + n: str, a: str) -> Iterator[test_case.TestCase]: + """Emit test cases for the given values (if any). + + This may emit no test cases if a isn't valid for the modulus n, + or multiple test cases if rep requires different data depending + on the limb size. + """ + for bil in cls.limb_sizes: + test_object = cls(n, a, bits_in_limb=bil) + test_object.set_representation(rep) + # The class is set to having separate test cases for each limb + # size, because the Montgomery representation requires it. + # But other representations don't require it. So for other + # representations, emit a single test case with no dependency + # on the limb size. + if rep is not bignum_common.ModulusRepresentation.MONTGOMERY: + test_object.dependencies = \ + [dep for dep in test_object.dependencies + if not dep.startswith('MBEDTLS_HAVE_INT')] + if test_object.is_valid: + yield test_object.create_test_case() + if rep is not bignum_common.ModulusRepresentation.MONTGOMERY: + # A single test case (emitted, or skipped due to invalidity) + # is enough, since this test case doesn't depend on the + # limb size. + break + + # The parent class doesn't support non-bignum parameters. So we override + # test generation, in order to have the representation as a parameter. 
+ @classmethod + def generate_function_tests(cls) -> Iterator[test_case.TestCase]: + + for rep in bignum_common.ModulusRepresentation.supported_representations(): + for n in cls.moduli: + for a in cls.input_values: + yield from cls.test_cases_for_values(rep, n, a) + +class BignumModRawCanonicalToModulusRep(BignumModRawConvertRep): + """Test cases for mpi_mod_raw_canonical_to_modulus_rep.""" + test_function = "mpi_mod_raw_canonical_to_modulus_rep" + test_name = "Rep canon->mod" + + def result(self) -> List[str]: + return [self.format_result(self.convert_from_canonical(self.int_a, self.rep))] + +class BignumModRawModulusToCanonicalRep(BignumModRawConvertRep): + """Test cases for mpi_mod_raw_modulus_to_canonical_rep.""" + test_function = "mpi_mod_raw_modulus_to_canonical_rep" + test_name = "Rep mod->canon" + + @property + def arg_a(self) -> str: + return self.format_arg("{:x}".format(self.convert_from_canonical(self.int_a, self.rep))) + + def result(self) -> List[str]: + return [self.format_result(self.int_a)] + + +class BignumModRawConvertToMont(bignum_common.ModOperationCommon, + BignumModRawTarget): + """ Test cases for mpi_mod_raw_to_mont_rep(). """ + test_function = "mpi_mod_raw_to_mont_rep" + test_name = "Convert into Mont: " + symbol = "R *" + input_style = "arch_split" + arity = 1 + + def result(self) -> List[str]: + result = self.to_montgomery(self.int_a) + return [self.format_result(result)] + +class BignumModRawConvertFromMont(bignum_common.ModOperationCommon, + BignumModRawTarget): + """ Test cases for mpi_mod_raw_from_mont_rep(). """ + test_function = "mpi_mod_raw_from_mont_rep" + test_name = "Convert from Mont: " + symbol = "1/R *" + input_style = "arch_split" + arity = 1 + + def result(self) -> List[str]: + result = self.from_montgomery(self.int_a) + return [self.format_result(result)] + +class BignumModRawModNegate(bignum_common.ModOperationCommon, + BignumModRawTarget): + """ Test cases for mpi_mod_raw_neg(). 
""" + test_function = "mpi_mod_raw_neg" + test_name = "Modular negation: " + symbol = "-" + input_style = "arch_split" + arity = 1 + + def result(self) -> List[str]: + result = (self.int_n - self.int_a) % self.int_n + return [self.format_result(result)] diff --git a/scripts/framework_dev/build_tree.py b/scripts/framework_dev/build_tree.py new file mode 100644 index 000000000..ec67e4cdf --- /dev/null +++ b/scripts/framework_dev/build_tree.py @@ -0,0 +1,120 @@ +"""Mbed TLS build tree information and manipulation. +""" + +# Copyright The Mbed TLS Contributors +# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later +# + +import os +import inspect +from typing import Optional + +def looks_like_tf_psa_crypto_root(path: str) -> bool: + """Whether the given directory looks like the root of the PSA Crypto source tree.""" + return all(os.path.isdir(os.path.join(path, subdir)) + for subdir in ['include', 'core', 'drivers', 'programs', 'tests']) + +def looks_like_mbedtls_root(path: str) -> bool: + """Whether the given directory looks like the root of the Mbed TLS source tree.""" + return all(os.path.isdir(os.path.join(path, subdir)) + for subdir in ['include', 'library', 'programs', 'tests']) + +def looks_like_root(path: str) -> bool: + return looks_like_tf_psa_crypto_root(path) or looks_like_mbedtls_root(path) + +def crypto_core_directory(root: Optional[str] = None, relative: Optional[bool] = False) -> str: + """ + Return the path of the directory containing the PSA crypto core + for either TF-PSA-Crypto or Mbed TLS. + + Returns either the full path or relative path depending on the + "relative" boolean argument. 
+ """ + if root is None: + root = guess_project_root() + if looks_like_tf_psa_crypto_root(root): + if relative: + return "core" + return os.path.join(root, "core") + elif looks_like_mbedtls_root(root): + if relative: + return "library" + return os.path.join(root, "library") + else: + raise Exception('Neither Mbed TLS nor TF-PSA-Crypto source tree found') + +def crypto_library_filename(root: Optional[str] = None) -> str: + """Return the crypto library filename for either TF-PSA-Crypto or Mbed TLS.""" + if root is None: + root = guess_project_root() + if looks_like_tf_psa_crypto_root(root): + return "tfpsacrypto" + elif looks_like_mbedtls_root(root): + return "mbedcrypto" + else: + raise Exception('Neither Mbed TLS nor TF-PSA-Crypto source tree found') + +def check_repo_path(): + """Check that the current working directory is the project root, and throw + an exception if not. + """ + if not all(os.path.isdir(d) for d in ["include", "library", "tests"]): + raise Exception("This script must be run from Mbed TLS root") + +def chdir_to_root() -> None: + """Detect the root of the Mbed TLS source tree and change to it. + + The current directory must be up to two levels deep inside an Mbed TLS + source tree. + """ + for d in [os.path.curdir, + os.path.pardir, + os.path.join(os.path.pardir, os.path.pardir)]: + if looks_like_root(d): + os.chdir(d) + return + raise Exception('Mbed TLS source tree not found') + +def guess_project_root(): + """Guess project source code directory. + + Return the first possible project root directory. 
+ """ + dirs = set({}) + for frame in inspect.stack(): + path = os.path.dirname(frame.filename) + for d in ['.', os.path.pardir] \ + + [os.path.join(*([os.path.pardir]*i)) for i in range(2, 10)]: + d = os.path.abspath(os.path.join(path, d)) + if d in dirs: + continue + dirs.add(d) + if looks_like_root(d): + return d + raise Exception('Neither Mbed TLS nor TF-PSA-Crypto source tree found') + +def guess_mbedtls_root(root: Optional[str] = None) -> str: + """Guess Mbed TLS source code directory. + + Return the first possible Mbed TLS root directory. + Raise an exception if we are not in Mbed TLS. + """ + if root is None: + root = guess_project_root() + if looks_like_mbedtls_root(root): + return root + else: + raise Exception('Mbed TLS source tree not found') + +def guess_tf_psa_crypto_root(root: Optional[str] = None) -> str: + """Guess TF-PSA-Crypto source code directory. + + Return the first possible TF-PSA-Crypto root directory. + Raise an exception if we are not in TF-PSA-Crypto. + """ + if root is None: + root = guess_project_root() + if looks_like_tf_psa_crypto_root(root): + return root + else: + raise Exception('TF-PSA-Crypto source tree not found') diff --git a/scripts/framework_dev/c_build_helper.py b/scripts/framework_dev/c_build_helper.py new file mode 100644 index 000000000..f2cbbe4af --- /dev/null +++ b/scripts/framework_dev/c_build_helper.py @@ -0,0 +1,162 @@ +"""Generate and run C code. +""" + +# Copyright The Mbed TLS Contributors +# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later +# + +import os +import platform +import subprocess +import sys +import tempfile + +def remove_file_if_exists(filename): + """Remove the specified file, ignoring errors.""" + if not filename: + return + try: + os.remove(filename) + except OSError: + pass + +def create_c_file(file_label): + """Create a temporary C file. + + * ``file_label``: a string that will be included in the file name. 
+
+    Return ```(c_file, c_name, exe_name)``` where ``c_file`` is a Python
+    stream open for writing to the file, ``c_name`` is the name of the file
+    and ``exe_name`` is the name of the executable that will be produced
+    by compiling the file.
+    """
+    c_fd, c_name = tempfile.mkstemp(prefix='tmp-{}-'.format(file_label),
+                                    suffix='.c')
+    exe_suffix = '.exe' if platform.system() == 'Windows' else ''
+    exe_name = c_name[:-2] + exe_suffix
+    remove_file_if_exists(exe_name)
+    c_file = os.fdopen(c_fd, 'w', encoding='ascii')
+    return c_file, c_name, exe_name
+
+def generate_c_printf_expressions(c_file, cast_to, printf_format, expressions):
+    """Generate C instructions to print the value of ``expressions``.
+
+    Write the code with ``c_file``'s ``write`` method.
+
+    Each expression is cast to the type ``cast_to`` and printed with the
+    printf format ``printf_format``.
+    """
+    for expr in expressions:
+        c_file.write('    printf("{}\\n", ({}) {});\n'
+                     .format(printf_format, cast_to, expr))
+
+def generate_c_file(c_file,
+                    caller, header,
+                    main_generator):
+    """Generate a temporary C source file.
+
+    * ``c_file`` is an open stream on the C source file.
+    * ``caller``: an informational string written in a comment at the top
+      of the file.
+    * ``header``: extra code to insert before any function in the generated
+      C file.
+    * ``main_generator``: a function called with ``c_file`` as its sole argument
+      to generate the body of the ``main()`` function.
+    """
+    c_file.write('/* Generated by {} */'
+                 .format(caller))
+    c_file.write('''
+#include <stdio.h>
+''')
+    c_file.write(header)
+    c_file.write('''
+int main(void)
+{
+''')
+    main_generator(c_file)
+    c_file.write('''    return 0;
+}
+''')
+
+def compile_c_file(c_filename, exe_filename, include_dirs):
+    """Compile a C source file with the host compiler.
+
+    * ``c_filename``: the name of the source file to compile.
+    * ``exe_filename``: the name for the executable to be created.
+ * ``include_dirs``: a list of paths to include directories to be passed + with the -I switch. + """ + # Respect $HOSTCC if it is set + cc = os.getenv('HOSTCC', None) + if cc is None: + cc = os.getenv('CC', 'cc') + cmd = [cc] + + proc = subprocess.Popen(cmd, + stdout=subprocess.DEVNULL, + stderr=subprocess.PIPE, + universal_newlines=True) + cc_is_msvc = 'Microsoft (R) C/C++' in proc.communicate()[1] + + cmd += ['-I' + dir for dir in include_dirs] + if cc_is_msvc: + # MSVC has deprecated using -o to specify the output file, + # and produces an object file in the working directory by default. + obj_filename = exe_filename[:-4] + '.obj' + cmd += ['-Fe' + exe_filename, '-Fo' + obj_filename] + else: + cmd += ['-o' + exe_filename] + + subprocess.check_call(cmd + [c_filename]) + +def get_c_expression_values( + cast_to, printf_format, + expressions, + caller=__name__, file_label='', + header='', include_path=None, + keep_c=False, +): # pylint: disable=too-many-arguments, too-many-locals + """Generate and run a program to print out numerical values for expressions. + + * ``cast_to``: a C type. + * ``printf_format``: a printf format suitable for the type ``cast_to``. + * ``header``: extra code to insert before any function in the generated + C file. + * ``expressions``: a list of C language expressions that have the type + ``cast_to``. + * ``include_path``: a list of directories containing header files. + * ``keep_c``: if true, keep the temporary C file (presumably for debugging + purposes). + + Use the C compiler specified by the ``CC`` environment variable, defaulting + to ``cc``. If ``CC`` looks like MSVC, use its command line syntax, + otherwise assume the compiler supports Unix traditional ``-I`` and ``-o``. + + Return the list of values of the ``expressions``. 
+ """ + if include_path is None: + include_path = [] + c_name = None + exe_name = None + obj_name = None + try: + c_file, c_name, exe_name = create_c_file(file_label) + generate_c_file( + c_file, caller, header, + lambda c_file: generate_c_printf_expressions(c_file, + cast_to, printf_format, + expressions) + ) + c_file.close() + + compile_c_file(c_name, exe_name, include_path) + if keep_c: + sys.stderr.write('List of {} tests kept at {}\n' + .format(caller, c_name)) + else: + os.remove(c_name) + output = subprocess.check_output([exe_name]) + return output.decode('ascii').strip().split('\n') + finally: + remove_file_if_exists(exe_name) + remove_file_if_exists(obj_name) diff --git a/scripts/framework_dev/c_parsing_helper.py b/scripts/framework_dev/c_parsing_helper.py new file mode 100644 index 000000000..2657b7d23 --- /dev/null +++ b/scripts/framework_dev/c_parsing_helper.py @@ -0,0 +1,131 @@ +"""Helper functions to parse C code in heavily constrained scenarios. + +Currently supported functionality: + +* read_function_declarations: read function declarations from a header file. +""" + +# Copyright The Mbed TLS Contributors +# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later + +### WARNING: the code in this file has not been extensively reviewed yet. +### We do not think it is harmful, but it may be below our normal standards +### for robustness and maintainability. + +import re +from typing import Dict, Iterable, Iterator, List, Optional, Tuple + + +class ArgumentInfo: + """Information about an argument to an API function.""" + #pylint: disable=too-few-public-methods + + _KEYWORDS = [ + 'const', 'register', 'restrict', + 'int', 'long', 'short', 'signed', 'unsigned', + ] + _DECLARATION_RE = re.compile( + r'(?P\w[\w\s*]*?)\s*' + + r'(?!(?:' + r'|'.join(_KEYWORDS) + r'))(?P\b\w+\b)?' 
+ + r'\s*(?P\[[^][]*\])?\Z', + re.A | re.S) + + @classmethod + def normalize_type(cls, typ: str) -> str: + """Normalize whitespace in a type.""" + typ = re.sub(r'\s+', r' ', typ) + typ = re.sub(r'\s*\*', r' *', typ) + return typ + + def __init__(self, decl: str) -> None: + self.decl = decl.strip() + m = self._DECLARATION_RE.match(self.decl) + if not m: + raise ValueError(self.decl) + self.type = self.normalize_type(m.group('type')) #type: str + self.name = m.group('name') #type: Optional[str] + self.suffix = m.group('suffix') if m.group('suffix') else '' #type: str + + +class FunctionInfo: + """Information about an API function.""" + #pylint: disable=too-few-public-methods + + # Regex matching the declaration of a function that returns void. + VOID_RE = re.compile(r'\s*\bvoid\s*\Z', re.A) + + def __init__(self, #pylint: disable=too-many-arguments + filename: str, + line_number: int, + qualifiers: Iterable[str], + return_type: str, + name: str, + arguments: List[str]) -> None: + self.filename = filename + self.line_number = line_number + self.qualifiers = frozenset(qualifiers) + self.return_type = return_type + self.name = name + self.arguments = [ArgumentInfo(arg) for arg in arguments] + + def returns_void(self) -> bool: + """Whether the function returns void.""" + return bool(self.VOID_RE.search(self.return_type)) + + +# Match one C comment. +# Note that we match both comment types, so things like // in a /*...*/ +# comment are handled correctly. +_C_COMMENT_RE = re.compile(r'//(?:[^\n]|\\\n)*|/\*.*?\*/', re.S) +_NOT_NEWLINES_RE = re.compile(r'[^\n]+') + +def read_logical_lines(filename: str) -> Iterator[Tuple[int, str]]: + """Read logical lines from a file. + + Logical lines are one or more physical line, with balanced parentheses. 
+ """ + with open(filename, encoding='utf-8') as inp: + content = inp.read() + # Strip comments, but keep newlines for line numbering + content = re.sub(_C_COMMENT_RE, + lambda m: re.sub(_NOT_NEWLINES_RE, "", m.group(0)), + content) + lines = enumerate(content.splitlines(), 1) + for line_number, line in lines: + # Read a logical line, containing balanced parentheses. + # We assume that parentheses are balanced (this should be ok + # since comments have been stripped), otherwise there will be + # a gigantic logical line at the end. + paren_level = line.count('(') - line.count(')') + while paren_level > 0: + _, more = next(lines) #pylint: disable=stop-iteration-return + paren_level += more.count('(') - more.count(')') + line += '\n' + more + yield line_number, line + +_C_FUNCTION_DECLARATION_RE = re.compile( + r'(?P(?:(?:extern|inline|static)\b\s*)*)' + r'(?P\w[\w\s*]*?)\s*' + + r'\b(?P\w+)' + + r'\s*\((?P.*)\)\s*;', + re.A | re.S) + +def read_function_declarations(functions: Dict[str, FunctionInfo], + filename: str) -> None: + """Collect function declarations from a C header file.""" + for line_number, line in read_logical_lines(filename): + m = _C_FUNCTION_DECLARATION_RE.match(line) + if not m: + continue + qualifiers = m.group('qualifiers').split() + return_type = m.group('return_type') + name = m.group('name') + arguments = m.group('arguments').split(',') + if len(arguments) == 1 and re.match(FunctionInfo.VOID_RE, arguments[0]): + arguments = [] + # Note: we replace any existing declaration for the same name. + functions[name] = FunctionInfo(filename, line_number, + qualifiers, + return_type, + name, + arguments) diff --git a/scripts/framework_dev/c_wrapper_generator.py b/scripts/framework_dev/c_wrapper_generator.py new file mode 100644 index 000000000..3cf1e05eb --- /dev/null +++ b/scripts/framework_dev/c_wrapper_generator.py @@ -0,0 +1,473 @@ +"""Generate C wrapper functions. 
+""" + +# Copyright The Mbed TLS Contributors +# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later + +### WARNING: the code in this file has not been extensively reviewed yet. +### We do not think it is harmful, but it may be below our normal standards +### for robustness and maintainability. + +import os +import re +import sys +import typing +from typing import Dict, List, Optional, Tuple + +from .c_parsing_helper import ArgumentInfo, FunctionInfo +from . import typing_util + + +def c_declare(prefix: str, name: str, suffix: str) -> str: + """Format a declaration of name with the given type prefix and suffix.""" + if not prefix.endswith('*'): + prefix += ' ' + return prefix + name + suffix + + +WrapperInfo = typing.NamedTuple('WrapperInfo', [ + ('argument_names', List[str]), + ('guard', Optional[str]), + ('wrapper_name', str), +]) + + +class Base: + """Generate a C source file containing wrapper functions.""" + + # This class is designed to have many methods potentially overloaded. + # Tell pylint not to complain about methods that have unused arguments: + # child classes are likely to override those methods and need the + # arguments in question. + #pylint: disable=no-self-use,unused-argument + + # Prefix prepended to the function's name to form the wrapper name. + _WRAPPER_NAME_PREFIX = '' + # Suffix appended to the function's name to form the wrapper name. + _WRAPPER_NAME_SUFFIX = '_wrap' + + # Functions with one of these qualifiers are skipped. + _SKIP_FUNCTION_WITH_QUALIFIERS = frozenset(['inline', 'static']) + + def __init__(self): + """Construct a wrapper generator object. + """ + self.program_name = os.path.basename(sys.argv[0]) + # To be populated in a derived class + self.functions = {} #type: Dict[str, FunctionInfo] + # Preprocessor symbol used as a guard against multiple inclusion in the + # header. Must be set before writing output to a header. + # Not used when writing .c output. 
+ self.header_guard = None #type: Optional[str] + + def _write_prologue(self, out: typing_util.Writable, header: bool) -> None: + """Write the prologue of a C file. + + This includes a description comment and some include directives. + """ + out.write("""/* Automatically generated by {}, do not edit! */ + +/* Copyright The Mbed TLS Contributors + * SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later + */ +""" + .format(self.program_name)) + if header: + out.write(""" +#ifndef {guard} +#define {guard} + +#ifdef __cplusplus +extern "C" {{ +#endif +""" + .format(guard=self.header_guard)) + out.write(""" +#include +""") + + def _write_epilogue(self, out: typing_util.Writable, header: bool) -> None: + """Write the epilogue of a C file. + """ + if header: + out.write(""" +#ifdef __cplusplus +}} +#endif + +#endif /* {guard} */ +""" + .format(guard=self.header_guard)) + out.write(""" +/* End of automatically generated file. */ +""") + + def _wrapper_function_name(self, original_name: str) -> str: + """The name of the wrapper function. + + By default, this adds a suffix. + """ + return (self._WRAPPER_NAME_PREFIX + + original_name + + self._WRAPPER_NAME_SUFFIX) + + def _wrapper_declaration_start(self, + function: FunctionInfo, + wrapper_name: str) -> str: + """The beginning of the wrapper function declaration. + + This ends just before the opening parenthesis of the argument list. + + This is a string containing at least the return type and the + function name. It may start with additional qualifiers or attributes + such as `static`, `__attribute__((...))`, etc. + """ + return c_declare(function.return_type, wrapper_name, '') + + def _argument_name(self, + function_name: str, + num: int, + arg: ArgumentInfo) -> str: + """Name to use for the given argument in the wrapper function. + + Argument numbers count from 0. 
+ """ + name = 'arg' + str(num) + if arg.name: + name += '_' + arg.name + return name + + def _wrapper_declaration_argument(self, + function_name: str, + num: int, name: str, + arg: ArgumentInfo) -> str: + """One argument definition in the wrapper function declaration. + + Argument numbers count from 0. + """ + return c_declare(arg.type, name, arg.suffix) + + def _underlying_function_name(self, function: FunctionInfo) -> str: + """The name of the underlying function. + + By default, this is the name of the wrapped function. + """ + return function.name + + def _return_variable_name(self, function: FunctionInfo) -> str: + """The name of the variable that will contain the return value.""" + return 'retval' + + def _write_function_call(self, out: typing_util.Writable, + function: FunctionInfo, + argument_names: List[str]) -> None: + """Write the call to the underlying function. + """ + # Note that the function name is in parentheses, to avoid calling + # a function-like macro with the same name, since in typical usage + # there is a function-like macro with the same name which is the + # wrapper. + call = '({})({})'.format(self._underlying_function_name(function), + ', '.join(argument_names)) + if function.returns_void(): + out.write(' {};\n'.format(call)) + else: + ret_name = self._return_variable_name(function) + ret_decl = c_declare(function.return_type, ret_name, '') + out.write(' {} = {};\n'.format(ret_decl, call)) + + def _write_function_return(self, out: typing_util.Writable, + function: FunctionInfo, + if_void: bool = False) -> None: + """Write a return statement. + + If the function returns void, only write a statement if if_void is true. 
+ """ + if function.returns_void(): + if if_void: + out.write(' return;\n') + else: + ret_name = self._return_variable_name(function) + out.write(' return {};\n'.format(ret_name)) + + def _write_function_body(self, out: typing_util.Writable, + function: FunctionInfo, + argument_names: List[str]) -> None: + """Write the body of the wrapper code for the specified function. + """ + self._write_function_call(out, function, argument_names) + self._write_function_return(out, function) + + def _skip_function(self, function: FunctionInfo) -> bool: + """Whether to skip this function. + + By default, static or inline functions are skipped. + """ + if not self._SKIP_FUNCTION_WITH_QUALIFIERS.isdisjoint(function.qualifiers): + return True + return False + + _FUNCTION_GUARDS = { + } #type: Dict[str, str] + + def _function_guard(self, function: FunctionInfo) -> Optional[str]: + """A preprocessor condition for this function. + + The wrapper will be guarded with `#if` on this condition, if not None. + """ + return self._FUNCTION_GUARDS.get(function.name) + + def _wrapper_info(self, function: FunctionInfo) -> Optional[WrapperInfo]: + """Information about the wrapper for one function. + + Return None if the function should be skipped. + """ + if self._skip_function(function): + return None + argument_names = [self._argument_name(function.name, num, arg) + for num, arg in enumerate(function.arguments)] + return WrapperInfo( + argument_names=argument_names, + guard=self._function_guard(function), + wrapper_name=self._wrapper_function_name(function.name), + ) + + def _write_function_prototype(self, out: typing_util.Writable, + function: FunctionInfo, + wrapper: WrapperInfo, + header: bool) -> None: + """Write the prototype of a wrapper function. + + If header is true, write a function declaration, with a semicolon at + the end. Otherwise just write the prototype, intended to be followed + by the function's body. 
+ """ + declaration_start = self._wrapper_declaration_start(function, + wrapper.wrapper_name) + arg_indent = ' ' + terminator = ';\n' if header else '\n' + if function.arguments: + out.write(declaration_start + '(\n') + for num in range(len(function.arguments)): + arg_def = self._wrapper_declaration_argument( + function.name, + num, wrapper.argument_names[num], function.arguments[num]) + arg_terminator = \ + (')' + terminator if num == len(function.arguments) - 1 else + ',\n') + out.write(arg_indent + arg_def + arg_terminator) + else: + out.write(declaration_start + '(void)' + terminator) + + def _write_c_function(self, out: typing_util.Writable, + function: FunctionInfo) -> None: + """Write wrapper code for one function. + + Do nothing if the function is skipped. + """ + wrapper = self._wrapper_info(function) + if wrapper is None: + return + out.write(""" +/* Wrapper for {} */ +""" + .format(function.name)) + if wrapper.guard is not None: + out.write('#if {}\n'.format(wrapper.guard)) + self._write_function_prototype(out, function, wrapper, False) + out.write('{\n') + self._write_function_body(out, function, wrapper.argument_names) + out.write('}\n') + if wrapper.guard is not None: + out.write('#endif /* {} */\n'.format(wrapper.guard)) + + def _write_h_function_declaration(self, out: typing_util.Writable, + function: FunctionInfo, + wrapper: WrapperInfo) -> None: + """Write the declaration of one wrapper function. + """ + self._write_function_prototype(out, function, wrapper, True) + + def _write_h_macro_definition(self, out: typing_util.Writable, + function: FunctionInfo, + wrapper: WrapperInfo) -> None: + """Write the macro definition for one wrapper. 
+ """ + arg_list = ', '.join(wrapper.argument_names) + out.write('#define {function_name}({args}) \\\n {wrapper_name}({args})\n' + .format(function_name=function.name, + wrapper_name=wrapper.wrapper_name, + args=arg_list)) + + def _write_h_function(self, out: typing_util.Writable, + function: FunctionInfo) -> None: + """Write the complete header content for one wrapper. + + This is the declaration of the wrapper function, and the + definition of a function-like macro that calls the wrapper function. + + Do nothing if the function is skipped. + """ + wrapper = self._wrapper_info(function) + if wrapper is None: + return + out.write('\n') + if wrapper.guard is not None: + out.write('#if {}\n'.format(wrapper.guard)) + self._write_h_function_declaration(out, function, wrapper) + self._write_h_macro_definition(out, function, wrapper) + if wrapper.guard is not None: + out.write('#endif /* {} */\n'.format(wrapper.guard)) + + def write_c_file(self, filename: str) -> None: + """Output a whole C file containing function wrapper definitions.""" + with open(filename, 'w', encoding='utf-8') as out: + self._write_prologue(out, False) + for name in sorted(self.functions): + self._write_c_function(out, self.functions[name]) + self._write_epilogue(out, False) + + def _header_guard_from_file_name(self, filename: str) -> str: + """Preprocessor symbol used as a guard against multiple inclusion.""" + # Heuristic to strip irrelevant leading directories + filename = re.sub(r'.*include[\\/]', r'', filename) + return re.sub(r'[^0-9A-Za-z]', r'_', filename, re.A).upper() + + def write_h_file(self, filename: str) -> None: + """Output a header file with function wrapper declarations and macro definitions.""" + self.header_guard = self._header_guard_from_file_name(filename) + with open(filename, 'w', encoding='utf-8') as out: + self._write_prologue(out, True) + for name in sorted(self.functions): + self._write_h_function(out, self.functions[name]) + self._write_epilogue(out, True) + + +class 
UnknownTypeForPrintf(Exception): + """Exception raised when attempting to generate code that logs a value of an unknown type.""" + + def __init__(self, typ: str) -> None: + super().__init__("Unknown type for printf format generation: " + typ) + + +class Logging(Base): + """Generate wrapper functions that log the inputs and outputs.""" + + def __init__(self) -> None: + """Construct a wrapper generator including logging of inputs and outputs. + + Log to stdout by default. Call `set_stream` to change this. + """ + super().__init__() + self.stream = 'stdout' + + def set_stream(self, stream: str) -> None: + """Set the stdio stream to log to. + + Call this method before calling `write_c_output` or `write_h_output`. + """ + self.stream = stream + + def _write_prologue(self, out: typing_util.Writable, header: bool) -> None: + super()._write_prologue(out, header) + if not header: + out.write(""" +#if defined(MBEDTLS_FS_IO) && defined(MBEDTLS_TEST_HOOKS) +#include +#include +#include // for MBEDTLS_PRINTF_SIZET +#include // for mbedtls_fprintf +#endif /* defined(MBEDTLS_FS_IO) && defined(MBEDTLS_TEST_HOOKS) */ +""") + + _PRINTF_SIMPLE_FORMAT = { + 'int': '%d', + 'long': '%ld', + 'long long': '%lld', + 'size_t': '%"MBEDTLS_PRINTF_SIZET"', + 'unsigned': '0x%08x', + 'unsigned int': '0x%08x', + 'unsigned long': '0x%08lx', + 'unsigned long long': '0x%016llx', + } + + def _printf_simple_format(self, typ: str) -> Optional[str]: + """Use this printf format for a value of typ. + + Return None if values of typ need more complex handling. + """ + return self._PRINTF_SIMPLE_FORMAT.get(typ) + + _PRINTF_TYPE_CAST = { + 'int32_t': 'int', + 'uint32_t': 'unsigned', + 'uint64_t': 'unsigned long long', + } #type: Dict[str, str] + + def _printf_type_cast(self, typ: str) -> Optional[str]: + """Cast values of typ to this type before passing them to printf. + + Return None if values of the given type do not need a cast. 
+ """ + return self._PRINTF_TYPE_CAST.get(typ) + + _POINTER_TYPE_RE = re.compile(r'\s*\*\Z') + + def _printf_parameters(self, typ: str, var: str) -> Tuple[str, List[str]]: + """The printf format and arguments for a value of type typ stored in var. + """ + expr = var + base_type = typ + # For outputs via a pointer, get the value that has been written. + # Note: we don't support pointers to pointers here. + pointer_match = self._POINTER_TYPE_RE.search(base_type) + if pointer_match: + base_type = base_type[:pointer_match.start(0)] + expr = '*({})'.format(expr) + # Maybe cast the value to a standard type. + cast_to = self._printf_type_cast(base_type) + if cast_to is not None: + expr = '({}) {}'.format(cast_to, expr) + base_type = cast_to + # Try standard types. + fmt = self._printf_simple_format(base_type) + if fmt is not None: + return '{}={}'.format(var, fmt), [expr] + raise UnknownTypeForPrintf(typ) + + def _write_function_logging(self, out: typing_util.Writable, + function: FunctionInfo, + argument_names: List[str]) -> None: + """Write code to log the function's inputs and outputs.""" + formats, values = '%s', ['"' + function.name + '"'] + for arg_info, arg_name in zip(function.arguments, argument_names): + fmt, vals = self._printf_parameters(arg_info.type, arg_name) + if fmt: + formats += ' ' + fmt + values += vals + if not function.returns_void(): + ret_name = self._return_variable_name(function) + fmt, vals = self._printf_parameters(function.return_type, ret_name) + if fmt: + formats += ' ' + fmt + values += vals + out.write("""\ +#if defined(MBEDTLS_FS_IO) && defined(MBEDTLS_TEST_HOOKS) + if ({stream}) {{ + mbedtls_fprintf({stream}, "{formats}\\n", + {values}); + }} +#endif /* defined(MBEDTLS_FS_IO) && defined(MBEDTLS_TEST_HOOKS) */ +""" + .format(stream=self.stream, + formats=formats, + values=', '.join(values))) + + def _write_function_body(self, out: typing_util.Writable, + function: FunctionInfo, + argument_names: List[str]) -> None: + """Write the body of 
the wrapper code for the specified function. + """ + self._write_function_call(out, function, argument_names) + self._write_function_logging(out, function, argument_names) + self._write_function_return(out, function) diff --git a/scripts/framework_dev/crypto_data_tests.py b/scripts/framework_dev/crypto_data_tests.py new file mode 100644 index 000000000..a36de692e --- /dev/null +++ b/scripts/framework_dev/crypto_data_tests.py @@ -0,0 +1,112 @@ +"""Generate test data for cryptographic mechanisms. + +This module is a work in progress, only implementing a few cases for now. +""" + +# Copyright The Mbed TLS Contributors +# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later +# + +import hashlib +from typing import Callable, Dict, Iterator, List, Optional #pylint: disable=unused-import + +from . import crypto_knowledge +from . import psa_information +from . import test_case + + +def psa_low_level_dependencies(*expressions: str) -> List[str]: + """Infer dependencies of a PSA low-level test case by looking for PSA_xxx symbols. + + This function generates MBEDTLS_PSA_BUILTIN_xxx symbols. 
+ """ + high_level = psa_information.automatic_dependencies(*expressions) + for dep in high_level: + assert dep.startswith('PSA_WANT_') + return ['MBEDTLS_PSA_BUILTIN_' + dep[9:] for dep in high_level] + + +class HashPSALowLevel: + """Generate test cases for the PSA low-level hash interface.""" + + def __init__(self, info: psa_information.Information) -> None: + self.info = info + base_algorithms = sorted(info.constructors.algorithms) + all_algorithms = \ + [crypto_knowledge.Algorithm(expr) + for expr in info.constructors.generate_expressions(base_algorithms)] + self.algorithms = \ + [alg + for alg in all_algorithms + if (not alg.is_wildcard and + alg.can_do(crypto_knowledge.AlgorithmCategory.HASH))] + + # CALCULATE[alg] = function to return the hash of its argument in hex + # TO-DO: implement the None entries with a third-party library, because + # hashlib might not have everything, depending on the Python version and + # the underlying OpenSSL. On Ubuntu 16.04, truncated sha512 and sha3/shake + # are not available. On Ubuntu 22.04, md2, md4 and ripemd160 are not + # available. 
+ CALCULATE = { + 'PSA_ALG_MD5': lambda data: hashlib.md5(data).hexdigest(), + 'PSA_ALG_RIPEMD160': None, #lambda data: hashlib.new('ripdemd160').hexdigest() + 'PSA_ALG_SHA_1': lambda data: hashlib.sha1(data).hexdigest(), + 'PSA_ALG_SHA_224': lambda data: hashlib.sha224(data).hexdigest(), + 'PSA_ALG_SHA_256': lambda data: hashlib.sha256(data).hexdigest(), + 'PSA_ALG_SHA_384': lambda data: hashlib.sha384(data).hexdigest(), + 'PSA_ALG_SHA_512': lambda data: hashlib.sha512(data).hexdigest(), + 'PSA_ALG_SHA_512_224': None, #lambda data: hashlib.new('sha512_224').hexdigest() + 'PSA_ALG_SHA_512_256': None, #lambda data: hashlib.new('sha512_256').hexdigest() + 'PSA_ALG_SHA3_224': None, #lambda data: hashlib.sha3_224(data).hexdigest(), + 'PSA_ALG_SHA3_256': None, #lambda data: hashlib.sha3_256(data).hexdigest(), + 'PSA_ALG_SHA3_384': None, #lambda data: hashlib.sha3_384(data).hexdigest(), + 'PSA_ALG_SHA3_512': None, #lambda data: hashlib.sha3_512(data).hexdigest(), + 'PSA_ALG_SHAKE256_512': None, #lambda data: hashlib.shake_256(data).hexdigest(64), + } #type: Dict[str, Optional[Callable[[bytes], str]]] + + @staticmethod + def one_test_case(alg: crypto_knowledge.Algorithm, + function: str, note: str, + arguments: List[str]) -> test_case.TestCase: + """Construct one test case involving a hash.""" + tc = test_case.TestCase() + tc.set_description('{}{} {}' + .format(function, + ' ' + note if note else '', + alg.short_expression())) + tc.set_dependencies(psa_low_level_dependencies(alg.expression)) + tc.set_function(function) + tc.set_arguments([alg.expression] + + ['"{}"'.format(arg) for arg in arguments]) + return tc + + def test_cases_for_hash(self, + alg: crypto_knowledge.Algorithm + ) -> Iterator[test_case.TestCase]: + """Enumerate all test cases for one hash algorithm.""" + calc = self.CALCULATE[alg.expression] + if calc is None: + return # not implemented yet + + short = b'abc' + hash_short = calc(short) + long = (b'Hello, world. 
Here are 16 unprintable bytes: [' + b'\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a' + b'\x80\x81\x82\x83\xfe\xff]. ' + b' This message was brought to you by a natural intelligence. ' + b' If you can read this, good luck with your debugging!') + hash_long = calc(long) + + yield self.one_test_case(alg, 'hash_empty', '', [calc(b'')]) + yield self.one_test_case(alg, 'hash_valid_one_shot', '', + [short.hex(), hash_short]) + for n in [0, 1, 64, len(long) - 1, len(long)]: + yield self.one_test_case(alg, 'hash_valid_multipart', + '{} + {}'.format(n, len(long) - n), + [long[:n].hex(), calc(long[:n]), + long[n:].hex(), hash_long]) + + def all_test_cases(self) -> Iterator[test_case.TestCase]: + """Enumerate all test cases for all hash algorithms.""" + for alg in self.algorithms: + yield from self.test_cases_for_hash(alg) diff --git a/scripts/framework_dev/crypto_knowledge.py b/scripts/framework_dev/crypto_knowledge.py new file mode 100644 index 000000000..ebfd55cdb --- /dev/null +++ b/scripts/framework_dev/crypto_knowledge.py @@ -0,0 +1,568 @@ +"""Knowledge about cryptographic mechanisms implemented in Mbed TLS. + +This module is entirely based on the PSA API. +""" + +# Copyright The Mbed TLS Contributors +# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later +# + +import enum +import re +from typing import FrozenSet, Iterable, List, Optional, Tuple, Dict + +from .asymmetric_key_data import ASYMMETRIC_KEY_DATA + + +def short_expression(original: str, level: int = 0) -> str: + """Abbreviate the expression, keeping it human-readable. + + If `level` is 0, just remove parts that are implicit from context, + such as a leading ``PSA_KEY_TYPE_``. + For larger values of `level`, also abbreviate some names in an + unambiguous, but ad hoc way. 
+ """ + short = original + short = re.sub(r'\bPSA_(?:ALG|DH_FAMILY|ECC_FAMILY|KEY_[A-Z]+)_', r'', short) + short = re.sub(r' +', r'', short) + if level >= 1: + short = re.sub(r'PUBLIC_KEY\b', r'PUB', short) + short = re.sub(r'KEY_PAIR\b', r'PAIR', short) + short = re.sub(r'\bBRAINPOOL_P', r'BP', short) + short = re.sub(r'\bMONTGOMERY\b', r'MGM', short) + short = re.sub(r'AEAD_WITH_SHORTENED_TAG\b', r'AEAD_SHORT', short) + short = re.sub(r'\bDETERMINISTIC_', r'DET_', short) + short = re.sub(r'\bKEY_AGREEMENT\b', r'KA', short) + short = re.sub(r'_PSK_TO_MS\b', r'_PSK2MS', short) + return short + + +BLOCK_CIPHERS = frozenset(['AES', 'ARIA', 'CAMELLIA', 'DES']) +BLOCK_MAC_MODES = frozenset(['CBC_MAC', 'CMAC']) +BLOCK_CIPHER_MODES = frozenset([ + 'CTR', 'CFB', 'OFB', 'XTS', 'CCM_STAR_NO_TAG', + 'ECB_NO_PADDING', 'CBC_NO_PADDING', 'CBC_PKCS7', +]) +BLOCK_AEAD_MODES = frozenset(['CCM', 'GCM']) + +class EllipticCurveCategory(enum.Enum): + """Categorization of elliptic curve families. + + The category of a curve determines what algorithms are defined over it. + """ + + SHORT_WEIERSTRASS = 0 + MONTGOMERY = 1 + TWISTED_EDWARDS = 2 + + @staticmethod + def from_family(family: str) -> 'EllipticCurveCategory': + if family == 'PSA_ECC_FAMILY_MONTGOMERY': + return EllipticCurveCategory.MONTGOMERY + if family == 'PSA_ECC_FAMILY_TWISTED_EDWARDS': + return EllipticCurveCategory.TWISTED_EDWARDS + # Default to SW, which most curves belong to. + return EllipticCurveCategory.SHORT_WEIERSTRASS + + +class KeyType: + """Knowledge about a PSA key type.""" + + def __init__(self, name: str, params: Optional[Iterable[str]] = None) -> None: + """Analyze a key type. + + The key type must be specified in PSA syntax. In its simplest form, + `name` is a string 'PSA_KEY_TYPE_xxx' which is the name of a PSA key + type macro. 
For key types that take arguments, the arguments can + be passed either through the optional argument `params` or by + passing an expression of the form 'PSA_KEY_TYPE_xxx(param1, ...)' + in `name` as a string. + """ + + self.name = name.strip() + """The key type macro name (``PSA_KEY_TYPE_xxx``). + + For key types constructed from a macro with arguments, this is the + name of the macro, and the arguments are in `self.params`. + """ + if params is None: + if '(' in self.name: + m = re.match(r'(\w+)\s*\((.*)\)\Z', self.name) + assert m is not None + self.name = m.group(1) + params = m.group(2).split(',') + self.params = (None if params is None else + [param.strip() for param in params]) + """The parameters of the key type, if there are any. + + None if the key type is a macro without arguments. + """ + assert re.match(r'PSA_KEY_TYPE_\w+\Z', self.name) + + self.expression = self.name + """A C expression whose value is the key type encoding.""" + if self.params is not None: + self.expression += '(' + ', '.join(self.params) + ')' + + m = re.match(r'PSA_KEY_TYPE_(\w+)', self.name) + assert m + self.head = re.sub(r'_(?:PUBLIC_KEY|KEY_PAIR)\Z', r'', m.group(1)) + """The key type macro name, with common prefixes and suffixes stripped.""" + + self.private_type = re.sub(r'_PUBLIC_KEY\Z', r'_KEY_PAIR', self.name) + """The key type macro name for the corresponding key pair type. + + For everything other than a public key type, this is the same as + `self.name`. + """ + + def short_expression(self, level: int = 0) -> str: + """Abbreviate the expression, keeping it human-readable. + + See `crypto_knowledge.short_expression`. 
+ """ + return short_expression(self.expression, level=level) + + def is_public(self) -> bool: + """Whether the key type is for public keys.""" + return self.name.endswith('_PUBLIC_KEY') + + DH_KEY_SIZES = { + 'PSA_DH_FAMILY_RFC7919': (2048, 3072, 4096, 6144, 8192), + } # type: Dict[str, Tuple[int, ...]] + ECC_KEY_SIZES = { + 'PSA_ECC_FAMILY_SECP_K1': (192, 225, 256), + 'PSA_ECC_FAMILY_SECP_R1': (224, 256, 384, 521), + 'PSA_ECC_FAMILY_SECP_R2': (160,), + 'PSA_ECC_FAMILY_SECT_K1': (163, 233, 239, 283, 409, 571), + 'PSA_ECC_FAMILY_SECT_R1': (163, 233, 283, 409, 571), + 'PSA_ECC_FAMILY_SECT_R2': (163,), + 'PSA_ECC_FAMILY_BRAINPOOL_P_R1': (160, 192, 224, 256, 320, 384, 512), + 'PSA_ECC_FAMILY_MONTGOMERY': (255, 448), + 'PSA_ECC_FAMILY_TWISTED_EDWARDS': (255, 448), + } # type: Dict[str, Tuple[int, ...]] + KEY_TYPE_SIZES = { + 'PSA_KEY_TYPE_AES': (128, 192, 256), # exhaustive + 'PSA_KEY_TYPE_ARIA': (128, 192, 256), # exhaustive + 'PSA_KEY_TYPE_CAMELLIA': (128, 192, 256), # exhaustive + 'PSA_KEY_TYPE_CHACHA20': (256,), # exhaustive + 'PSA_KEY_TYPE_DERIVE': (120, 128), # sample + 'PSA_KEY_TYPE_DES': (64, 128, 192), # exhaustive + 'PSA_KEY_TYPE_HMAC': (128, 160, 224, 256, 384, 512), # standard size for each supported hash + 'PSA_KEY_TYPE_PASSWORD': (48, 168, 336), # sample + 'PSA_KEY_TYPE_PASSWORD_HASH': (128, 256), # sample + 'PSA_KEY_TYPE_PEPPER': (128, 256), # sample + 'PSA_KEY_TYPE_RAW_DATA': (8, 40, 128), # sample + 'PSA_KEY_TYPE_RSA_KEY_PAIR': (1024, 1536), # small sample + } # type: Dict[str, Tuple[int, ...]] + def sizes_to_test(self) -> Tuple[int, ...]: + """Return a tuple of key sizes to test. + + For key types that only allow a single size, or only a small set of + sizes, these are all the possible sizes. For key types that allow a + wide range of sizes, these are a representative sample of sizes, + excluding large sizes for which a typical resource-constrained platform + may run out of memory. 
+ """ + if self.private_type == 'PSA_KEY_TYPE_ECC_KEY_PAIR': + assert self.params is not None + return self.ECC_KEY_SIZES[self.params[0]] + if self.private_type == 'PSA_KEY_TYPE_DH_KEY_PAIR': + assert self.params is not None + return self.DH_KEY_SIZES[self.params[0]] + return self.KEY_TYPE_SIZES[self.private_type] + + # "48657265006973206b6579a064617461" + DATA_BLOCK = b'Here\000is key\240data' + def key_material(self, bits: int) -> bytes: + """Return a byte string containing suitable key material with the given bit length. + + Use the PSA export representation. The resulting byte string is one that + can be obtained with the following code: + ``` + psa_set_key_type(&attributes, `self.expression`); + psa_set_key_bits(&attributes, `bits`); + psa_set_key_usage_flags(&attributes, PSA_KEY_USAGE_EXPORT); + psa_generate_key(&attributes, &id); + psa_export_key(id, `material`, ...); + ``` + """ + if self.expression in ASYMMETRIC_KEY_DATA: + if bits not in ASYMMETRIC_KEY_DATA[self.expression]: + raise ValueError('No key data for {}-bit {}' + .format(bits, self.expression)) + return ASYMMETRIC_KEY_DATA[self.expression][bits] + if bits % 8 != 0: + raise ValueError('Non-integer number of bytes: {} bits for {}' + .format(bits, self.expression)) + length = bits // 8 + if self.name == 'PSA_KEY_TYPE_DES': + # "644573206b457901644573206b457902644573206b457904" + des3 = b'dEs kEy\001dEs kEy\002dEs kEy\004' + return des3[:length] + return b''.join([self.DATA_BLOCK] * (length // len(self.DATA_BLOCK)) + + [self.DATA_BLOCK[:length % len(self.DATA_BLOCK)]]) + + def can_do(self, alg: 'Algorithm') -> bool: + """Whether this key type can be used for operations with the given algorithm. + + This function does not currently handle key derivation or PAKE. 
+ """ + #pylint: disable=too-many-branches,too-many-return-statements + if not alg.is_valid_for_operation(): + return False + if self.head == 'HMAC' and alg.head == 'HMAC': + return True + if self.head == 'DES': + # 64-bit block ciphers only allow a reduced set of modes. + return alg.head in [ + 'CBC_NO_PADDING', 'CBC_PKCS7', + 'ECB_NO_PADDING', + ] + if self.head in BLOCK_CIPHERS and \ + alg.head in frozenset.union(BLOCK_MAC_MODES, + BLOCK_CIPHER_MODES, + BLOCK_AEAD_MODES): + if alg.head in ['CMAC', 'OFB'] and \ + self.head in ['ARIA', 'CAMELLIA']: + return False # not implemented in Mbed TLS + return True + if self.head == 'CHACHA20' and alg.head == 'CHACHA20_POLY1305': + return True + if self.head in {'ARC4', 'CHACHA20'} and \ + alg.head == 'STREAM_CIPHER': + return True + if self.head == 'RSA' and alg.head.startswith('RSA_'): + return True + if alg.category == AlgorithmCategory.KEY_AGREEMENT and \ + self.is_public(): + # The PSA API does not use public key objects in key agreement + # operations: it imports the public key as a formatted byte string. + # So a public key object with a key agreement algorithm is not + # a valid combination. + return False + if alg.is_invalid_key_agreement_with_derivation(): + return False + if self.head == 'ECC': + assert self.params is not None + eccc = EllipticCurveCategory.from_family(self.params[0]) + if alg.head == 'ECDH' and \ + eccc in {EllipticCurveCategory.SHORT_WEIERSTRASS, + EllipticCurveCategory.MONTGOMERY}: + return True + if alg.head == 'ECDSA' and \ + eccc == EllipticCurveCategory.SHORT_WEIERSTRASS: + return True + if alg.head in {'PURE_EDDSA', 'EDDSA_PREHASH'} and \ + eccc == EllipticCurveCategory.TWISTED_EDWARDS: + return True + if self.head == 'DH' and alg.head == 'FFDH': + return True + return False + + +class AlgorithmCategory(enum.Enum): + """PSA algorithm categories.""" + # The numbers are aligned with the category bits in numerical values of + # algorithms. 
+ HASH = 2 + MAC = 3 + CIPHER = 4 + AEAD = 5 + SIGN = 6 + ASYMMETRIC_ENCRYPTION = 7 + KEY_DERIVATION = 8 + KEY_AGREEMENT = 9 + PAKE = 10 + + def requires_key(self) -> bool: + """Whether operations in this category are set up with a key.""" + return self not in {self.HASH, self.KEY_DERIVATION} + + def is_asymmetric(self) -> bool: + """Whether operations in this category involve asymmetric keys.""" + return self in { + self.SIGN, + self.ASYMMETRIC_ENCRYPTION, + self.KEY_AGREEMENT + } + + +class AlgorithmNotRecognized(Exception): + def __init__(self, expr: str) -> None: + super().__init__('Algorithm not recognized: ' + expr) + self.expr = expr + + +class Algorithm: + """Knowledge about a PSA algorithm.""" + + @staticmethod + def determine_base(expr: str) -> str: + """Return an expression for the "base" of the algorithm. + + This strips off variants of algorithms such as MAC truncation. + + This function does not attempt to detect invalid inputs. + """ + m = re.match(r'PSA_ALG_(?:' + r'(?:TRUNCATED|AT_LEAST_THIS_LENGTH)_MAC|' + r'AEAD_WITH_(?:SHORTENED|AT_LEAST_THIS_LENGTH)_TAG' + r')\((.*),[^,]+\)\Z', expr) + if m: + expr = m.group(1) + return expr + + @staticmethod + def determine_head(expr: str) -> str: + """Return the head of an algorithm expression. + + The head is the first (outermost) constructor, without its PSA_ALG_ + prefix, and with some normalization of similar algorithms. 
+ """ + m = re.match(r'PSA_ALG_(?:DETERMINISTIC_)?(\w+)', expr) + if not m: + raise AlgorithmNotRecognized(expr) + head = m.group(1) + if head == 'KEY_AGREEMENT': + m = re.match(r'PSA_ALG_KEY_AGREEMENT\s*\(\s*PSA_ALG_(\w+)', expr) + if not m: + raise AlgorithmNotRecognized(expr) + head = m.group(1) + head = re.sub(r'_ANY\Z', r'', head) + if re.match(r'ED[0-9]+PH\Z', head): + head = 'EDDSA_PREHASH' + return head + + CATEGORY_FROM_HEAD = { + 'SHA': AlgorithmCategory.HASH, + 'SHAKE256_512': AlgorithmCategory.HASH, + 'MD': AlgorithmCategory.HASH, + 'RIPEMD': AlgorithmCategory.HASH, + 'ANY_HASH': AlgorithmCategory.HASH, + 'HMAC': AlgorithmCategory.MAC, + 'STREAM_CIPHER': AlgorithmCategory.CIPHER, + 'CHACHA20_POLY1305': AlgorithmCategory.AEAD, + 'DSA': AlgorithmCategory.SIGN, + 'ECDSA': AlgorithmCategory.SIGN, + 'EDDSA': AlgorithmCategory.SIGN, + 'PURE_EDDSA': AlgorithmCategory.SIGN, + 'RSA_PSS': AlgorithmCategory.SIGN, + 'RSA_PKCS1V15_SIGN': AlgorithmCategory.SIGN, + 'RSA_PKCS1V15_CRYPT': AlgorithmCategory.ASYMMETRIC_ENCRYPTION, + 'RSA_OAEP': AlgorithmCategory.ASYMMETRIC_ENCRYPTION, + 'HKDF': AlgorithmCategory.KEY_DERIVATION, + 'TLS12_PRF': AlgorithmCategory.KEY_DERIVATION, + 'TLS12_PSK_TO_MS': AlgorithmCategory.KEY_DERIVATION, + 'TLS12_ECJPAKE_TO_PMS': AlgorithmCategory.KEY_DERIVATION, + 'PBKDF': AlgorithmCategory.KEY_DERIVATION, + 'ECDH': AlgorithmCategory.KEY_AGREEMENT, + 'FFDH': AlgorithmCategory.KEY_AGREEMENT, + # KEY_AGREEMENT(...) is a key derivation with a key agreement component + 'KEY_AGREEMENT': AlgorithmCategory.KEY_DERIVATION, + 'JPAKE': AlgorithmCategory.PAKE, + } + for x in BLOCK_MAC_MODES: + CATEGORY_FROM_HEAD[x] = AlgorithmCategory.MAC + for x in BLOCK_CIPHER_MODES: + CATEGORY_FROM_HEAD[x] = AlgorithmCategory.CIPHER + for x in BLOCK_AEAD_MODES: + CATEGORY_FROM_HEAD[x] = AlgorithmCategory.AEAD + + def determine_category(self, expr: str, head: str) -> AlgorithmCategory: + """Return the category of the given algorithm expression. 
+ + This function does not attempt to detect invalid inputs. + """ + prefix = head + while prefix: + if prefix in self.CATEGORY_FROM_HEAD: + return self.CATEGORY_FROM_HEAD[prefix] + if re.match(r'.*[0-9]\Z', prefix): + prefix = re.sub(r'_*[0-9]+\Z', r'', prefix) + else: + prefix = re.sub(r'_*[^_]*\Z', r'', prefix) + raise AlgorithmNotRecognized(expr) + + @staticmethod + def determine_wildcard(expr) -> bool: + """Whether the given algorithm expression is a wildcard. + + This function does not attempt to detect invalid inputs. + """ + if re.search(r'\bPSA_ALG_ANY_HASH\b', expr): + return True + if re.search(r'_AT_LEAST_', expr): + return True + return False + + def __init__(self, expr: str) -> None: + """Analyze an algorithm value. + + The algorithm must be expressed as a C expression containing only + calls to PSA algorithm constructor macros and numeric literals. + + This class is only programmed to handle valid expressions. Invalid + expressions may result in exceptions or in nonsensical results. + """ + self.expression = re.sub(r'\s+', r'', expr) + self.base_expression = self.determine_base(self.expression) + self.head = self.determine_head(self.base_expression) + self.category = self.determine_category(self.base_expression, self.head) + self.is_wildcard = self.determine_wildcard(self.expression) + + def get_key_agreement_derivation(self) -> Optional[str]: + """For a combined key agreement and key derivation algorithm, get the derivation part. + + For anything else, return None. + """ + if self.category != AlgorithmCategory.KEY_AGREEMENT: + return None + m = re.match(r'PSA_ALG_KEY_AGREEMENT\(\w+,\s*(.*)\)\Z', self.expression) + if not m: + return None + kdf_alg = m.group(1) + # Assume kdf_alg is either a valid KDF or 0. 
+ if re.match(r'(?:0[Xx])?0+\s*\Z', kdf_alg): + return None + return kdf_alg + + KEY_DERIVATIONS_INCOMPATIBLE_WITH_AGREEMENT = frozenset([ + 'PSA_ALG_TLS12_ECJPAKE_TO_PMS', # secret input in specific format + ]) + def is_valid_key_agreement_with_derivation(self) -> bool: + """Whether this is a valid combined key agreement and key derivation algorithm.""" + kdf_alg = self.get_key_agreement_derivation() + if kdf_alg is None: + return False + return kdf_alg not in self.KEY_DERIVATIONS_INCOMPATIBLE_WITH_AGREEMENT + + def is_invalid_key_agreement_with_derivation(self) -> bool: + """Whether this is an invalid combined key agreement and key derivation algorithm.""" + kdf_alg = self.get_key_agreement_derivation() + if kdf_alg is None: + return False + return kdf_alg in self.KEY_DERIVATIONS_INCOMPATIBLE_WITH_AGREEMENT + + def short_expression(self, level: int = 0) -> str: + """Abbreviate the expression, keeping it human-readable. + + See `crypto_knowledge.short_expression`. + """ + return short_expression(self.expression, level=level) + + HASH_LENGTH = { + 'PSA_ALG_MD5': 16, + 'PSA_ALG_SHA_1': 20, + } + HASH_LENGTH_BITS_RE = re.compile(r'([0-9]+)\Z') + @classmethod + def hash_length(cls, alg: str) -> int: + """The length of the given hash algorithm, in bytes.""" + if alg in cls.HASH_LENGTH: + return cls.HASH_LENGTH[alg] + m = cls.HASH_LENGTH_BITS_RE.search(alg) + if m: + return int(m.group(1)) // 8 + raise ValueError('Unknown hash length for ' + alg) + + PERMITTED_TAG_LENGTHS = { + 'PSA_ALG_CCM': frozenset([4, 6, 8, 10, 12, 14, 16]), + 'PSA_ALG_CHACHA20_POLY1305': frozenset([16]), + 'PSA_ALG_GCM': frozenset([4, 8, 12, 13, 14, 15, 16]), + } + MAC_LENGTH = { + 'PSA_ALG_CBC_MAC': 16, # actually the block cipher length + 'PSA_ALG_CMAC': 16, # actually the block cipher length + } + HMAC_RE = re.compile(r'PSA_ALG_HMAC\((.*)\)\Z') + @classmethod + def permitted_truncations(cls, base: str) -> FrozenSet[int]: + """Permitted output lengths for the given MAC or AEAD base algorithm. 
+ + For a MAC algorithm, this is the set of truncation lengths that + Mbed TLS supports. + For an AEAD algorithm, this is the set of truncation lengths that + are permitted by the algorithm specification. + """ + if base in cls.PERMITTED_TAG_LENGTHS: + return cls.PERMITTED_TAG_LENGTHS[base] + max_length = cls.MAC_LENGTH.get(base, None) + if max_length is None: + m = cls.HMAC_RE.match(base) + if m: + max_length = cls.hash_length(m.group(1)) + if max_length is None: + raise ValueError('Unknown permitted lengths for ' + base) + return frozenset(range(4, max_length + 1)) + + TRUNCATED_ALG_RE = re.compile( + r'(?PPSA_ALG_(?:AEAD_WITH_SHORTENED_TAG|TRUNCATED_MAC))' + r'\((?P.*),' + r'(?P0[Xx][0-9A-Fa-f]+|[1-9][0-9]*|0[0-7]*)[LUlu]*\)\Z') + def is_invalid_truncation(self) -> bool: + """False for a MAC or AEAD algorithm truncated to an invalid length. + + True for a MAC or AEAD algorithm truncated to a valid length or to + a length that cannot be determined. True for anything other than + a truncated MAC or AEAD. + """ + m = self.TRUNCATED_ALG_RE.match(self.expression) + if m: + base = m.group('base') + to_length = int(m.group('length'), 0) + permitted_lengths = self.permitted_truncations(base) + if to_length not in permitted_lengths: + return True + return False + + def is_valid_for_operation(self) -> bool: + """Whether this algorithm construction is valid for an operation. + + This function assumes that the algorithm is constructed in a + "grammatically" correct way, and only rejects semantically invalid + combinations. + """ + if self.is_wildcard: + return False + if self.is_invalid_truncation(): + return False + return True + + def can_do(self, category: AlgorithmCategory) -> bool: + """Whether this algorithm can perform operations in the given category. 
+ """ + if category == self.category: + return True + if category == AlgorithmCategory.KEY_DERIVATION and \ + self.is_valid_key_agreement_with_derivation(): + return True + return False + + def usage_flags(self, public: bool = False) -> List[str]: + """The list of usage flags describing operations that can perform this algorithm. + + If public is true, only return public-key operations, not private-key operations. + """ + if self.category == AlgorithmCategory.HASH: + flags = [] + elif self.category == AlgorithmCategory.MAC: + flags = ['SIGN_HASH', 'SIGN_MESSAGE', + 'VERIFY_HASH', 'VERIFY_MESSAGE'] + elif self.category == AlgorithmCategory.CIPHER or \ + self.category == AlgorithmCategory.AEAD: + flags = ['DECRYPT', 'ENCRYPT'] + elif self.category == AlgorithmCategory.SIGN: + flags = ['VERIFY_HASH', 'VERIFY_MESSAGE'] + if not public: + flags += ['SIGN_HASH', 'SIGN_MESSAGE'] + elif self.category == AlgorithmCategory.ASYMMETRIC_ENCRYPTION: + flags = ['ENCRYPT'] + if not public: + flags += ['DECRYPT'] + elif self.category == AlgorithmCategory.KEY_DERIVATION or \ + self.category == AlgorithmCategory.KEY_AGREEMENT: + flags = ['DERIVE'] + else: + raise AlgorithmNotRecognized(self.expression) + return ['PSA_KEY_USAGE_' + flag for flag in flags] diff --git a/scripts/framework_dev/ecp.py b/scripts/framework_dev/ecp.py new file mode 100644 index 000000000..b40f3b126 --- /dev/null +++ b/scripts/framework_dev/ecp.py @@ -0,0 +1,875 @@ +"""Framework classes for generation of ecp test cases.""" +# Copyright The Mbed TLS Contributors +# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later +# + +from typing import List + +from . import test_data_generation +from . 
class EcpTarget(test_data_generation.BaseTarget):
    #pylint: disable=abstract-method, too-few-public-methods
    """Target for ecp test case generation."""
    # Basename of the generated test data file for the ecp test suite.
    target_basename = 'test_suite_ecp.generated'
("fec3f6b32e8d4b8a8f54f8ceacaab39e83844b40ffa9b9f1" + "5c14bc4a829e07b0829a48d422fe99a22c70501e533c9135"), + ("97eeab64ca2ce6bc5d3fd983c34c769fe89204e2e8168561" + "867e5e15bc01bfce6a27e0dfcbf8754472154e76e4c11ab2"), + ("bd143fa9b714210c665d7435c1066932f4767f26294365b2" + "721dea3bf63f23d0dbe53fcafb2147df5ca495fa5a91c89b"), + + # Next 2 number generated by random.getrandbits(192) + "47733e847d718d733ff98ff387c56473a7a83ee0761ebfd2", + "cbd4d3e2d4dec9ef83f0be4e80371eb97f81375eecc1cb63" + ] + + @property + def arg_a(self) -> str: + return super().format_arg('{:x}'.format(self.int_a)).zfill(2 * self.hex_digits) + + def result(self) -> List[str]: + result = self.int_a % self.int_n + return [self.format_result(result)] + + @property + def is_valid(self) -> bool: + return True + + def arguments(self)-> List[str]: + args = super().arguments() + return ["MBEDTLS_ECP_DP_SECP192R1"] + args + + +class EcpP224R1Raw(bignum_common.ModOperationCommon, + EcpTarget): + """Test cases for ECP P224 fast reduction.""" + symbol = "-" + test_function = "ecp_mod_p_generic_raw" + test_name = "ecp_mod_p224_raw" + input_style = "arch_split" + arity = 1 + dependencies = ["MBEDTLS_ECP_DP_SECP224R1_ENABLED", + "MBEDTLS_ECP_NIST_OPTIM"] + + moduli = ["ffffffffffffffffffffffffffffffff000000000000000000000001"] # type: List[str] + + input_values = [ + "0", "1", + + # Modulus - 1 + "ffffffffffffffffffffffffffffffff000000000000000000000000", + + # Modulus + 1 + "ffffffffffffffffffffffffffffffff000000000000000000000002", + + # 2^224 - 1 + "ffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + + # Maximum canonical P224 multiplication result + ("fffffffffffffffffffffffffffffffe000000000000000000000000" + "00000001000000000000000000000000000000000000000000000000"), + + # Generate an overflow during reduction + ("00000000000000000000000000010000000070000000002000001000" + "ffffffffffff9fffffffffe00000efff000070000000002000001003"), + + # Generate an underflow during reduction + 
("00000001000000000000000000000000000000000000000000000000" + "00000000000dc0000000000000000001000000010000000100000003"), + + # First 8 number generated by random.getrandbits(448) - seed(2,2) + ("da94e3e8ab73738fcf1822ffbc6887782b491044d5e341245c6e4337" + "15ba2bdd177219d30e7a269fd95bafc8f2a4d27bdcf4bb99f4bea973"), + ("cdbd47d364be8049a372db8f6e405d93ffed9235288bc781ae662675" + "94c9c9500925e4749b575bd13653f8dd9b1f282e4067c3584ee207f8"), + ("defc044a09325626e6b58de744ab6cce80877b6f71e1f6d2ef8acd12" + "8b4f2fc15f3f57ebf30b94fa82523e86feac7eb7dc38f519b91751da"), + ("2d6c797f8f7d9b782a1be9cd8697bbd0e2520e33e44c50556c71c4a6" + "6148a86fe8624fab5186ee32ee8d7ee9770348a05d300cb90706a045"), + ("8f54f8ceacaab39e83844b40ffa9b9f15c14bc4a829e07b0829a48d4" + "22fe99a22c70501e533c91352d3d854e061b90303b08c6e33c729578"), + ("97eeab64ca2ce6bc5d3fd983c34c769fe89204e2e8168561867e5e15" + "bc01bfce6a27e0dfcbf8754472154e76e4c11ab2fec3f6b32e8d4b8a"), + ("a7a83ee0761ebfd2bd143fa9b714210c665d7435c1066932f4767f26" + "294365b2721dea3bf63f23d0dbe53fcafb2147df5ca495fa5a91c89b"), + ("74667bffe202849da9643a295a9ac6decbd4d3e2d4dec9ef83f0be4e" + "80371eb97f81375eecc1cb6347733e847d718d733ff98ff387c56473"), + + # Next 2 number generated by random.getrandbits(224) + "eb9ac688b9d39cca91551e8259cc60b17604e4b4e73695c3e652c71a", + "f0caeef038c89b38a8acb5137c9260dc74e088a9b9492f258ebdbfe3" + ] + + @property + def arg_a(self) -> str: + limbs = 2 * bignum_common.bits_to_limbs(224, self.bits_in_limb) + hex_digits = bignum_common.hex_digits_for_limb(limbs, self.bits_in_limb) + return super().format_arg('{:x}'.format(self.int_a)).zfill(hex_digits) + + def result(self) -> List[str]: + result = self.int_a % self.int_n + return [self.format_result(result)] + + @property + def is_valid(self) -> bool: + return True + + def arguments(self)-> List[str]: + args = super().arguments() + return ["MBEDTLS_ECP_DP_SECP224R1"] + args + + +class EcpP256R1Raw(bignum_common.ModOperationCommon, + EcpTarget): + """Test cases for 
ECP P256 fast reduction.""" + symbol = "-" + test_function = "ecp_mod_p_generic_raw" + test_name = "ecp_mod_p256_raw" + input_style = "fixed" + arity = 1 + dependencies = ["MBEDTLS_ECP_DP_SECP256R1_ENABLED", + "MBEDTLS_ECP_NIST_OPTIM"] + + moduli = ["ffffffff00000001000000000000000000000000ffffffffffffffffffffffff"] # type: List[str] + + input_values = [ + "0", "1", + + # Modulus - 1 + "ffffffff00000001000000000000000000000000fffffffffffffffffffffffe", + + # Modulus + 1 + "ffffffff00000001000000000000000000000001000000000000000000000000", + + # 2^256 - 1 + "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + + # Maximum canonical P256 multiplication result + ("fffffffe00000002fffffffe0000000100000001fffffffe00000001fffffffc" + "00000003fffffffcfffffffffffffffffffffffc000000000000000000000004"), + + # Generate an overflow during reduction + ("0000000000000000000000010000000000000000000000000000000000000000" + "00000000000000000000000000000000000000000000000000000000ffffffff"), + + # Generate an underflow during reduction + ("0000000000000000000000000000000000000000000000000000000000000010" + "ffffffff00000000000000000000000000000000000000000000000000000000"), + + # Generate an overflow during carry reduction + ("aaaaaaaa00000000000000000000000000000000000000000000000000000000" + "00000000000000000000000000000000aaaaaaacaaaaaaaaaaaaaaaa00000000"), + + # Generate an underflow during carry reduction + ("000000000000000000000001ffffffff00000000000000000000000000000000" + "0000000000000000000000000000000000000002000000020000000100000002"), + + # First 8 number generated by random.getrandbits(512) - seed(2,2) + ("4067c3584ee207f8da94e3e8ab73738fcf1822ffbc6887782b491044d5e34124" + "5c6e433715ba2bdd177219d30e7a269fd95bafc8f2a4d27bdcf4bb99f4bea973"), + ("82523e86feac7eb7dc38f519b91751dacdbd47d364be8049a372db8f6e405d93" + "ffed9235288bc781ae66267594c9c9500925e4749b575bd13653f8dd9b1f282e"), + ("e8624fab5186ee32ee8d7ee9770348a05d300cb90706a045defc044a09325626" 
+ "e6b58de744ab6cce80877b6f71e1f6d2ef8acd128b4f2fc15f3f57ebf30b94fa"), + ("829a48d422fe99a22c70501e533c91352d3d854e061b90303b08c6e33c729578" + "2d6c797f8f7d9b782a1be9cd8697bbd0e2520e33e44c50556c71c4a66148a86f"), + ("e89204e2e8168561867e5e15bc01bfce6a27e0dfcbf8754472154e76e4c11ab2" + "fec3f6b32e8d4b8a8f54f8ceacaab39e83844b40ffa9b9f15c14bc4a829e07b0"), + ("bd143fa9b714210c665d7435c1066932f4767f26294365b2721dea3bf63f23d0" + "dbe53fcafb2147df5ca495fa5a91c89b97eeab64ca2ce6bc5d3fd983c34c769f"), + ("74667bffe202849da9643a295a9ac6decbd4d3e2d4dec9ef83f0be4e80371eb9" + "7f81375eecc1cb6347733e847d718d733ff98ff387c56473a7a83ee0761ebfd2"), + ("d08f1bb2531d6460f0caeef038c89b38a8acb5137c9260dc74e088a9b9492f25" + "8ebdbfe3eb9ac688b9d39cca91551e8259cc60b17604e4b4e73695c3e652c71a"), + + # Next 2 number generated by random.getrandbits(256) + "c5e2486c44a4a8f69dc8db48e86ec9c6e06f291b2a838af8d5c44a4eb3172062", + "d4c0dca8b4c9e755cc9c3adcf515a8234da4daeb4f3f87777ad1f45ae9500ec9" + ] + + @property + def arg_a(self) -> str: + return super().format_arg('{:x}'.format(self.int_a)).zfill(2 * self.hex_digits) + + def result(self) -> List[str]: + result = self.int_a % self.int_n + return [self.format_result(result)] + + @property + def is_valid(self) -> bool: + return True + + def arguments(self)-> List[str]: + args = super().arguments() + return ["MBEDTLS_ECP_DP_SECP256R1"] + args + + +class EcpP384R1Raw(bignum_common.ModOperationCommon, + EcpTarget): + """Test cases for ECP P384 fast reduction.""" + test_function = "ecp_mod_p_generic_raw" + test_name = "ecp_mod_p384_raw" + input_style = "fixed" + arity = 1 + dependencies = ["MBEDTLS_ECP_DP_SECP384R1_ENABLED", + "MBEDTLS_ECP_NIST_OPTIM"] + + moduli = [("ffffffffffffffffffffffffffffffffffffffffffffffff" + "fffffffffffffffeffffffff0000000000000000ffffffff") + ] # type: List[str] + + input_values = [ + "0", "1", + + # Modulus - 1 + ("ffffffffffffffffffffffffffffffffffffffffffffffff" + "fffffffffffffffeffffffff0000000000000000fffffffe"), + + # 
Modulus + 1 + ("ffffffffffffffffffffffffffffffffffffffffffffffff" + "fffffffffffffffeffffffff000000000000000100000000"), + + # 2^384 - 1 + ("ffffffffffffffffffffffffffffffffffffffffffffffff" + "ffffffffffffffffffffffffffffffffffffffffffffffff"), + + # Maximum canonical P384 multiplication result + ("ffffffffffffffffffffffffffffffffffffffffffffffff" + "fffffffffffffffdfffffffe0000000000000001fffffffc" + "000000000000000000000000000000010000000200000000" + "fffffffe000000020000000400000000fffffffc00000004"), + + # Testing with overflow in A(12) + A(21) + A(20); + ("497811378624857a2c2af60d70583376545484cfae5c812f" + "e2999fc1abb51d18b559e8ca3b50aaf263fdf8f24bdfb98f" + "ffffffff20e65bf9099e4e73a5e8b517cf4fbeb8fd1750fd" + "ae6d43f2e53f82d5ffffffffffffffffcc6f1e06111c62e0"), + + # Testing with underflow in A(13) + A(22) + A(23) - A(12) - A(20); + ("dfdd25e96777406b3c04b8c7b406f5fcf287e1e576003a09" + "2852a6fbe517f2712b68abef41dbd35183a0614fb7222606" + "ffffffff84396eee542f18a9189d94396c784059c17a9f18" + "f807214ef32f2f10ffffffff8a77fac20000000000000000"), + + # Testing with overflow in A(23) + A(20) + A(19) - A(22); + ("783753f8a5afba6c1862eead1deb2fcdd907272be3ffd185" + "42b24a71ee8b26cab0aa33513610ff973042bbe1637cc9fc" + "99ad36c7f703514572cf4f5c3044469a8f5be6312c19e5d3" + "f8fc1ac6ffffffffffffffff8c86252400000000ffffffff"), + + # Testing with underflow in A(23) + A(20) + A(19) - A(22); + ("65e1d2362fce922663b7fd517586e88842a9b4bd092e93e6" + "251c9c69f278cbf8285d99ae3b53da5ba36e56701e2b17c2" + "25f1239556c5f00117fa140218b46ebd8e34f50d0018701f" + "a8a0a5cc00000000000000004410bcb4ffffffff00000000"), + + # Testing the second round of carry reduction + ("000000000000000000000000ffffffffffffffffffffffff" + "ffffffffffffffffffffffffffffffff0000000000000000" + "0000000000000000ffffffff000000000000000000000001" + "00000000000000000000000000000000ffffffff00000001"), + + # First 8 number generated by random.getrandbits(768) - seed(2,2) + 
("ffed9235288bc781ae66267594c9c9500925e4749b575bd1" + "3653f8dd9b1f282e4067c3584ee207f8da94e3e8ab73738f" + "cf1822ffbc6887782b491044d5e341245c6e433715ba2bdd" + "177219d30e7a269fd95bafc8f2a4d27bdcf4bb99f4bea973"), + ("e8624fab5186ee32ee8d7ee9770348a05d300cb90706a045" + "defc044a09325626e6b58de744ab6cce80877b6f71e1f6d2" + "ef8acd128b4f2fc15f3f57ebf30b94fa82523e86feac7eb7" + "dc38f519b91751dacdbd47d364be8049a372db8f6e405d93"), + ("fec3f6b32e8d4b8a8f54f8ceacaab39e83844b40ffa9b9f1" + "5c14bc4a829e07b0829a48d422fe99a22c70501e533c9135" + "2d3d854e061b90303b08c6e33c7295782d6c797f8f7d9b78" + "2a1be9cd8697bbd0e2520e33e44c50556c71c4a66148a86f"), + ("bd143fa9b714210c665d7435c1066932f4767f26294365b2" + "721dea3bf63f23d0dbe53fcafb2147df5ca495fa5a91c89b" + "97eeab64ca2ce6bc5d3fd983c34c769fe89204e2e8168561" + "867e5e15bc01bfce6a27e0dfcbf8754472154e76e4c11ab2"), + ("8ebdbfe3eb9ac688b9d39cca91551e8259cc60b17604e4b4" + "e73695c3e652c71a74667bffe202849da9643a295a9ac6de" + "cbd4d3e2d4dec9ef83f0be4e80371eb97f81375eecc1cb63" + "47733e847d718d733ff98ff387c56473a7a83ee0761ebfd2"), + ("d4c0dca8b4c9e755cc9c3adcf515a8234da4daeb4f3f8777" + "7ad1f45ae9500ec9c5e2486c44a4a8f69dc8db48e86ec9c6" + "e06f291b2a838af8d5c44a4eb3172062d08f1bb2531d6460" + "f0caeef038c89b38a8acb5137c9260dc74e088a9b9492f25"), + ("0227eeb7b9d7d01f5769da05d205bbfcc8c69069134bccd3" + "e1cf4f589f8e4ce0af29d115ef24bd625dd961e6830b54fa" + "7d28f93435339774bb1e386c4fd5079e681b8f5896838b76" + "9da59b74a6c3181c81e220df848b1df78feb994a81167346"), + ("d322a7353ead4efe440e2b4fda9c025a22f1a83185b98f5f" + "c11e60de1b343f52ea748db9e020307aaeb6db2c3a038a70" + "9779ac1f45e9dd320c855fdfa7251af0930cdbd30f0ad2a8" + "1b2d19a2beaa14a7ff3fe32a30ffc4eed0a7bd04e85bfcdd"), + + # Next 2 number generated by random.getrandbits(384) + ("5c3747465cc36c270e8a35b10828d569c268a20eb78ac332" + "e5e138e26c4454b90f756132e16dce72f18e859835e1f291"), + ("eb2b5693babb7fbb0a76c196067cfdcb11457d9cf45e2fa0" + "1d7f4275153924800600571fac3a5b263fdf57cd2c006497") + ] + + 
@property + def arg_a(self) -> str: + return super().format_arg('{:x}'.format(self.int_a)).zfill(2 * self.hex_digits) + + def result(self) -> List[str]: + result = self.int_a % self.int_n + return [self.format_result(result)] + + @property + def is_valid(self) -> bool: + return True + + def arguments(self)-> List[str]: + args = super().arguments() + return ["MBEDTLS_ECP_DP_SECP384R1"] + args + + +class EcpP521R1Raw(bignum_common.ModOperationCommon, + EcpTarget): + """Test cases for ECP P521 fast reduction.""" + test_function = "ecp_mod_p_generic_raw" + test_name = "ecp_mod_p521_raw" + input_style = "arch_split" + arity = 1 + dependencies = ["MBEDTLS_ECP_DP_SECP521R1_ENABLED", + "MBEDTLS_ECP_NIST_OPTIM"] + + moduli = [("01ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff") + ] # type: List[str] + + input_values = [ + "0", "1", + + # Modulus - 1 + ("01ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + "fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe"), + + # Modulus + 1 + ("020000000000000000000000000000000000000000000000000000000000000000" + "000000000000000000000000000000000000000000000000000000000000000000"), + + # Maximum canonical P521 multiplication result + ("0003ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + "fffff800" + "0000000000000000000000000000000000000000000000000000000000000000" + "0000000000000000000000000000000000000000000000000000000000000004"), + + # Test case for overflow during addition + ("0001efffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + "000001ef" + "0000000000000000000000000000000000000000000000000000000000000000" + "000000000000000000000000000000000000000000000000000000000f000000"), + + # First 8 number generated by random.getrandbits(1042) - 
seed(2,2) + ("0003cc2e82523e86feac7eb7dc38f519b91751dacdbd47d364be8049a372db8f" + "6e405d93ffed9235288bc781ae66267594c9c9500925e4749b575bd13653f8dd" + "9b1f282e" + "4067c3584ee207f8da94e3e8ab73738fcf1822ffbc6887782b491044d5e34124" + "5c6e433715ba2bdd177219d30e7a269fd95bafc8f2a4d27bdcf4bb99f4bea973"), + ("00017052829e07b0829a48d422fe99a22c70501e533c91352d3d854e061b9030" + "3b08c6e33c7295782d6c797f8f7d9b782a1be9cd8697bbd0e2520e33e44c5055" + "6c71c4a6" + "6148a86fe8624fab5186ee32ee8d7ee9770348a05d300cb90706a045defc044a" + "09325626e6b58de744ab6cce80877b6f71e1f6d2ef8acd128b4f2fc15f3f57eb"), + ("00021f15a7a83ee0761ebfd2bd143fa9b714210c665d7435c1066932f4767f26" + "294365b2721dea3bf63f23d0dbe53fcafb2147df5ca495fa5a91c89b97eeab64" + "ca2ce6bc" + "5d3fd983c34c769fe89204e2e8168561867e5e15bc01bfce6a27e0dfcbf87544" + "72154e76e4c11ab2fec3f6b32e8d4b8a8f54f8ceacaab39e83844b40ffa9b9f1"), + ("000381bc2a838af8d5c44a4eb3172062d08f1bb2531d6460f0caeef038c89b38" + "a8acb5137c9260dc74e088a9b9492f258ebdbfe3eb9ac688b9d39cca91551e82" + "59cc60b1" + "7604e4b4e73695c3e652c71a74667bffe202849da9643a295a9ac6decbd4d3e2" + "d4dec9ef83f0be4e80371eb97f81375eecc1cb6347733e847d718d733ff98ff3"), + ("00034816c8c69069134bccd3e1cf4f589f8e4ce0af29d115ef24bd625dd961e6" + "830b54fa7d28f93435339774bb1e386c4fd5079e681b8f5896838b769da59b74" + "a6c3181c" + "81e220df848b1df78feb994a81167346d4c0dca8b4c9e755cc9c3adcf515a823" + "4da4daeb4f3f87777ad1f45ae9500ec9c5e2486c44a4a8f69dc8db48e86ec9c6"), + ("000397846c4454b90f756132e16dce72f18e859835e1f291d322a7353ead4efe" + "440e2b4fda9c025a22f1a83185b98f5fc11e60de1b343f52ea748db9e020307a" + "aeb6db2c" + "3a038a709779ac1f45e9dd320c855fdfa7251af0930cdbd30f0ad2a81b2d19a2" + "beaa14a7ff3fe32a30ffc4eed0a7bd04e85bfcdd0227eeb7b9d7d01f5769da05"), + ("00002c3296e6bc4d62b47204007ee4fab105d83e85e951862f0981aebc1b00d9" + "2838e766ef9b6bf2d037fe2e20b6a8464174e75a5f834da70569c018eb2b5693" + "babb7fbb" + "0a76c196067cfdcb11457d9cf45e2fa01d7f4275153924800600571fac3a5b26" + 
"3fdf57cd2c0064975c3747465cc36c270e8a35b10828d569c268a20eb78ac332"), + ("00009d23b4917fc09f20dbb0dcc93f0e66dfe717c17313394391b6e2e6eacb0f" + "0bb7be72bd6d25009aeb7fa0c4169b148d2f527e72daf0a54ef25c0707e33868" + "7d1f7157" + "5653a45c49390aa51cf5192bbf67da14be11d56ba0b4a2969d8055a9f03f2d71" + "581d8e830112ff0f0948eccaf8877acf26c377c13f719726fd70bddacb4deeec"), + + # Next 2 number generated by random.getrandbits(521) + ("12b84ae65e920a63ac1f2b64df6dff07870c9d531ae72a47403063238da1a1fe" + "3f9d6a179fa50f96cd4aff9261aa92c0e6f17ec940639bc2ccdf572df00790813e3"), + ("166049dd332a73fa0b26b75196cf87eb8a09b27ec714307c68c425424a1574f1" + "eedf5b0f16cdfdb839424d201e653f53d6883ca1c107ca6e706649889c0c7f38608") + ] + + @property + def arg_a(self) -> str: + # Number of limbs: 2 * N + return super().format_arg('{:x}'.format(self.int_a)).zfill(2 * self.hex_digits) + + def result(self) -> List[str]: + result = self.int_a % self.int_n + return [self.format_result(result)] + + @property + def is_valid(self) -> bool: + return True + + def arguments(self)-> List[str]: + args = super().arguments() + return ["MBEDTLS_ECP_DP_SECP521R1"] + args + + +class EcpP192K1Raw(bignum_common.ModOperationCommon, + EcpTarget): + """Test cases for ECP P192K1 fast reduction.""" + symbol = "-" + test_function = "ecp_mod_p_generic_raw" + test_name = "ecp_mod_p192k1_raw" + input_style = "fixed" + arity = 1 + dependencies = ["MBEDTLS_ECP_DP_SECP192K1_ENABLED"] + + moduli = ["fffffffffffffffffffffffffffffffffffffffeffffee37"] # type: List[str] + + input_values = [ + "0", "1", + + # Modulus - 1 + "fffffffffffffffffffffffffffffffffffffffeffffee36", + + # Modulus + 1 + "fffffffffffffffffffffffffffffffffffffffeffffee38", + + # 2^192 - 1 + "ffffffffffffffffffffffffffffffffffffffffffffffff", + + # Maximum canonical P192K1 multiplication result + ("fffffffffffffffffffffffffffffffffffffffdffffdc6c" + "0000000000000000000000000000000100002394013c7364"), + + # Test case for overflow during addition + 
("00000007ffff71b809e27dd832cfd5e04d9d2dbb9f8da217" + "0000000000000000000000000000000000000000520834f0"), + + # First 8 number generated by random.getrandbits(384) - seed(2,2) + ("cf1822ffbc6887782b491044d5e341245c6e433715ba2bdd" + "177219d30e7a269fd95bafc8f2a4d27bdcf4bb99f4bea973"), + ("ffed9235288bc781ae66267594c9c9500925e4749b575bd1" + "3653f8dd9b1f282e4067c3584ee207f8da94e3e8ab73738f"), + ("ef8acd128b4f2fc15f3f57ebf30b94fa82523e86feac7eb7" + "dc38f519b91751dacdbd47d364be8049a372db8f6e405d93"), + ("e8624fab5186ee32ee8d7ee9770348a05d300cb90706a045" + "defc044a09325626e6b58de744ab6cce80877b6f71e1f6d2"), + ("2d3d854e061b90303b08c6e33c7295782d6c797f8f7d9b78" + "2a1be9cd8697bbd0e2520e33e44c50556c71c4a66148a86f"), + ("fec3f6b32e8d4b8a8f54f8ceacaab39e83844b40ffa9b9f1" + "5c14bc4a829e07b0829a48d422fe99a22c70501e533c9135"), + ("97eeab64ca2ce6bc5d3fd983c34c769fe89204e2e8168561" + "867e5e15bc01bfce6a27e0dfcbf8754472154e76e4c11ab2"), + ("bd143fa9b714210c665d7435c1066932f4767f26294365b2" + "721dea3bf63f23d0dbe53fcafb2147df5ca495fa5a91c89b"), + + # Next 2 number generated by random.getrandbits(192) + "47733e847d718d733ff98ff387c56473a7a83ee0761ebfd2", + "cbd4d3e2d4dec9ef83f0be4e80371eb97f81375eecc1cb63" + ] + + @property + def arg_a(self) -> str: + return super().format_arg('{:x}'.format(self.int_a)).zfill(2 * self.hex_digits) + + def result(self) -> List[str]: + result = self.int_a % self.int_n + return [self.format_result(result)] + + @property + def is_valid(self) -> bool: + return True + + def arguments(self): + args = super().arguments() + return ["MBEDTLS_ECP_DP_SECP192K1"] + args + + +class EcpP224K1Raw(bignum_common.ModOperationCommon, + EcpTarget): + """Test cases for ECP P224 fast reduction.""" + symbol = "-" + test_function = "ecp_mod_p_generic_raw" + test_name = "ecp_mod_p224k1_raw" + input_style = "arch_split" + arity = 1 + dependencies = ["MBEDTLS_ECP_DP_SECP224K1_ENABLED"] + + moduli = ["fffffffffffffffffffffffffffffffffffffffffffffffeffffe56d"] # type: 
List[str] + + input_values = [ + "0", "1", + + # Modulus - 1 + "fffffffffffffffffffffffffffffffffffffffffffffffeffffe56c", + + # Modulus + 1 + "fffffffffffffffffffffffffffffffffffffffffffffffeffffe56e", + + # 2^224 - 1 + "ffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + + # Maximum canonical P224K1 multiplication result + ("fffffffffffffffffffffffffffffffffffffffffffffffdffffcad8" + "00000000000000000000000000000000000000010000352802c26590"), + + # Test case for overflow during addition + ("0000007ffff2b68161180fd8cd92e1a109be158a19a99b1809db8032" + "0000000000000000000000000000000000000000000000000bf04f49"), + + # First 8 number generated by random.getrandbits(448) - seed(2,2) + ("da94e3e8ab73738fcf1822ffbc6887782b491044d5e341245c6e4337" + "15ba2bdd177219d30e7a269fd95bafc8f2a4d27bdcf4bb99f4bea973"), + ("cdbd47d364be8049a372db8f6e405d93ffed9235288bc781ae662675" + "94c9c9500925e4749b575bd13653f8dd9b1f282e4067c3584ee207f8"), + ("defc044a09325626e6b58de744ab6cce80877b6f71e1f6d2ef8acd12" + "8b4f2fc15f3f57ebf30b94fa82523e86feac7eb7dc38f519b91751da"), + ("2d6c797f8f7d9b782a1be9cd8697bbd0e2520e33e44c50556c71c4a6" + "6148a86fe8624fab5186ee32ee8d7ee9770348a05d300cb90706a045"), + ("8f54f8ceacaab39e83844b40ffa9b9f15c14bc4a829e07b0829a48d4" + "22fe99a22c70501e533c91352d3d854e061b90303b08c6e33c729578"), + ("97eeab64ca2ce6bc5d3fd983c34c769fe89204e2e8168561867e5e15" + "bc01bfce6a27e0dfcbf8754472154e76e4c11ab2fec3f6b32e8d4b8a"), + ("a7a83ee0761ebfd2bd143fa9b714210c665d7435c1066932f4767f26" + "294365b2721dea3bf63f23d0dbe53fcafb2147df5ca495fa5a91c89b"), + ("74667bffe202849da9643a295a9ac6decbd4d3e2d4dec9ef83f0be4e" + "80371eb97f81375eecc1cb6347733e847d718d733ff98ff387c56473"), + + # Next 2 number generated by random.getrandbits(224) + ("eb9ac688b9d39cca91551e8259cc60b17604e4b4e73695c3e652c71a"), + ("f0caeef038c89b38a8acb5137c9260dc74e088a9b9492f258ebdbfe3"), + ] + + @property + def arg_a(self) -> str: + limbs = 2 * bignum_common.bits_to_limbs(224, self.bits_in_limb) + 
class EcpP256K1Raw(bignum_common.ModOperationCommon,
                   EcpTarget):
    """Test cases for ECP P256K1 (secp256k1) fast reduction.

    Each entry of input_values is reduced modulo the secp256k1 prime;
    the expected output is computed with Python big-integer arithmetic
    in result(), so the C fast-reduction path is checked against an
    independent reference.
    """
    symbol = "-"
    test_function = "ecp_mod_p_generic_raw"
    test_name = "ecp_mod_p256k1_raw"
    input_style = "fixed"
    arity = 1
    dependencies = ["MBEDTLS_ECP_DP_SECP256K1_ENABLED"]

    moduli = ["fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f"] # type: List[str]

    input_values = [
        "0", "1",

        # Modulus - 1
        "fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2e",

        # Modulus + 1
        "fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc30",

        # 2^256 - 1
        "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",

        # Maximum canonical P256K1 multiplication result
        ("fffffffffffffffffffffffffffffffffffffffffffffffffffffffdfffff85c"
         "000000000000000000000000000000000000000000000001000007a4000e9844"),

        # Test case for overflow during addition.
        # NOTE(review): the original list contained this exact value twice,
        # which generated a duplicated (identical) test case; the second
        # copy has been removed.
        ("0000fffffc2f000e90a0c86a0a63234e5ba641f43a7e4aecc4040e67ec850562"
         "00000000000000000000000000000000000000000000000000000000585674fd"),

        # First 8 number generated by random.getrandbits(512) - seed(2,2)
        ("4067c3584ee207f8da94e3e8ab73738fcf1822ffbc6887782b491044d5e34124"
         "5c6e433715ba2bdd177219d30e7a269fd95bafc8f2a4d27bdcf4bb99f4bea973"),
        ("82523e86feac7eb7dc38f519b91751dacdbd47d364be8049a372db8f6e405d93"
         "ffed9235288bc781ae66267594c9c9500925e4749b575bd13653f8dd9b1f282e"),
        ("e8624fab5186ee32ee8d7ee9770348a05d300cb90706a045defc044a09325626"
         "e6b58de744ab6cce80877b6f71e1f6d2ef8acd128b4f2fc15f3f57ebf30b94fa"),
        ("829a48d422fe99a22c70501e533c91352d3d854e061b90303b08c6e33c729578"
         "2d6c797f8f7d9b782a1be9cd8697bbd0e2520e33e44c50556c71c4a66148a86f"),
        ("e89204e2e8168561867e5e15bc01bfce6a27e0dfcbf8754472154e76e4c11ab2"
         "fec3f6b32e8d4b8a8f54f8ceacaab39e83844b40ffa9b9f15c14bc4a829e07b0"),
        ("bd143fa9b714210c665d7435c1066932f4767f26294365b2721dea3bf63f23d0"
         "dbe53fcafb2147df5ca495fa5a91c89b97eeab64ca2ce6bc5d3fd983c34c769f"),
        ("74667bffe202849da9643a295a9ac6decbd4d3e2d4dec9ef83f0be4e80371eb9"
         "7f81375eecc1cb6347733e847d718d733ff98ff387c56473a7a83ee0761ebfd2"),
        ("d08f1bb2531d6460f0caeef038c89b38a8acb5137c9260dc74e088a9b9492f25"
         "8ebdbfe3eb9ac688b9d39cca91551e8259cc60b17604e4b4e73695c3e652c71a"),

        # Next 2 number generated by random.getrandbits(256)
        ("c5e2486c44a4a8f69dc8db48e86ec9c6e06f291b2a838af8d5c44a4eb3172062"),
        ("d4c0dca8b4c9e755cc9c3adcf515a8234da4daeb4f3f87777ad1f45ae9500ec9"),
    ]

    @property
    def arg_a(self) -> str:
        # Operands are zero-padded to the full width of a multiplication
        # result: twice the number of hex digits of the modulus.
        return super().format_arg('{:x}'.format(self.int_a)).zfill(2 * self.hex_digits)

    def result(self) -> List[str]:
        # Reference result: plain modular reduction with Python integers.
        result = self.int_a % self.int_n
        return [self.format_result(result)]

    @property
    def is_valid(self) -> bool:
        # Every listed operand is a valid input for the raw reduction.
        return True

    def arguments(self):
        args = super().arguments()
        return ["MBEDTLS_ECP_DP_SECP256K1"] + args
("7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffec"), + + # Modulus + 1 + ("7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffee"), + + # 2^255 - 1 + ("7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"), + + # Maximum canonical P255 multiplication result + ("3fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffec" + "0000000000000000000000000000000000000000000000000000000000000190"), + + # First 8 number generated by random.getrandbits(510) - seed(2,2) + ("1019f0d64ee207f8da94e3e8ab73738fcf1822ffbc6887782b491044d5e34124" + "5c6e433715ba2bdd177219d30e7a269fd95bafc8f2a4d27bdcf4bb99f4bea973"), + ("20948fa1feac7eb7dc38f519b91751dacdbd47d364be8049a372db8f6e405d93" + "ffed9235288bc781ae66267594c9c9500925e4749b575bd13653f8dd9b1f282e"), + ("3a1893ea5186ee32ee8d7ee9770348a05d300cb90706a045defc044a09325626" + "e6b58de744ab6cce80877b6f71e1f6d2ef8acd128b4f2fc15f3f57ebf30b94fa"), + ("20a6923522fe99a22c70501e533c91352d3d854e061b90303b08c6e33c729578" + "2d6c797f8f7d9b782a1be9cd8697bbd0e2520e33e44c50556c71c4a66148a86f"), + ("3a248138e8168561867e5e15bc01bfce6a27e0dfcbf8754472154e76e4c11ab2" + "fec3f6b32e8d4b8a8f54f8ceacaab39e83844b40ffa9b9f15c14bc4a829e07b0"), + ("2f450feab714210c665d7435c1066932f4767f26294365b2721dea3bf63f23d0" + "dbe53fcafb2147df5ca495fa5a91c89b97eeab64ca2ce6bc5d3fd983c34c769f"), + ("1d199effe202849da9643a295a9ac6decbd4d3e2d4dec9ef83f0be4e80371eb9" + "7f81375eecc1cb6347733e847d718d733ff98ff387c56473a7a83ee0761ebfd2"), + ("3423c6ec531d6460f0caeef038c89b38a8acb5137c9260dc74e088a9b9492f25" + "8ebdbfe3eb9ac688b9d39cca91551e8259cc60b17604e4b4e73695c3e652c71a"), + + # Next 2 number generated by random.getrandbits(255) + ("62f1243644a4a8f69dc8db48e86ec9c6e06f291b2a838af8d5c44a4eb3172062"), + ("6a606e54b4c9e755cc9c3adcf515a8234da4daeb4f3f87777ad1f45ae9500ec9"), + ] + + @property + def arg_a(self) -> str: + return super().format_arg('{:x}'.format(self.int_a)).zfill(2 * self.hex_digits) + + def result(self) -> List[str]: 
+ result = self.int_a % self.int_n + return [self.format_result(result)] + + @property + def is_valid(self) -> bool: + return True + + def arguments(self)-> List[str]: + args = super().arguments() + return ["MBEDTLS_ECP_DP_CURVE25519"] + args + + +class EcpP448Raw(bignum_common.ModOperationCommon, + EcpTarget): + """Test cases for ECP P448 fast reduction.""" + symbol = "-" + test_function = "ecp_mod_p_generic_raw" + test_name = "ecp_mod_p448_raw" + input_style = "fixed" + arity = 1 + dependencies = ["MBEDTLS_ECP_DP_CURVE448_ENABLED"] + + moduli = [("fffffffffffffffffffffffffffffffffffffffffffffffffffffffe" + "ffffffffffffffffffffffffffffffffffffffffffffffffffffffff")] # type: List[str] + + input_values = [ + "0", "1", + + # Modulus - 1 + ("fffffffffffffffffffffffffffffffffffffffffffffffffffffffe" + "fffffffffffffffffffffffffffffffffffffffffffffffffffffffe"), + + # Modulus + 1 + ("ffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + "00000000000000000000000000000000000000000000000000000000"), + + # 2^448 - 1 + ("ffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + "ffffffffffffffffffffffffffffffffffffffffffffffffffffffff"), + + # Maximum canonical P448 multiplication result + ("fffffffffffffffffffffffffffffffffffffffffffffffffffffffd" + "fffffffffffffffffffffffffffffffffffffffffffffffffffffffd" + "00000000000000000000000000000000000000000000000000000004" + "00000000000000000000000000000000000000000000000000000004"), + + # First 8 number generated by random.getrandbits(896) - seed(2,2) + ("74667bffe202849da9643a295a9ac6decbd4d3e2d4dec9ef83f0be4e" + "80371eb97f81375eecc1cb6347733e847d718d733ff98ff387c56473" + "a7a83ee0761ebfd2bd143fa9b714210c665d7435c1066932f4767f26" + "294365b2721dea3bf63f23d0dbe53fcafb2147df5ca495fa5a91c89b"), + ("4da4daeb4f3f87777ad1f45ae9500ec9c5e2486c44a4a8f69dc8db48" + "e86ec9c6e06f291b2a838af8d5c44a4eb3172062d08f1bb2531d6460" + "f0caeef038c89b38a8acb5137c9260dc74e088a9b9492f258ebdbfe3" + 
"eb9ac688b9d39cca91551e8259cc60b17604e4b4e73695c3e652c71a"), + ("bc1b00d92838e766ef9b6bf2d037fe2e20b6a8464174e75a5f834da7" + "0569c018eb2b5693babb7fbb0a76c196067cfdcb11457d9cf45e2fa0" + "1d7f4275153924800600571fac3a5b263fdf57cd2c0064975c374746" + "5cc36c270e8a35b10828d569c268a20eb78ac332e5e138e26c4454b9"), + ("8d2f527e72daf0a54ef25c0707e338687d1f71575653a45c49390aa5" + "1cf5192bbf67da14be11d56ba0b4a2969d8055a9f03f2d71581d8e83" + "0112ff0f0948eccaf8877acf26c377c13f719726fd70bddacb4deeec" + "0b0c995e96e6bc4d62b47204007ee4fab105d83e85e951862f0981ae"), + ("84ae65e920a63ac1f2b64df6dff07870c9d531ae72a47403063238da" + "1a1fe3f9d6a179fa50f96cd4aff9261aa92c0e6f17ec940639bc2ccd" + "f572df00790813e32748dd1db4917fc09f20dbb0dcc93f0e66dfe717" + "c17313394391b6e2e6eacb0f0bb7be72bd6d25009aeb7fa0c4169b14"), + ("2bb3b36f29421c4021b7379f0897246a40c270b00e893302aba9e7b8" + "23fc5ad2f58105748ed5d1b7b310b730049dd332a73fa0b26b75196c" + "f87eb8a09b27ec714307c68c425424a1574f1eedf5b0f16cdfdb8394" + "24d201e653f53d6883ca1c107ca6e706649889c0c7f3860895bfa813"), + ("af3f5d7841b1256d5c1dc12fb5a1ae519fb8883accda6559caa538a0" + "9fc9370d3a6b86a7975b54a31497024640332b0612d4050771d7b14e" + "b6c004cc3b8367dc3f2bb31efe9934ad0809eae3ef232a32b5459d83" + "fbc46f1aea990e94821d46063b4dbf2ca294523d74115c86188b1044"), + ("7430051376e31f5aab63ad02854efa600641b4fa37a47ce41aeffafc" + "3b45402ac02659fe2e87d4150511baeb198ababb1a16daff3da95cd2" + "167b75dfb948f82a8317cba01c75f67e290535d868a24b7f627f2855" + "09167d4126af8090013c3273c02c6b9586b4625b475b51096c4ad652"), + + # Corner case which causes maximum overflow + ("f4ae65e920a63ac1f2b64df6dff07870c9d531ae72a47403063238da1" + "a1fe3f9d6a179fa50f96cd4aff9261aa92c0e6f17ec940639bc2ccd0B" + "519A16DF59C53E0D49B209200F878F362ACE518D5B8BFCF9CDC725E5E" + "01C06295E8605AF06932B5006D9E556D3F190E8136BF9C643D332"), + + # Next 2 number generated by random.getrandbits(448) + ("8f54f8ceacaab39e83844b40ffa9b9f15c14bc4a829e07b0829a48d4" + 
def configure_logger(
        logger: logging.Logger,
        log_format="[%(levelname)s]: %(message)s",
        split_level=logging.WARNING
    ) -> None:
    """Attach a pair of stream handlers to *logger* so that:

    - Records are formatted with *log_format*.
      Default: "[%(levelname)s]: %(message)s"
    - Records at *split_level* or above are printed to stderr.
    - Records below *split_level* are printed to stdout.
      Default *split_level*: logging.WARNING
    """
    formatter = logging.Formatter(log_format)

    # Everything at split_level and above goes to stderr.
    error_handler = logging.StreamHandler(sys.stderr)
    error_handler.setLevel(split_level)
    error_handler.setFormatter(formatter)

    # Everything below split_level goes to stdout. The logging module
    # accepts a plain callable as a filter: keep a record if and only if
    # its level is below the split point.
    output_handler = logging.StreamHandler(sys.stdout)
    output_handler.addFilter(lambda record: record.levelno < split_level)
    output_handler.setFormatter(formatter)

    logger.addHandler(error_handler)
    logger.addHandler(output_handler)
class PSAMacroEnumerator:
    """Information about constructors of various PSA Crypto types.

    This includes macro names as well as information about their arguments
    when applicable.

    This class only provides ways to enumerate expressions that evaluate to
    values of the covered types. Derived classes are expected to populate
    the set of known constructors of each kind, as well as populate
    `self.arguments_for` for arguments that are not of a kind that is
    enumerated here.
    """
    #pylint: disable=too-many-instance-attributes

    def __init__(self) -> None:
        """Set up an empty set of known constructor macros.
        """
        # One set per category of PSA constant (statuses, algorithms,
        # key types, ...). All start out empty; parsers fill them in.
        for attr in ('statuses', 'lifetimes', 'locations',
                     'persistence_levels', 'algorithms', 'ecc_curves',
                     'dh_groups', 'key_types', 'key_usage_flags',
                     'hash_algorithms', 'mac_algorithms', 'ka_algorithms',
                     'kdf_algorithms', 'pake_algorithms', 'aead_algorithms',
                     'sign_algorithms'):
            setattr(self, attr, set())
        # macro name -> list of argument names
        self.argspecs = {} #type: Dict[str, List[str]]
        # argument name -> list of values. The length arguments are left
        # empty here; callers append candidate values.
        self.arguments_for = {
            key: [] for key in
            ('mac_length', 'min_mac_length', 'tag_length', 'min_tag_length')
        } #type: Dict[str, List[str]]
        # Whether to include intermediate macros in enumerations. Intermediate
        # macros serve as category headers and are not valid values of their
        # type. See `is_internal_name`.
        # Always false in this class, may be set to true in derived classes.
        self.include_intermediate = False

    def is_internal_name(self, name: str) -> bool:
        """Whether this is an internal macro. Internal macros will be skipped."""
        if not self.include_intermediate and (
                name.endswith(('_BASE', '_NONE')) or '_CATEGORY_' in name):
            return True
        return name.endswith(('_FLAG', '_MASK'))

    def gather_arguments(self) -> None:
        """Populate the list of values for macro arguments.

        Call this after parsing all the inputs.
        """
        sources = {
            'hash_alg': self.hash_algorithms,
            'mac_alg': self.mac_algorithms,
            'ka_alg': self.ka_algorithms,
            'kdf_alg': self.kdf_algorithms,
            'aead_alg': self.aead_algorithms,
            'sign_alg': self.sign_algorithms,
            'curve': self.ecc_curves,
            'group': self.dh_groups,
            'persistence': self.persistence_levels,
            'location': self.locations,
            'lifetime': self.lifetimes,
        }
        for arg_name, values in sources.items():
            self.arguments_for[arg_name] = sorted(values)

    @staticmethod
    def _format_arguments(name: str, arguments: Iterable[str]) -> str:
        """Format a macro call with arguments.

        The resulting format is consistent with
        `InputsForTest.normalize_argument`.
        """
        return '{}({})'.format(name, ', '.join(arguments))

    _argument_split_re = re.compile(r' *, *')
    @classmethod
    def _argument_split(cls, arguments: str) -> List[str]:
        return cls._argument_split_re.split(arguments)

    def distribute_arguments(self, name: str) -> Iterator[str]:
        """Generate macro calls with each tested argument set.

        If name is a macro without arguments, just yield "name".
        If name is a macro with arguments, yield a series of
        "name(arg1,...,argN)" where each argument takes each possible
        value at least once.
        """
        try:
            if name not in self.argspecs:
                yield name
                return
            argspec = self.argspecs[name]
            if not argspec:
                yield name + '()'
                return
            pools = [self.arguments_for[arg] for arg in argspec]
            current = [pool[0] for pool in pools]
            yield self._format_arguments(name, current)
            # Vary one argument at a time, restoring it afterwards, so
            # that every value of every argument appears at least once.
            for position, pool in enumerate(pools):
                for value in pool[1:]:
                    current[position] = value
                    yield self._format_arguments(name, current)
                current[position] = pool[0]
        except BaseException as e:
            raise Exception('distribute_arguments({})'.format(name)) from e

    def distribute_arguments_without_duplicates(
            self, seen: Set[str], name: str
    ) -> Iterator[str]:
        """Same as `distribute_arguments`, but don't repeat seen results."""
        for expression in self.distribute_arguments(name):
            if expression in seen:
                continue
            seen.add(expression)
            yield expression

    def generate_expressions(self, names: Iterable[str]) -> Iterator[str]:
        """Generate expressions covering values constructed from the given names.

        `names` can be any iterable collection of macro names.

        For example:
        * ``generate_expressions(['PSA_ALG_CMAC', 'PSA_ALG_HMAC'])``
          generates ``'PSA_ALG_CMAC'`` as well as ``'PSA_ALG_HMAC(h)'`` for
          every known hash algorithm ``h``.
        * ``macros.generate_expressions(macros.key_types)`` generates all
          key types.
        """
        seen = set() #type: Set[str]
        return itertools.chain.from_iterable(
            self.distribute_arguments_without_duplicates(seen, name)
            for name in names)
+ """ + super().__init__() + self.include_intermediate = include_intermediate + self.key_types_from_curve = {} #type: Dict[str, str] + self.key_types_from_group = {} #type: Dict[str, str] + self.algorithms_from_hash = {} #type: Dict[str, str] + + @staticmethod + def algorithm_tester(name: str) -> str: + """The predicate for whether an algorithm is built from the given constructor. + + The given name must be the name of an algorithm constructor of the + form ``PSA_ALG_xxx`` which is used as ``PSA_ALG_xxx(yyy)`` to build + an algorithm value. Return the corresponding predicate macro which + is used as ``predicate(alg)`` to test whether ``alg`` can be built + as ``PSA_ALG_xxx(yyy)``. The predicate is usually called + ``PSA_ALG_IS_xxx``. + """ + prefix = 'PSA_ALG_' + assert name.startswith(prefix) + midfix = 'IS_' + suffix = name[len(prefix):] + if suffix in ['DSA', 'ECDSA']: + midfix += 'RANDOMIZED_' + elif suffix == 'RSA_PSS': + suffix += '_STANDARD_SALT' + return prefix + midfix + suffix + + def record_algorithm_subtype(self, name: str, expansion: str) -> None: + """Record the subtype of an algorithm constructor. + + Given a ``PSA_ALG_xxx`` macro name and its expansion, if the algorithm + is of a subtype that is tracked in its own set, add it to the relevant + set. + """ + # This code is very ad hoc and fragile. It should be replaced by + # something more robust. 
+ if re.match(r'MAC(?:_|\Z)', name): + self.mac_algorithms.add(name) + elif re.match(r'KDF(?:_|\Z)', name): + self.kdf_algorithms.add(name) + elif re.search(r'0x020000[0-9A-Fa-f]{2}', expansion): + self.hash_algorithms.add(name) + elif re.search(r'0x03[0-9A-Fa-f]{6}', expansion): + self.mac_algorithms.add(name) + elif re.search(r'0x05[0-9A-Fa-f]{6}', expansion): + self.aead_algorithms.add(name) + elif re.search(r'0x09[0-9A-Fa-f]{2}0000', expansion): + self.ka_algorithms.add(name) + elif re.search(r'0x08[0-9A-Fa-f]{6}', expansion): + self.kdf_algorithms.add(name) + + # "#define" followed by a macro name with either no parameters + # or a single parameter and a non-empty expansion. + # Grab the macro name in group 1, the parameter name if any in group 2 + # and the expansion in group 3. + _define_directive_re = re.compile(r'\s*#\s*define\s+(\w+)' + + r'(?:\s+|\((\w+)\)\s*)' + + r'(.+)') + _deprecated_definition_re = re.compile(r'\s*MBEDTLS_DEPRECATED') + + def read_line(self, line): + """Parse a C header line and record the PSA identifier it defines if any. + This function analyzes lines that start with "#define PSA_" + (up to non-significant whitespace) and skips all non-matching lines. + """ + # pylint: disable=too-many-branches + m = re.match(self._define_directive_re, line) + if not m: + return + name, parameter, expansion = m.groups() + expansion = re.sub(r'/\*.*?\*/|//.*', r' ', expansion) + if parameter: + self.argspecs[name] = [parameter] + if re.match(self._deprecated_definition_re, expansion): + # Skip deprecated values, which are assumed to be + # backward compatibility aliases that share + # numerical values with non-deprecated values. 
+ return + if self.is_internal_name(name): + # Macro only to build actual values + return + elif (name.startswith('PSA_ERROR_') or name == 'PSA_SUCCESS') \ + and not parameter: + self.statuses.add(name) + elif name.startswith('PSA_KEY_TYPE_') and not parameter: + self.key_types.add(name) + elif name.startswith('PSA_KEY_TYPE_') and parameter == 'curve': + self.key_types_from_curve[name] = name[:13] + 'IS_' + name[13:] + elif name.startswith('PSA_KEY_TYPE_') and parameter == 'group': + self.key_types_from_group[name] = name[:13] + 'IS_' + name[13:] + elif name.startswith('PSA_ECC_FAMILY_') and not parameter: + self.ecc_curves.add(name) + elif name.startswith('PSA_DH_FAMILY_') and not parameter: + self.dh_groups.add(name) + elif name.startswith('PSA_ALG_') and not parameter: + if name in ['PSA_ALG_ECDSA_BASE', + 'PSA_ALG_RSA_PKCS1V15_SIGN_BASE']: + # Ad hoc skipping of duplicate names for some numerical values + return + self.algorithms.add(name) + self.record_algorithm_subtype(name, expansion) + elif name.startswith('PSA_ALG_') and parameter == 'hash_alg': + self.algorithms_from_hash[name] = self.algorithm_tester(name) + elif name.startswith('PSA_KEY_USAGE_') and not parameter: + self.key_usage_flags.add(name) + else: + # Other macro without parameter + return + + _nonascii_re = re.compile(rb'[^\x00-\x7f]+') + _continued_line_re = re.compile(rb'\\\r?\n\Z') + def read_file(self, header_file): + for line in header_file: + m = re.search(self._continued_line_re, line) + while m: + cont = next(header_file) + line = line[:m.start(0)] + cont + m = re.search(self._continued_line_re, line) + line = re.sub(self._nonascii_re, rb'', line).decode('ascii') + self.read_line(line) + + +class InputsForTest(PSAMacroEnumerator): + # pylint: disable=too-many-instance-attributes + """Accumulate information about macros to test. +enumerate + This includes macro names as well as information about their arguments + when applicable. 
+ """ + + def __init__(self) -> None: + super().__init__() + self.all_declared = set() #type: Set[str] + # Identifier prefixes + self.table_by_prefix = { + 'ERROR': self.statuses, + 'ALG': self.algorithms, + 'ECC_CURVE': self.ecc_curves, + 'DH_GROUP': self.dh_groups, + 'KEY_LIFETIME': self.lifetimes, + 'KEY_LOCATION': self.locations, + 'KEY_PERSISTENCE': self.persistence_levels, + 'KEY_TYPE': self.key_types, + 'KEY_USAGE': self.key_usage_flags, + } #type: Dict[str, Set[str]] + # Test functions + self.table_by_test_function = { + # Any function ending in _algorithm also gets added to + # self.algorithms. + 'key_type': [self.key_types], + 'block_cipher_key_type': [self.key_types], + 'stream_cipher_key_type': [self.key_types], + 'ecc_key_family': [self.ecc_curves], + 'ecc_key_types': [self.ecc_curves], + 'dh_key_family': [self.dh_groups], + 'dh_key_types': [self.dh_groups], + 'hash_algorithm': [self.hash_algorithms], + 'mac_algorithm': [self.mac_algorithms], + 'cipher_algorithm': [], + 'hmac_algorithm': [self.mac_algorithms, self.sign_algorithms], + 'aead_algorithm': [self.aead_algorithms], + 'key_derivation_algorithm': [self.kdf_algorithms], + 'key_agreement_algorithm': [self.ka_algorithms], + 'asymmetric_signature_algorithm': [self.sign_algorithms], + 'asymmetric_signature_wildcard': [self.algorithms], + 'asymmetric_encryption_algorithm': [], + 'pake_algorithm': [self.pake_algorithms], + 'other_algorithm': [], + 'lifetime': [self.lifetimes], + } #type: Dict[str, List[Set[str]]] + mac_lengths = [str(n) for n in [ + 1, # minimum expressible + 4, # minimum allowed by policy + 13, # an odd size in a plausible range + 14, # an even non-power-of-two size in a plausible range + 16, # same as full size for at least one algorithm + 63, # maximum expressible + ]] + self.arguments_for['mac_length'] += mac_lengths + self.arguments_for['min_mac_length'] += mac_lengths + aead_lengths = [str(n) for n in [ + 1, # minimum expressible + 4, # minimum allowed by policy + 13, # an odd 
size in a plausible range + 14, # an even non-power-of-two size in a plausible range + 16, # same as full size for at least one algorithm + 63, # maximum expressible + ]] + self.arguments_for['tag_length'] += aead_lengths + self.arguments_for['min_tag_length'] += aead_lengths + + def add_numerical_values(self) -> None: + """Add numerical values that are not supported to the known identifiers.""" + # Sets of names per type + self.algorithms.add('0xffffffff') + self.ecc_curves.add('0xff') + self.dh_groups.add('0xff') + self.key_types.add('0xffff') + self.key_usage_flags.add('0x80000000') + + # Hard-coded values for unknown algorithms + # + # These have to have values that are correct for their respective + # PSA_ALG_IS_xxx macros, but are also not currently assigned and are + # not likely to be assigned in the near future. + self.hash_algorithms.add('0x020000fe') # 0x020000ff is PSA_ALG_ANY_HASH + self.mac_algorithms.add('0x03007fff') + self.ka_algorithms.add('0x09fc0000') + self.kdf_algorithms.add('0x080000ff') + self.pake_algorithms.add('0x0a0000ff') + # For AEAD algorithms, the only variability is over the tag length, + # and this only applies to known algorithms, so don't test an + # unknown algorithm. + + def get_names(self, type_word: str) -> Set[str]: + """Return the set of known names of values of the given type.""" + return { + 'status': self.statuses, + 'algorithm': self.algorithms, + 'ecc_curve': self.ecc_curves, + 'dh_group': self.dh_groups, + 'key_type': self.key_types, + 'key_usage': self.key_usage_flags, + }[type_word] + + # Regex for interesting header lines. + # Groups: 1=macro name, 2=type, 3=argument list (optional). + _header_line_re = \ + re.compile(r'#define +' + + r'(PSA_((?:(?:DH|ECC|KEY)_)?[A-Z]+)_\w+)' + + r'(?:\(([^\n()]*)\))?') + # Regex of macro names to exclude. + _excluded_name_re = re.compile(r'_(?:GET|IS|OF)_|_(?:BASE|FLAG|MASK)\Z') + # Additional excluded macros. 
+ _excluded_names = set([ + # Macros that provide an alternative way to build the same + # algorithm as another macro. + 'PSA_ALG_AEAD_WITH_DEFAULT_LENGTH_TAG', + 'PSA_ALG_FULL_LENGTH_MAC', + # Auxiliary macro whose name doesn't fit the usual patterns for + # auxiliary macros. + 'PSA_ALG_AEAD_WITH_DEFAULT_LENGTH_TAG_CASE', + ]) + def parse_header_line(self, line: str) -> None: + """Parse a C header line, looking for "#define PSA_xxx".""" + m = re.match(self._header_line_re, line) + if not m: + return + name = m.group(1) + self.all_declared.add(name) + if re.search(self._excluded_name_re, name) or \ + name in self._excluded_names or \ + self.is_internal_name(name): + return + dest = self.table_by_prefix.get(m.group(2)) + if dest is None: + return + dest.add(name) + if m.group(3): + self.argspecs[name] = self._argument_split(m.group(3)) + + _nonascii_re = re.compile(rb'[^\x00-\x7f]+') #type: Pattern + def parse_header(self, filename: str) -> None: + """Parse a C header file, looking for "#define PSA_xxx".""" + with read_file_lines(filename, binary=True) as lines: + for line in lines: + line = re.sub(self._nonascii_re, rb'', line).decode('ascii') + self.parse_header_line(line) + + _macro_identifier_re = re.compile(r'[A-Z]\w+') + def generate_undeclared_names(self, expr: str) -> Iterable[str]: + for name in re.findall(self._macro_identifier_re, expr): + if name not in self.all_declared: + yield name + + def accept_test_case_line(self, function: str, argument: str) -> bool: + #pylint: disable=unused-argument + undeclared = list(self.generate_undeclared_names(argument)) + if undeclared: + raise Exception('Undeclared names in test case', undeclared) + return True + + @staticmethod + def normalize_argument(argument: str) -> str: + """Normalize whitespace in the given C expression. + + The result uses the same whitespace as + ` PSAMacroEnumerator.distribute_arguments`. 
+ """ + return re.sub(r',', r', ', re.sub(r' +', r'', argument)) + + def add_test_case_line(self, function: str, argument: str) -> None: + """Parse a test case data line, looking for algorithm metadata tests.""" + sets = [] + if function.endswith('_algorithm'): + sets.append(self.algorithms) + if function == 'key_agreement_algorithm' and \ + argument.startswith('PSA_ALG_KEY_AGREEMENT('): + # We only want *raw* key agreement algorithms as such, so + # exclude ones that are already chained with a KDF. + # Keep the expression as one to test as an algorithm. + function = 'other_algorithm' + sets += self.table_by_test_function[function] + if self.accept_test_case_line(function, argument): + for s in sets: + s.add(self.normalize_argument(argument)) + + # Regex matching a *.data line containing a test function call and + # its arguments. The actual definition is partly positional, but this + # regex is good enough in practice. + _test_case_line_re = re.compile(r'(?!depends_on:)(\w+):([^\n :][^:\n]*)') + def parse_test_cases(self, filename: str) -> None: + """Parse a test case file (*.data), looking for algorithm metadata tests.""" + with read_file_lines(filename) as lines: + for line in lines: + m = re.match(self._test_case_line_re, line) + if m: + self.add_test_case_line(m.group(1), m.group(2)) diff --git a/scripts/framework_dev/psa_information.py b/scripts/framework_dev/psa_information.py new file mode 100644 index 000000000..60803864f --- /dev/null +++ b/scripts/framework_dev/psa_information.py @@ -0,0 +1,161 @@ +"""Collect information about PSA cryptographic mechanisms. +""" + +# Copyright The Mbed TLS Contributors +# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later +# + +import re +from collections import OrderedDict +from typing import FrozenSet, List, Optional + +from . 
import macro_collector + + +class Information: + """Gather information about PSA constructors.""" + + def __init__(self) -> None: + self.constructors = self.read_psa_interface() + + @staticmethod + def remove_unwanted_macros( + constructors: macro_collector.PSAMacroEnumerator + ) -> None: + # Mbed TLS does not support finite-field DSA. + # Don't attempt to generate any related test case. + constructors.key_types.discard('PSA_KEY_TYPE_DSA_KEY_PAIR') + constructors.key_types.discard('PSA_KEY_TYPE_DSA_PUBLIC_KEY') + + def read_psa_interface(self) -> macro_collector.PSAMacroEnumerator: + """Return the list of known key types, algorithms, etc.""" + constructors = macro_collector.InputsForTest() + header_file_names = ['include/psa/crypto_values.h', + 'include/psa/crypto_extra.h'] + test_suites = ['tests/suites/test_suite_psa_crypto_metadata.data'] + for header_file_name in header_file_names: + constructors.parse_header(header_file_name) + for test_cases in test_suites: + constructors.parse_test_cases(test_cases) + self.remove_unwanted_macros(constructors) + constructors.gather_arguments() + return constructors + + +def psa_want_symbol(name: str) -> str: + """Return the PSA_WANT_xxx symbol associated with a PSA crypto feature.""" + if name.startswith('PSA_'): + return name[:4] + 'WANT_' + name[4:] + else: + raise ValueError('Unable to determine the PSA_WANT_ symbol for ' + name) + +def finish_family_dependency(dep: str, bits: int) -> str: + """Finish dep if it's a family dependency symbol prefix. + + A family dependency symbol prefix is a PSA_WANT_ symbol that needs to be + qualified by the key size. If dep is such a symbol, finish it by adjusting + the prefix and appending the key size. Other symbols are left unchanged. + """ + return re.sub(r'_FAMILY_(.*)', r'_\1_' + str(bits), dep) + +def finish_family_dependencies(dependencies: List[str], bits: int) -> List[str]: + """Finish any family dependency symbol prefixes. 
# Modifier and policy-only constructors that have no PSA_WANT_ counterpart.
SYMBOLS_WITHOUT_DEPENDENCY = frozenset([
    'PSA_ALG_AEAD_WITH_AT_LEAST_THIS_LENGTH_TAG', # modifier, only in policies
    'PSA_ALG_AEAD_WITH_SHORTENED_TAG', # modifier
    'PSA_ALG_ANY_HASH', # only in policies
    'PSA_ALG_AT_LEAST_THIS_LENGTH_MAC', # modifier, only in policies
    'PSA_ALG_KEY_AGREEMENT', # chaining
    'PSA_ALG_TRUNCATED_MAC', # modifier
])


def automatic_dependencies(*expressions: str) -> List[str]:
    """Infer dependencies of a test case by looking for PSA_xxx symbols.

    The arguments are strings which should be C expressions. Do not use
    string literals or comments as this function is not smart enough to
    skip them.
    """
    used = set()
    for expr in expressions:
        used.update(re.findall(r'PSA_(?:ALG|ECC_FAMILY|DH_FAMILY|KEY_TYPE)_\w+',
                               expr))
    used.difference_update(SYMBOLS_WITHOUT_DEPENDENCY)
    return sorted(psa_want_symbol(name) for name in used)


# Define set of regular expressions and dependencies to optionally append
# extra dependencies for test case based on key description.

# Skip AES test cases which require 192- or 256-bit key
# if MBEDTLS_AES_ONLY_128_BIT_KEY_LENGTH defined
AES_128BIT_ONLY_DEP_REGEX = re.compile(r'AES\s(192|256)')
AES_128BIT_ONLY_DEP = ['!MBEDTLS_AES_ONLY_128_BIT_KEY_LENGTH']
# Skip AES/ARIA/CAMELLIA test cases which require decrypt operation in ECB
# mode if MBEDTLS_BLOCK_CIPHER_NO_DECRYPT enabled.
ECB_NO_PADDING_DEP_REGEX = re.compile(r'(AES|ARIA|CAMELLIA).*ECB_NO_PADDING')
ECB_NO_PADDING_DEP = ['!MBEDTLS_BLOCK_CIPHER_NO_DECRYPT']

# Ordered so that the generated dependency lists are deterministic.
DEPENDENCY_FROM_DESCRIPTION = OrderedDict()
DEPENDENCY_FROM_DESCRIPTION[AES_128BIT_ONLY_DEP_REGEX] = AES_128BIT_ONLY_DEP
DEPENDENCY_FROM_DESCRIPTION[ECB_NO_PADDING_DEP_REGEX] = ECB_NO_PADDING_DEP


def generate_deps_from_description(
        description: str
    ) -> List[str]:
    """Return additional dependencies based on test case description and REGEX.
    """
    dep_list = []
    for regex, deps in DEPENDENCY_FROM_DESCRIPTION.items():
        if re.search(regex, description):
            dep_list += deps
    return dep_list


# A temporary hack: at the time of writing, not all dependency symbols
# are implemented yet. Skip test cases for which the dependency symbols are
# not available. Once all dependency symbols are available, this hack must
# be removed so that a bug in the dependency symbols properly leads to a test
# failure.
def read_implemented_dependencies(filename: str) -> FrozenSet[str]:
    """Collect all PSA_WANT_xxx symbols mentioned in the given file."""
    return frozenset(symbol
                     for line in open(filename)
                     for symbol in re.findall(r'\bPSA_WANT_\w+\b', line))


_implemented_dependencies = None #type: Optional[FrozenSet[str]] #pylint: disable=invalid-name


def hack_dependencies_not_implemented(dependencies: List[str]) -> None:
    """Append a never-satisfied dependency if any PSA_WANT_ dep is unknown."""
    global _implemented_dependencies #pylint: disable=global-statement,invalid-name
    if _implemented_dependencies is None:
        # Lazily read the config once and cache the result.
        _implemented_dependencies = \
            read_implemented_dependencies('include/psa/crypto_config.h')
    if not all((dep.lstrip('!') in _implemented_dependencies or
                not dep.lstrip('!').startswith('PSA_WANT'))
               for dep in dependencies):
        dependencies.append('DEPENDENCY_NOT_IMPLEMENTED_YET')


def tweak_key_pair_dependency(dep: str, usage: str):
    """
    This helper function add the proper suffix to PSA_WANT_KEY_TYPE_xxx_KEY_PAIR
    symbols according to the required usage.
    """
    ret_list = list()
    if dep.endswith('KEY_PAIR'):
        if usage == "BASIC":
            # BASIC automatically includes IMPORT and EXPORT for test
            # purposes (see config_psa.h).
            ret_list.append(re.sub(r'KEY_PAIR', r'KEY_PAIR_BASIC', dep))
            ret_list.append(re.sub(r'KEY_PAIR', r'KEY_PAIR_IMPORT', dep))
            ret_list.append(re.sub(r'KEY_PAIR', r'KEY_PAIR_EXPORT', dep))
        elif usage == "GENERATE":
            ret_list.append(re.sub(r'KEY_PAIR', r'KEY_PAIR_GENERATE', dep))
    else:
        # No replacement to do in this case
        ret_list.append(dep)
    return ret_list


def fix_key_pair_dependencies(dep_list: List[str], usage: str):
    """Apply `tweak_key_pair_dependency` to every element of `dep_list`."""
    return [new_dep
            for dep in dep_list
            for new_dep in tweak_key_pair_dependency(dep, usage)]
import build_tree + + +class Expr: + """Representation of a C expression with a known or knowable numerical value.""" + + def __init__(self, content: Union[int, str]): + if isinstance(content, int): + digits = 8 if content > 0xffff else 4 + self.string = '{0:#0{1}x}'.format(content, digits + 2) + self.value_if_known = content #type: Optional[int] + else: + self.string = content + self.unknown_values.add(self.normalize(content)) + self.value_if_known = None + + value_cache = {} #type: Dict[str, int] + """Cache of known values of expressions.""" + + unknown_values = set() #type: Set[str] + """Expressions whose values are not present in `value_cache` yet.""" + + def update_cache(self) -> None: + """Update `value_cache` for expressions registered in `unknown_values`.""" + expressions = sorted(self.unknown_values) + includes = ['include'] + if build_tree.looks_like_tf_psa_crypto_root('.'): + includes.append('drivers/builtin/include') + values = c_build_helper.get_c_expression_values( + 'unsigned long', '%lu', + expressions, + header=""" + #include + """, + include_path=includes) #type: List[str] + for e, v in zip(expressions, values): + self.value_cache[e] = int(v, 0) + self.unknown_values.clear() + + @staticmethod + def normalize(string: str) -> str: + """Put the given C expression in a canonical form. + + This function is only intended to give correct results for the + relatively simple kind of C expression typically used with this + module. 
+ """ + return re.sub(r'\s+', r'', string) + + def value(self) -> int: + """Return the numerical value of the expression.""" + if self.value_if_known is None: + if re.match(r'([0-9]+|0x[0-9a-f]+)\Z', self.string, re.I): + return int(self.string, 0) + normalized = self.normalize(self.string) + if normalized not in self.value_cache: + self.update_cache() + self.value_if_known = self.value_cache[normalized] + return self.value_if_known + +Exprable = Union[str, int, Expr] +"""Something that can be converted to a C expression with a known numerical value.""" + +def as_expr(thing: Exprable) -> Expr: + """Return an `Expr` object for `thing`. + + If `thing` is already an `Expr` object, return it. Otherwise build a new + `Expr` object from `thing`. `thing` can be an integer or a string that + contains a C expression. + """ + if isinstance(thing, Expr): + return thing + else: + return Expr(thing) + + +class Key: + """Representation of a PSA crypto key object and its storage encoding. + """ + + LATEST_VERSION = 0 + """The latest version of the storage format.""" + + def __init__(self, *, + version: Optional[int] = None, + id: Optional[int] = None, #pylint: disable=redefined-builtin + lifetime: Exprable = 'PSA_KEY_LIFETIME_PERSISTENT', + type: Exprable, #pylint: disable=redefined-builtin + bits: int, + usage: Exprable, alg: Exprable, alg2: Exprable, + material: bytes #pylint: disable=used-before-assignment + ) -> None: + self.version = self.LATEST_VERSION if version is None else version + self.id = id #pylint: disable=invalid-name #type: Optional[int] + self.lifetime = as_expr(lifetime) #type: Expr + self.type = as_expr(type) #type: Expr + self.bits = bits #type: int + self.usage = as_expr(usage) #type: Expr + self.alg = as_expr(alg) #type: Expr + self.alg2 = as_expr(alg2) #type: Expr + self.material = material #type: bytes + + MAGIC = b'PSA\000KEY\000' + + @staticmethod + def pack( + fmt: str, + *args: Union[int, Expr] + ) -> bytes: #pylint: disable=used-before-assignment + 
"""Pack the given arguments into a byte string according to the given format. + + This function is similar to `struct.pack`, but with the following differences: + * All integer values are encoded with standard sizes and in + little-endian representation. `fmt` must not include an endianness + prefix. + * Arguments can be `Expr` objects instead of integers. + * Only integer-valued elements are supported. + """ + return struct.pack('<' + fmt, # little-endian, standard sizes + *[arg.value() if isinstance(arg, Expr) else arg + for arg in args]) + + def bytes(self) -> bytes: + """Return the representation of the key in storage as a byte array. + + This is the content of the PSA storage file. When PSA storage is + implemented over stdio files, this does not include any wrapping made + by the PSA-storage-over-stdio-file implementation. + + Note that if you need to make a change in this function, + this may indicate that the key store is changing in a + backward-incompatible way! Think carefully about backward + compatibility before making any change here. + """ + header = self.MAGIC + self.pack('L', self.version) + if self.version == 0: + attributes = self.pack('LHHLLL', + self.lifetime, self.type, self.bits, + self.usage, self.alg, self.alg2) + material = self.pack('L', len(self.material)) + self.material + else: + raise NotImplementedError + return header + attributes + material + + def hex(self) -> str: + """Return the representation of the key as a hexadecimal string. + + This is the hexadecimal representation of `self.bytes`. 
+ """ + return self.bytes().hex() + + def location_value(self) -> int: + """The numerical value of the location encoded in the key's lifetime.""" + return self.lifetime.value() >> 8 + + +class TestKey(unittest.TestCase): + # pylint: disable=line-too-long + """A few smoke tests for the functionality of the `Key` class.""" + + def test_numerical(self): + key = Key(version=0, + id=1, lifetime=0x00000001, + type=0x2400, bits=128, + usage=0x00000300, alg=0x05500200, alg2=0x04c01000, + material=b'@ABCDEFGHIJKLMNO') + expected_hex = '505341004b45590000000000010000000024800000030000000250050010c00410000000404142434445464748494a4b4c4d4e4f' + self.assertEqual(key.bytes(), bytes.fromhex(expected_hex)) + self.assertEqual(key.hex(), expected_hex) + + def test_names(self): + length = 0xfff8 // 8 # PSA_MAX_KEY_BITS in bytes + key = Key(version=0, + id=1, lifetime='PSA_KEY_LIFETIME_PERSISTENT', + type='PSA_KEY_TYPE_RAW_DATA', bits=length*8, + usage=0, alg=0, alg2=0, + material=b'\x00' * length) + expected_hex = '505341004b45590000000000010000000110f8ff000000000000000000000000ff1f0000' + '00' * length + self.assertEqual(key.bytes(), bytes.fromhex(expected_hex)) + self.assertEqual(key.hex(), expected_hex) + + def test_defaults(self): + key = Key(type=0x1001, bits=8, + usage=0, alg=0, alg2=0, + material=b'\x2a') + expected_hex = '505341004b455900000000000100000001100800000000000000000000000000010000002a' + self.assertEqual(key.bytes(), bytes.fromhex(expected_hex)) + self.assertEqual(key.hex(), expected_hex) diff --git a/scripts/framework_dev/test_case.py b/scripts/framework_dev/test_case.py new file mode 100644 index 000000000..6ed5e849d --- /dev/null +++ b/scripts/framework_dev/test_case.py @@ -0,0 +1,91 @@ +"""Library for constructing an Mbed TLS test case. +""" + +# Copyright The Mbed TLS Contributors +# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later +# + +import binascii +import os +import sys +from typing import Iterable, List, Optional + +from . 
def hex_string(data: bytes) -> str:
    """Return `data` as a double-quoted hexadecimal C string literal."""
    return '"' + binascii.hexlify(data).decode('ascii') + '"'


class MissingDescription(Exception):
    """Raised when writing a test case that has no description."""


class MissingFunction(Exception):
    """Raised when writing a test case that has no test function."""


class TestCase:
    """An Mbed TLS test case."""

    def __init__(self, description: Optional[str] = None):
        self.comments = [] #type: List[str]
        self.description = description #type: Optional[str]
        self.dependencies = [] #type: List[str]
        self.function = None #type: Optional[str]
        self.arguments = [] #type: List[str]

    def add_comment(self, *lines: str) -> None:
        """Append comment lines emitted above the test case description."""
        self.comments += lines

    def set_description(self, description: str) -> None:
        self.description = description

    def set_dependencies(self, dependencies: List[str]) -> None:
        self.dependencies = dependencies

    def set_function(self, function: str) -> None:
        self.function = function

    def set_arguments(self, arguments: List[str]) -> None:
        self.arguments = arguments

    def check_completeness(self) -> None:
        """Raise if the test case lacks a description or a function."""
        if self.description is None:
            raise MissingDescription
        if self.function is None:
            raise MissingFunction

    def write(self, out: 'typing_util.Writable') -> None:
        """Write the .data file paragraph for this test case.

        The output starts and ends with a single newline character. If the
        surrounding code writes lines (consisting of non-newline characters
        and a final newline), you will end up with a blank line before, but
        not after the test case.
        """
        self.check_completeness()
        assert self.description is not None # guide mypy
        assert self.function is not None # guide mypy
        out.write('\n')
        for line in self.comments:
            out.write('# ' + line + '\n')
        out.write(self.description + '\n')
        if self.dependencies:
            out.write('depends_on:' + ':'.join(self.dependencies) + '\n')
        out.write(self.function + ':' + ':'.join(self.arguments) + '\n')


def write_data_file(filename: str,
                    test_cases: Iterable[TestCase],
                    caller: Optional[str] = None) -> None:
    """Write the test cases to the specified file.

    If the file already exists, it is overwritten.
    """
    if caller is None:
        caller = os.path.basename(sys.argv[0])
    # Write to a sibling temporary file, then atomically replace the target.
    tempfile = filename + '.new'
    with open(tempfile, 'w') as out:
        out.write('# Automatically generated by {}. Do not edit!\n'
                  .format(caller))
        for tc in test_cases:
            tc.write(out)
        out.write('\n# End of automatically generated file.\n')
    os.replace(tempfile, filename)
T = TypeVar('T') #pylint: disable=invalid-name


class BaseTest(metaclass=ABCMeta):
    """Base class for test case generation.

    Attributes:
        count: Counter for test cases from this class.
        case_description: Short description of the test case. This may be
            automatically generated using the class, or manually set.
        dependencies: A list of dependencies required for the test case.
        show_test_count: Toggle for inclusion of `count` in the test description.
        test_function: Test function which the class generates cases for.
        test_name: A common name or description of the test function. This can
            be `test_function`, a clearer equivalent, or a short summary of the
            test function's purpose.
    """
    count = 0
    case_description = ""
    dependencies = [] # type: List[str]
    show_test_count = True
    test_function = ""
    test_name = ""

    def __new__(cls, *args, **kwargs):
        # pylint: disable=unused-argument
        # Each instantiation numbers one more case for this class.
        cls.count += 1
        return super().__new__(cls)

    @abstractmethod
    def arguments(self) -> List[str]:
        """Get the list of arguments for the test case.

        Override this method to provide the list of arguments required for
        the `test_function`.

        Returns:
            List of arguments required for the test function.
        """
        raise NotImplementedError

    def description(self) -> str:
        """Create a test case description.

        Creates a description of the test case, including a name for the test
        function, an optional case count, and a description of the specific
        test case. This should inform a reader what is being tested, and
        provide context for the test case.

        Returns:
            Description for the test case.
        """
        if self.show_test_count:
            return "{} #{} {}".format(
                self.test_name, self.count, self.case_description
                ).strip()
        return "{} {}".format(self.test_name, self.case_description).strip()

    def create_test_case(self) -> 'test_case.TestCase':
        """Generate TestCase from the instance."""
        tc = test_case.TestCase()
        tc.set_description(self.description())
        tc.set_function(self.test_function)
        tc.set_arguments(self.arguments())
        tc.set_dependencies(self.dependencies)
        return tc

    @classmethod
    @abstractmethod
    def generate_function_tests(cls) -> 'Iterator[test_case.TestCase]':
        """Generate test cases for the class test function.

        This will be called in classes where `test_function` is set.
        Implementations should yield TestCase objects, by creating instances
        of the class with appropriate input data, and then calling
        `create_test_case()` on each.
        """
        raise NotImplementedError


class BaseTarget:
    #pylint: disable=too-few-public-methods
    """Base target for test case generation.

    Child classes of this class represent an output file, and can be referred
    to as file targets. These indicate where test cases will be written to for
    all subclasses of the file target, which is set by `target_basename`.

    Attributes:
        target_basename: Basename of file to write generated tests to. This
            should be specified in a child class of BaseTarget.
    """
    target_basename = ""

    @classmethod
    def generate_tests(cls) -> 'Iterator[test_case.TestCase]':
        """Generate test cases for the class and its subclasses.

        In classes with `test_function` set, `generate_function_tests()` is
        called to generate test cases first.

        In all classes, this method will iterate over its subclasses, and
        yield from `generate_tests()` in each. Calling this method on a class X
        will yield test cases from all classes derived from X.
        """
        if issubclass(cls, BaseTest) and not inspect.isabstract(cls):
            #pylint: disable=no-member
            yield from cls.generate_function_tests()
        # Sort by name for reproducible output order.
        for subclass in sorted(cls.__subclasses__(), key=lambda c: c.__name__):
            yield from subclass.generate_tests()


class TestGenerator:
    """Generate test cases and write to data files."""

    def __init__(self, options) -> None:
        self.test_suite_directory = options.directory
        # Update `targets` with an entry for each child class of BaseTarget.
        # Each entry represents a file generated by the BaseTarget framework,
        # and enables generating the .data files using the CLI.
        self.targets.update({
            subclass.target_basename: subclass.generate_tests
            for subclass in BaseTarget.__subclasses__()
            if subclass.target_basename
        })

    def filename_for(self, basename: str) -> str:
        """The location of the data file with the specified base name."""
        return posixpath.join(self.test_suite_directory, basename + '.data')

    def write_test_data_file(self, basename: str,
                             test_cases: 'Iterable[test_case.TestCase]') -> None:
        """Write the test cases to a .data file.

        The output file is ``basename + '.data'`` in the test suite directory.
        """
        test_case.write_data_file(self.filename_for(basename), test_cases)

    # Note that targets whose names contain 'test_format' have their content
    # validated by `abi_check.py`.
    targets = {} # type: Dict[str, Callable[..., Iterable[test_case.TestCase]]]

    def generate_target(self, name: str, *target_args) -> None:
        """Generate cases and write to data file for a target.

        For target callables which require arguments, override this function
        and pass these arguments using super() (see PSATestGenerator).
        """
        self.write_test_data_file(name, self.targets[name](*target_args))


def main(args, description: str, generator_class: Type[TestGenerator] = TestGenerator):
    """Command line entry point."""
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument('--list', action='store_true',
                        help='List available targets and exit')
    parser.add_argument('--list-for-cmake', action='store_true',
                        help='Print \';\'-separated list of available targets and exit')
    # If specified explicitly, this option may be a path relative to the
    # current directory when the script is invoked. The default value
    # is relative to the mbedtls root, which we don't know yet. So we
    # can't set a string as the default value here.
    parser.add_argument('--directory', metavar='DIR',
                        help='Output directory (default: tests/suites)')
    parser.add_argument('targets', nargs='*', metavar='TARGET',
                        help='Target file to generate (default: all; "-": none)')
    options = parser.parse_args(args)

    # Change to the mbedtls root, to keep things simple. But first, adjust
    # command line options that might be relative paths.
    if options.directory is None:
        options.directory = 'tests/suites'
    else:
        options.directory = os.path.abspath(options.directory)
    build_tree.chdir_to_root()

    generator = generator_class(options)
    if options.list:
        for name in sorted(generator.targets):
            print(generator.filename_for(name))
        return
    # List in a cmake list format (i.e. ';'-separated)
    if options.list_for_cmake:
        print(';'.join(generator.filename_for(name)
                       for name in sorted(generator.targets)), end='')
        return
    if options.targets:
        # Allow "-" as a special case so you can run
        # ``generate_xxx_tests.py - $targets`` and it works uniformly whether
        # ``$targets`` is empty or not.
        options.targets = [os.path.basename(re.sub(r'\.data\Z', r'', target))
                           for target in options.targets
                           if target != '-']
    else:
        options.targets = sorted(generator.targets)
    for target in options.targets:
        generator.generate_target(target)


# --- scripts/framework_dev/typing_util.py -----------------------------------
# (This chunk of the patch also contains the whole of typing_util.py.)

# The typing_extensions module is necessary for type annotations that are
# checked with mypy. It is only used for type annotations or to define
# things that are themselves only used for type annotations. It is not
# available on a default Python installation. Therefore, try loading
# what we need from it for the sake of mypy (which depends on, or comes
# with, typing_extensions), and if not define substitutes that lack the
# static type information but are good enough at runtime.
try:
    from typing_extensions import Protocol #pylint: disable=import-error
except ImportError:
    class Protocol: #type: ignore
        #pylint: disable=too-few-public-methods
        pass


class Writable(Protocol):
    """Abstract class for typing hints."""
    # pylint: disable=no-self-use,too-few-public-methods,unused-argument
    def write(self, text: str) -> Any:
        ...
"""
Test suites code generator.

This script is a key part of the Mbed TLS test suites framework. For each
test suite it combines:

- test functions written in C in a test_suite_xxx.function file (with
  special comment markers for suite dependencies and per-case
  dependencies),
- test cases (name, optional ``depends_on:`` build-macro dependencies,
  test function name and ':'-separated parameters) listed in the
  matching .data file, and
- a C template file (e.g. main_test.function),

into a compilable test suite C source file. Build dependencies and
integer expressions appearing in the .data file are only known at build
or run time, so this script enumerates them and generates run-time
checks for them; function names are translated into a generated
dispatch table. The generated code is substituted into the
__MBEDTLS_TEST_TEMPLATE__* fields of the template.
"""


import os
import re
import sys
import string
import argparse


# Types recognized as signed integer arguments in test functions.
SIGNED_INTEGER_TYPES = frozenset([
    'char',
    'short',
    'short int',
    'int',
    'int8_t',
    'int16_t',
    'int32_t',
    'int64_t',
    'intmax_t',
    'long',
    'long int',
    'long long int',
    'mbedtls_mpi_sint',
    'psa_status_t',
])
# Types recognized as string arguments in test functions.
STRING_TYPES = frozenset(['char*', 'const char*', 'char const*'])
# Types recognized as hex data arguments in test functions.
DATA_TYPES = frozenset(['data_t*', 'const data_t*', 'data_t const*'])

# Markers delimiting the special sections of a .function file.
BEGIN_HEADER_REGEX = r'/\*\s*BEGIN_HEADER\s*\*/'
END_HEADER_REGEX = r'/\*\s*END_HEADER\s*\*/'

BEGIN_SUITE_HELPERS_REGEX = r'/\*\s*BEGIN_SUITE_HELPERS\s*\*/'
END_SUITE_HELPERS_REGEX = r'/\*\s*END_SUITE_HELPERS\s*\*/'

BEGIN_DEP_REGEX = r'BEGIN_DEPENDENCIES'
END_DEP_REGEX = r'END_DEPENDENCIES'

BEGIN_CASE_REGEX = r'/\*\s*BEGIN_CASE\s*(?P<depends_on>.*?)\s*\*/'
END_CASE_REGEX = r'/\*\s*END_CASE\s*\*/'

DEPENDENCY_REGEX = r'depends_on:(?P<dependencies>.*)'
C_IDENTIFIER_REGEX = r'!?[a-z_][a-z0-9_]*'
CONDITION_OPERATOR_REGEX = r'[!=]=|[<>]=?'
# forbid 0ddd which might be accidentally octal or accidentally decimal
CONDITION_VALUE_REGEX = r'[-+]?(0x[0-9a-f]+|0|[1-9][0-9]*)'
CONDITION_REGEX = r'({})(?:\s*({})\s*({}))?$'.format(C_IDENTIFIER_REGEX,
                                                     CONDITION_OPERATOR_REGEX,
                                                     CONDITION_VALUE_REGEX)
TEST_FUNCTION_VALIDATION_REGEX = r'\s*void\s+(?P<func_name>\w+)\s*\('
FUNCTION_ARG_LIST_END_REGEX = r'.*\)'
EXIT_LABEL_REGEX = r'^exit:'


class GeneratorInputError(Exception):
    """
    Exception to indicate an error in the input files to this script.
    This includes missing patterns, test function names and other
    parsing errors.
    """
    pass


class FileWrapper:
    """
    Iterable wrapper around a file object that tracks the number of the
    line most recently read (exposed as the `line_no` property).
    """

    def __init__(self, file_name) -> None:
        """
        Open the file and initialize the line counter to 0.

        :param file_name: File path to open.
        """
        # Underlying binary file object; decoding happens per line.
        self._f = open(file_name, 'rb')
        self._line_no = 0

    def __iter__(self):
        return self

    def __next__(self):
        """
        Read one line, counting it.

        :return: The line, decoded, with trailing whitespace stripped
            and a single '\\n' appended.
        """
        raw = next(self._f)
        self._line_no += 1
        # Decode with the default encoding; rstrip() normalizes away
        # any '\r' or other trailing whitespace before re-adding '\n'.
        return raw.decode(sys.getdefaultencoding()).rstrip() + '\n'

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Delegate closing to the underlying file object.
        self._f.__exit__(exc_type, exc_val, exc_tb)

    @property
    def line_no(self):
        """Line number for the line that was last read."""
        return self._line_no

    @property
    def name(self):
        """Name of the file being read."""
        return self._f.name
def split_dep(dep):
    """
    Split the NOT character '!' from a dependency. Used by
    gen_dependencies().

    :param dep: Dependency string.
    :return: String tuple, e.g. ('!', 'MACRO') for !MACRO and
        ('', 'MACRO') for MACRO.
    """
    negated = dep[0] == '!'
    return ('!', dep[1:]) if negated else ('', dep)


def gen_dependencies(dependencies):
    """
    Generate C preprocessor guards from a dependency list.

    A dependency may carry a leading '!' to negate the condition;
    split_dep() separates it so the proper check can be emitted.
    Callers wrap dependent code between the two returned strings.

    :param dependencies: List of dependencies.
    :return: (#if..., #endif...) code with macro annotations for
        readability.
    """
    opening = []
    for negation, macro in map(split_dep, dependencies):
        opening.append('#if %sdefined(%s)\n' % (negation, macro))
    # Close guards in reverse order, annotating each #endif.
    closing = ['#endif /* %s */\n' % dep for dep in reversed(dependencies)]
    return ''.join(opening), ''.join(closing)


def gen_dependencies_one_line(dependencies):
    """
    Similar to gen_dependencies() but generates the whole check on one
    line. Useful for generating code with an #else block.

    :param dependencies: List of dependencies.
    :return: Preprocessor check code ('' for an empty list).
    """
    if not dependencies:
        return ''
    checks = ['%sdefined(%s)' % pair for pair in map(split_dep, dependencies)]
    return '#if ' + ' && '.join(checks)


def gen_function_wrapper(name, local_vars, args_dispatch):
    """
    Create test function wrapper code. A wrapper unpacks parameters
    from the params[] array and calls the test function.

    :param name: Test function name.
    :param local_vars: Local variables declaration code.
    :param args_dispatch: List of dispatch arguments,
        e.g. ['(char *) params[0]', '*((int *) params[1])'].
    :return: Test function wrapper code.
    """
    return '''
void {name}_wrapper( void ** params )
{{
{unused_params}{locals}
    {name}( {args} );
}}
'''.format(name=name,
           unused_params='' if args_dispatch else '    (void)params;\n',
           args=', '.join(args_dispatch),
           locals=local_vars)


def gen_dispatch(name, dependencies):
    """
    Generate an initializer entry for the function dispatch table in
    main_test.function. The entry is guarded by a compile-time check of
    the test function's dependencies: the wrapper is used when they are
    met, NULL otherwise.

    :param name: Test function name.
    :param dependencies: List of dependencies.
    :return: Dispatch code.
    """
    guard = gen_dependencies_one_line(dependencies)
    if guard:
        return '''
{preprocessor_check}
    {name}_wrapper,
#else
    NULL,
#endif
'''.format(preprocessor_check=guard, name=name)
    return '''
    {name}_wrapper,
'''.format(name=name)


def parse_until_pattern(funcs_f, end_regex):
    """
    Read lines from the .function file object until end_regex matches.

    :param funcs_f: File object for the .function file.
    :param end_regex: Pattern to stop parsing.
    :raises GeneratorInputError: If EOF is hit before the end pattern.
    :return: Lines read before the end pattern, preceded by a #line
        directive so C diagnostics point at the original file.
    """
    collected = '#line %d "%s"\n' % (funcs_f.line_no + 1, funcs_f.name)
    for line in funcs_f:
        if re.search(end_regex, line):
            break
        collected += line
    else:
        raise GeneratorInputError("file: %s - end pattern [%s] not found!" %
                                  (funcs_f.name, end_regex))

    return collected


def validate_dependency(dependency):
    """
    Validate a C macro dependency.

    :param dependency: Input macro dependency.
    :raises GeneratorInputError: On invalid input.
    :return: Input dependency stripped of leading & trailing whitespace.
    """
    dependency = dependency.strip()
    if re.match(CONDITION_REGEX, dependency, re.I) is None:
        raise GeneratorInputError('Invalid dependency %s' % dependency)
    return dependency


def parse_dependencies(inp_str):
    """
    Parse, validate and return the dependencies found in inp_str.

    :param inp_str: Input string with macros delimited by ':'.
    :return: List of validated dependencies.
    """
    return [validate_dependency(field) for field in inp_str.split(':')]


def parse_suite_dependencies(funcs_f):
    """
    Parse test suite dependencies specified at the top of a .function
    file, between BEGIN_DEPENDENCIES and END_DEPENDENCIES. Dependencies
    follow the 'depends_on:' pattern and are delimited by ':'.

    :param funcs_f: File object for the .function file.
    :raises GeneratorInputError: If END_DEPENDENCIES is never found.
    :return: List of test suite dependencies.
    """
    dependencies = []
    for line in funcs_f:
        match = re.search(DEPENDENCY_REGEX, line.strip())
        if match:
            try:
                dependencies = parse_dependencies(match.group('dependencies'))
            except GeneratorInputError as error:
                # Re-raise with file/line context for the user.
                raise GeneratorInputError(
                    str(error) + " - %s:%d" % (funcs_f.name, funcs_f.line_no))
        if re.search(END_DEP_REGEX, line):
            break
    else:
        raise GeneratorInputError("file: %s - end dependency pattern [%s]"
                                  " not found!" % (funcs_f.name,
                                                   END_DEP_REGEX))

    return dependencies
def parse_function_dependencies(line):
    """
    Parse function dependencies given on the BEGIN_CASE comment line.
    Dependencies follow the 'depends_on:' pattern, delimited by ':'.

    :param line: Line from the .function file that has dependencies.
    :return: List of dependencies.
    """
    dependencies = []
    case_match = re.search(BEGIN_CASE_REGEX, line)
    dep_str = case_match.group('depends_on')
    if dep_str:
        dep_match = re.search(DEPENDENCY_REGEX, dep_str)
        if dep_match:
            dependencies += parse_dependencies(dep_match.group('dependencies'))

    return dependencies


ARGUMENT_DECLARATION_REGEX = re.compile(r'(.+?) ?(?:\bconst\b)? ?(\w+)\Z', re.S)
def parse_function_argument(arg, arg_idx, args, local_vars, args_dispatch):
    """
    Parse one test function argument declaration and record how the
    wrapper must unpack it from params[].

    :param arg: Argument declaration.
    :param arg_idx: Current wrapper argument index.
    :param args: Accumulator of arguments' internal types.
    :param local_vars: Accumulator of internal variable declarations.
    :param args_dispatch: Accumulator of argument usage expressions.
    :return: The number of wrapper arguments consumed, or None if the
        argument declaration is invalid.
    """
    # Normalize whitespace so type matching is textual.
    arg = arg.strip()
    arg = re.sub(r'\s*\*\s*', r'*', arg)
    arg = re.sub(r'\s+', r' ', arg)
    # Extract name and type.
    decl = ARGUMENT_DECLARATION_REGEX.search(arg)
    if not decl:
        # E.g. "int x[42]"
        return None
    typ, _ = decl.groups()
    if typ in SIGNED_INTEGER_TYPES:
        args.append('int')
        args_dispatch.append('((mbedtls_test_argument_t *) params[%d])->sint' % arg_idx)
        return 1
    if typ in STRING_TYPES:
        args.append('char*')
        args_dispatch.append('(char *) params[%d]' % arg_idx)
        return 1
    if typ in DATA_TYPES:
        args.append('hex')
        # A data_t argument consumes two slots: pointer and length,
        # packed into a local data_t structure.
        pointer_initializer = '(uint8_t *) params[%d]' % arg_idx
        len_initializer = '((mbedtls_test_argument_t *) params[%d])->len' % (arg_idx+1)
        local_vars.append('    data_t data%d = {%s, %s};\n' %
                          (arg_idx, pointer_initializer, len_initializer))
        args_dispatch.append('&data%d' % arg_idx)
        return 2
    return None


ARGUMENT_LIST_REGEX = re.compile(r'\((.*?)\)', re.S)
def parse_function_arguments(line):
    """
    Parse a test function signature for validation and build the pieces
    of a dispatch wrapper that translates input test vectors read from
    the data file into test function arguments.

    This assumes the argument list is terminated by ')', i.e. test
    functions do not take function pointer arguments.

    :param line: Line from the .function file with the signature.
    :raises ValueError: On an unsupported argument type.
    :return: (argument type list, local variable declarations, argument
        dispatch expressions).
    """
    arg_list_match = ARGUMENT_LIST_REGEX.search(line)
    arg_list = arg_list_match.group(1).strip()
    if arg_list in ['', 'void']:
        return [], '', []
    args = []
    local_vars = []
    args_dispatch = []
    arg_idx = 0
    for arg in arg_list.split(','):
        consumed = parse_function_argument(arg, arg_idx,
                                           args, local_vars, args_dispatch)
        if consumed is None:
            raise ValueError("Test function arguments can only be 'int', "
                             "'char *' or 'data_t'\n%s" % line)
        arg_idx += consumed

    return args, ''.join(local_vars), args_dispatch


def generate_function_code(name, code, local_vars, args_dispatch,
                           dependencies):
    """
    Generate final function code: preprocessor guards, the function
    itself (with an exit label guaranteed) and its dispatch wrapper.

    :param name: Function name.
    :param code: Function code.
    :param local_vars: Local variables for the function wrapper.
    :param args_dispatch: Argument dispatch code.
    :param dependencies: Preprocessor dependencies list.
    :return: Final function code.
    """
    # Add an exit label just before the closing brace if not present.
    if code.find('exit:') == -1:
        split_code = code.rsplit('}', 1)
        if len(split_code) == 2:
            code = """exit:
    ;
}""".join(split_code)

    code += gen_function_wrapper(name, local_vars, args_dispatch)
    preprocessor_check_start, preprocessor_check_end = \
        gen_dependencies(dependencies)
    return preprocessor_check_start + code + preprocessor_check_end


COMMENT_START_REGEX = re.compile(r'/[*/]')

def skip_comments(line, stream):
    """Remove comments in line.

    If the line contains an unfinished comment, read more lines from
    stream until the line that finishes the comment.

    :return: The original line with inner comments replaced by spaces.
        Trailing comments and whitespace may be removed completely.
    """
    pos = 0
    while True:
        opening = COMMENT_START_REGEX.search(line, pos)
        if not opening:
            break
        if line[opening.start(0) + 1] == '/': # //...
            continuation = line
            # Count the number of line breaks, to keep line numbers
            # aligned in the output.
            line_count = 1
            while continuation.endswith('\\\n'):
                # This errors out if the file ends with an unfinished
                # line comment. That's acceptable to not complicate the
                # code further.
                continuation = next(stream)
                line_count += 1
            return line[:opening.start(0)].rstrip() + '\n' * line_count
        # Parsing /*...*/, looking for the end
        closing = line.find('*/', opening.end(0))
        while closing == -1:
            # This errors out if the file ends with an unfinished block
            # comment. That's acceptable to not complicate the code
            # further.
            line += next(stream)
            closing = line.find('*/', opening.end(0))
        pos = closing + 2
        # Replace the inner comment by spaces. There needs to be at
        # least one space for things like 'int/*ihatespaces*/foo'. Go
        # further and preserve the width of the comment and line breaks,
        # this way positions in error messages remain correct.
        line = (line[:opening.start(0)] +
                re.sub(r'.', r' ', line[opening.start(0):pos]) +
                line[pos:])
    # Strip whitespace at the end of lines (irrelevant to error messages).
    return re.sub(r' +(\n|\Z)', r'\1', line)
def parse_function_code(funcs_f, dependencies, suite_dependencies):
    """
    Parse one function out of the .function file object and generate
    its function and dispatch code.

    :param funcs_f: File object of the functions file.
    :param dependencies: List of dependencies.
    :param suite_dependencies: List of test suite dependencies.
    :return: (function name, arguments, function code, dispatch code).
    """
    line_directive = '#line %d "%s"\n' % (funcs_f.line_no + 1, funcs_f.name)
    code = ''
    has_exit_label = False
    for line in funcs_f:
        # The function signature may be split across multiple lines.
        # Find the start of the argument list, flatten newlines and try
        # the signature regex on the accumulated text.
        line = skip_comments(line, funcs_f)
        up_to_arg_list_start = code + line[:line.find('(') + 1]
        match = re.match(TEST_FUNCTION_VALIDATION_REGEX,
                         up_to_arg_list_start.replace('\n', ' '), re.I)
        if match:
            name = match.group('func_name')
            # Keep reading until the full argument list (terminated by
            # ')') has been seen.
            if not re.match(FUNCTION_ARG_LIST_END_REGEX, line):
                for lin in funcs_f:
                    line += skip_comments(lin, funcs_f)
                    if re.search(FUNCTION_ARG_LIST_END_REGEX, line):
                        break
            args, local_vars, args_dispatch = parse_function_arguments(
                line)
            code += line
            break
        code += line
    else:
        raise GeneratorInputError("file: %s - Test functions not found!" %
                                  funcs_f.name)

    # Prefix test function name with 'test_'
    code = code.replace(name, 'test_' + name, 1)
    name = 'test_' + name

    # If a test function has no arguments then add a 'void' argument to
    # avoid "-Wstrict-prototypes" warnings from clang
    if len(args) == 0:
        code = code.replace('()', '(void)', 1)

    # Accumulate the body up to END_CASE, tracking any exit label.
    for line in funcs_f:
        if re.search(END_CASE_REGEX, line):
            break
        if not has_exit_label:
            has_exit_label = \
                re.search(EXIT_LABEL_REGEX, line.strip()) is not None
        code += line
    else:
        raise GeneratorInputError("file: %s - end case pattern [%s] not "
                                  "found!" % (funcs_f.name, END_CASE_REGEX))

    code = line_directive + code
    code = generate_function_code(name, code, local_vars, args_dispatch,
                                  dependencies)
    dispatch_code = gen_dispatch(name, suite_dependencies + dependencies)
    return (name, args, code, dispatch_code)


def parse_functions(funcs_f):
    """
    Parse a test_suite_xxx.function file and return the information
    needed to generate the C source file for the test suite.

    :param funcs_f: File object of the functions file.
    :return: (list of test suite dependencies, test function dispatch
        code, function code, dict of function identifiers and arguments
        info).
    """
    suite_helpers = ''
    suite_dependencies = []
    suite_functions = ''
    func_info = {}
    function_idx = 0
    dispatch_code = ''
    for line in funcs_f:
        if re.search(BEGIN_HEADER_REGEX, line):
            suite_helpers += parse_until_pattern(funcs_f, END_HEADER_REGEX)
        elif re.search(BEGIN_SUITE_HELPERS_REGEX, line):
            suite_helpers += parse_until_pattern(funcs_f,
                                                 END_SUITE_HELPERS_REGEX)
        elif re.search(BEGIN_DEP_REGEX, line):
            suite_dependencies += parse_suite_dependencies(funcs_f)
        elif re.search(BEGIN_CASE_REGEX, line):
            try:
                dependencies = parse_function_dependencies(line)
            except GeneratorInputError as error:
                # Add file/line context to the error.
                raise GeneratorInputError(
                    "%s:%d: %s" % (funcs_f.name, funcs_f.line_no,
                                   str(error)))
            func_name, args, func_code, func_dispatch =\
                parse_function_code(funcs_f, dependencies, suite_dependencies)
            suite_functions += func_code
            # Generate dispatch code and enumeration info.
            if func_name in func_info:
                raise GeneratorInputError(
                    "file: %s - function %s re-declared at line %d" %
                    (funcs_f.name, func_name, funcs_f.line_no))
            func_info[func_name] = (function_idx, args)
            dispatch_code += '/* Function Id: %d */\n' % function_idx
            dispatch_code += func_dispatch
            function_idx += 1

    # Wrap all helpers and functions in the suite-level guards.
    func_code = (suite_helpers +
                 suite_functions).join(gen_dependencies(suite_dependencies))
    return suite_dependencies, dispatch_code, func_code, func_info


def escaped_split(inp_str, split_char):
    """
    Split inp_str on split_char, ignoring escaped occurrences. Since the
    return value is written back to the intermediate data file, escape
    characters in the input are retained in the output.

    :param inp_str: String to split.
    :param split_char: Split character (a single character).
    :raises ValueError: If split_char is longer than one character.
    :return: List of non-empty splits.
    """
    if len(split_char) > 1:
        raise ValueError('Expected split character. Found string!')
    # Keep escaped pairs intact; turn unescaped separators into '\n',
    # then split on that.
    out = re.sub(r'(\\.)|' + split_char,
                 lambda m: m.group(1) or '\n', inp_str,
                 len(inp_str)).split('\n')
    out = [x for x in out if x]
    return out
def parse_test_data(data_f):
    """
    Parse a .data file for each test case's name, test function name,
    dependencies and arguments. This information is correlated with the
    test functions file to generate an intermediate data file in which
    function names, dependencies and integer constant expressions are
    replaced by identifiers, mainly to optimise space for on-target
    execution.

    :param data_f: File object of the data file.
    :return: Generator yielding (line number, test name, function name,
        dependency list, function argument list).
    """
    # Two-state machine: first non-blank line is the test name, the
    # following lines carry dependencies and then the test vector.
    state_read_name = 0
    state_read_args = 1
    state = state_read_name
    dependencies = []
    name = ''
    for line in data_f:
        line = line.strip()
        # Skip comments
        if line.startswith('#'):
            continue

        # A blank line indicates the end of a test case.
        if not line:
            if state == state_read_args:
                raise GeneratorInputError("[%s:%d] Newline before arguments. "
                                          "Test function and arguments "
                                          "missing for %s" %
                                          (data_f.name, data_f.line_no, name))
            continue

        if state == state_read_name:
            # Read test name
            name = line
            state = state_read_args
        elif state == state_read_args:
            # A dependency line, if any, precedes the test vector.
            match = re.search(DEPENDENCY_REGEX, line)
            if match:
                try:
                    dependencies = parse_dependencies(
                        match.group('dependencies'))
                except GeneratorInputError as error:
                    raise GeneratorInputError(
                        str(error) + " - %s:%d" %
                        (data_f.name, data_f.line_no))
            else:
                # Read test vector: function name then its arguments.
                parts = escaped_split(line, ':')
                test_function = parts[0]
                args = parts[1:]
                yield data_f.line_no, name, test_function, dependencies, args
                dependencies = []
                state = state_read_name
    if state == state_read_args:
        raise GeneratorInputError("[%s:%d] Newline before arguments. "
                                  "Test function and arguments missing for "
                                  "%s" % (data_f.name, data_f.line_no, name))


def gen_dep_check(dep_id, dep):
    """
    Generate code checking a dependency via its associated identifier.

    :param dep_id: Dependency identifier (must be >= 0).
    :param dep: Dependency macro, optionally prefixed with '!'.
    :raises GeneratorInputError: On a negative id or invalid macro.
    :return: Dependency check code.
    """
    if dep_id < 0:
        raise GeneratorInputError("Dependency Id should be a positive "
                                  "integer.")
    _not, dep = ('!', dep[1:]) if dep[0] == '!' else ('', dep)
    if not dep:
        raise GeneratorInputError("Dependency should not be an empty string.")

    dependency = re.match(CONDITION_REGEX, dep, re.I)
    if not dependency:
        raise GeneratorInputError('Invalid dependency %s' % dep)

    # A bare macro becomes defined(MACRO); a comparison is emitted
    # verbatim as MACRO <op> VALUE.
    _defined = '' if dependency.group(2) else 'defined'
    _cond = dependency.group(2) if dependency.group(2) else ''
    _value = dependency.group(3) if dependency.group(3) else ''

    return '''
        case {id}:
            {{
#if {_not}{_defined}({macro}{_cond}{_value})
                ret = DEPENDENCY_SUPPORTED;
#else
                ret = DEPENDENCY_NOT_SUPPORTED;
#endif
            }}
            break;'''.format(_not=_not, _defined=_defined,
                             macro=dependency.group(1), id=dep_id,
                             _cond=_cond, _value=_value)


def gen_expression_check(exp_id, exp):
    """
    Generate code evaluating an integer expression via its associated
    expression id.

    :param exp_id: Expression identifier (must be >= 0).
    :param exp: Expression/Macro.
    :raises GeneratorInputError: On a negative id or empty expression.
    :return: Expression check code.
    """
    if exp_id < 0:
        raise GeneratorInputError("Expression Id should be a positive "
                                  "integer.")
    if not exp:
        raise GeneratorInputError("Expression should not be an empty string.")
    return '''
        case {exp_id}:
            {{
                *out_value = {expression};
            }}
            break;'''.format(exp_id=exp_id, expression=exp)
+ + :param out_data_f: Output intermediate data file + :param test_dependencies: Dependencies + :param unique_dependencies: Mutable list to track unique dependencies + that are global to this re-entrant function. + :return: returns dependency check code. + """ + dep_check_code = '' + if test_dependencies: + out_data_f.write('depends_on') + for dep in test_dependencies: + if dep not in unique_dependencies: + unique_dependencies.append(dep) + dep_id = unique_dependencies.index(dep) + dep_check_code += gen_dep_check(dep_id, dep) + else: + dep_id = unique_dependencies.index(dep) + out_data_f.write(':' + str(dep_id)) + out_data_f.write('\n') + return dep_check_code + + +INT_VAL_REGEX = re.compile(r'-?(\d+|0x[0-9a-f]+)$', re.I) +def val_is_int(val: str) -> bool: + """Whether val is suitable as an 'int' parameter in the .datax file.""" + if not INT_VAL_REGEX.match(val): + return False + # Limit the range to what is guaranteed to get through strtol() + return abs(int(val, 0)) <= 0x7fffffff + +def write_parameters(out_data_f, test_args, func_args, unique_expressions): + """ + Writes test parameters to the intermediate data file, replacing + the string form with identifiers. Also, generates expression + check code. + + :param out_data_f: Output intermediate data file + :param test_args: Test parameters + :param func_args: Function arguments + :param unique_expressions: Mutable list to track unique + expressions that are global to this re-entrant function. + :return: Returns expression check code. + """ + expression_code = '' + for i, _ in enumerate(test_args): + typ = func_args[i] + val = test_args[i] + + # Pass small integer constants literally. This reduces the size of + # the C code. Register anything else as an expression. + if typ == 'int' and not val_is_int(val): + typ = 'exp' + if val not in unique_expressions: + unique_expressions.append(val) + # exp_id can be derived from len(). But for + # readability and consistency with case of existing + # let's use index(). 
+                exp_id = unique_expressions.index(val)
+                expression_code += gen_expression_check(exp_id, val)
+                val = exp_id
+            else:
+                val = unique_expressions.index(val)
+        out_data_f.write(':' + typ + ':' + str(val))
+    out_data_f.write('\n')
+    return expression_code
+
+
+def gen_suite_dep_checks(suite_dependencies, dep_check_code, expression_code):
+    """
+    Generates preprocessor checks for test suite dependencies.
+
+    :param suite_dependencies: Test suite dependencies read from the
+            .function file.
+    :param dep_check_code: Dependency check code
+    :param expression_code: Expression check code
+    :return: Dependency and expression code guarded by test suite
+            dependencies.
+    """
+    if suite_dependencies:
+        preprocessor_check = gen_dependencies_one_line(suite_dependencies)
+        dep_check_code = '''
+{preprocessor_check}
+{code}
+#endif
+'''.format(preprocessor_check=preprocessor_check, code=dep_check_code)
+        expression_code = '''
+{preprocessor_check}
+{code}
+#endif
+'''.format(preprocessor_check=preprocessor_check, code=expression_code)
+    return dep_check_code, expression_code
+
+
+def get_function_info(func_info, function_name, line_no):
+    """Look up information about a test function by name.
+
+    Raise an informative exception if function_name is not found.
+
+    :param func_info: dictionary mapping function names to their information.
+    :param function_name: the function name as written in the .function and
+                          .data files.
+    :param line_no: line number for error messages.
+    :return: Function information (id, args).
+    """
+    test_function_name = 'test_' + function_name
+    if test_function_name not in func_info:
+        raise GeneratorInputError("%d: Function %s not found!" %
+                                  (line_no, test_function_name))
+    return func_info[test_function_name]
+
+
+def gen_from_test_data(data_f, out_data_f, func_info, suite_dependencies):
+    """
+    This function reads test case name, dependencies and test vectors
+    from the .data file.
This information is correlated with the test + functions file for generating an intermediate data file replacing + the strings for test function names, dependencies and integer + constant expressions with identifiers. Mainly for optimising + space for on-target execution. + It also generates test case dependency check code and expression + evaluation code. + + :param data_f: Data file object + :param out_data_f: Output intermediate data file + :param func_info: Dict keyed by function and with function id + and arguments info + :param suite_dependencies: Test suite dependencies + :return: Returns dependency and expression check code + """ + unique_dependencies = [] + unique_expressions = [] + dep_check_code = '' + expression_code = '' + for line_no, test_name, function_name, test_dependencies, test_args in \ + parse_test_data(data_f): + out_data_f.write(test_name + '\n') + + # Write dependencies + dep_check_code += write_dependencies(out_data_f, test_dependencies, + unique_dependencies) + + # Write test function name + func_id, func_args = \ + get_function_info(func_info, function_name, line_no) + out_data_f.write(str(func_id)) + + # Write parameters + if len(test_args) != len(func_args): + raise GeneratorInputError("%d: Invalid number of arguments in test " + "%s. See function %s signature." % + (line_no, test_name, function_name)) + expression_code += write_parameters(out_data_f, test_args, func_args, + unique_expressions) + + # Write a newline as test case separator + out_data_f.write('\n') + + dep_check_code, expression_code = gen_suite_dep_checks( + suite_dependencies, dep_check_code, expression_code) + return dep_check_code, expression_code + + +def add_input_info(funcs_file, data_file, template_file, + c_file, snippets): + """ + Add generator input info in snippets. 
+ + :param funcs_file: Functions file object + :param data_file: Data file object + :param template_file: Template file object + :param c_file: Output C file object + :param snippets: Dictionary to contain code pieces to be + substituted in the template. + :return: + """ + snippets['test_file'] = c_file + snippets['test_main_file'] = template_file + snippets['test_case_file'] = funcs_file + snippets['test_case_data_file'] = data_file + + +def read_code_from_input_files(platform_file, helpers_file, + out_data_file, snippets): + """ + Read code from input files and create substitutions for replacement + strings in the template file. + + :param platform_file: Platform file object + :param helpers_file: Helper functions file object + :param out_data_file: Output intermediate data file object + :param snippets: Dictionary to contain code pieces to be + substituted in the template. + :return: + """ + # Read helpers + with open(helpers_file, 'r') as help_f, open(platform_file, 'r') as \ + platform_f: + snippets['test_common_helper_file'] = helpers_file + snippets['test_common_helpers'] = help_f.read() + snippets['test_platform_file'] = platform_file + snippets['platform_code'] = platform_f.read().replace( + 'DATA_FILE', out_data_file.replace('\\', '\\\\')) # escape '\' + + +def write_test_source_file(template_file, c_file, snippets): + """ + Write output source file with generated source code. + + :param template_file: Template file name + :param c_file: Output source file + :param snippets: Generated and code snippets + :return: + """ + + # Create a placeholder pattern with the correct named capture groups + # to override the default provided with Template. + # Match nothing (no way of escaping placeholders). + escaped = "(?P(?!))" + # Match the "__MBEDTLS_TEST_TEMPLATE__PLACEHOLDER_NAME" pattern. + named = "__MBEDTLS_TEST_TEMPLATE__(?P[A-Z][_A-Z0-9]*)" + # Match nothing (no braced placeholder syntax). 
+ braced = "(?P(?!))" + # If not already matched, a "__MBEDTLS_TEST_TEMPLATE__" prefix is invalid. + invalid = "(?P__MBEDTLS_TEST_TEMPLATE__)" + placeholder_pattern = re.compile("|".join([escaped, named, braced, invalid])) + + with open(template_file, 'r') as template_f, open(c_file, 'w') as c_f: + for line_no, line in enumerate(template_f.readlines(), 1): + # Update line number. +1 as #line directive sets next line number + snippets['line_no'] = line_no + 1 + template = string.Template(line) + template.pattern = placeholder_pattern + snippets = {k.upper():v for (k, v) in snippets.items()} + code = template.substitute(**snippets) + c_f.write(code) + + +def parse_function_file(funcs_file, snippets): + """ + Parse function file and generate function dispatch code. + + :param funcs_file: Functions file name + :param snippets: Dictionary to contain code pieces to be + substituted in the template. + :return: + """ + with FileWrapper(funcs_file) as funcs_f: + suite_dependencies, dispatch_code, func_code, func_info = \ + parse_functions(funcs_f) + snippets['functions_code'] = func_code + snippets['dispatch_code'] = dispatch_code + return suite_dependencies, func_info + + +def generate_intermediate_data_file(data_file, out_data_file, + suite_dependencies, func_info, snippets): + """ + Generates intermediate data file from input data file and + information read from functions file. + + :param data_file: Data file name + :param out_data_file: Output/Intermediate data file + :param suite_dependencies: List of suite dependencies. + :param func_info: Function info parsed from functions file. + :param snippets: Dictionary to contain code pieces to be + substituted in the template. 
+ :return: + """ + with FileWrapper(data_file) as data_f, \ + open(out_data_file, 'w') as out_data_f: + dep_check_code, expression_code = gen_from_test_data( + data_f, out_data_f, func_info, suite_dependencies) + snippets['dep_check_code'] = dep_check_code + snippets['expression_code'] = expression_code + + +def generate_code(**input_info): + """ + Generates C source code from test suite file, data file, common + helpers file and platform file. + + input_info expands to following parameters: + funcs_file: Functions file object + data_file: Data file object + template_file: Template file object + platform_file: Platform file object + helpers_file: Helper functions file object + suites_dir: Test suites dir + c_file: Output C file object + out_data_file: Output intermediate data file object + :return: + """ + funcs_file = input_info['funcs_file'] + data_file = input_info['data_file'] + template_file = input_info['template_file'] + platform_file = input_info['platform_file'] + helpers_file = input_info['helpers_file'] + suites_dir = input_info['suites_dir'] + c_file = input_info['c_file'] + out_data_file = input_info['out_data_file'] + for name, path in [('Functions file', funcs_file), + ('Data file', data_file), + ('Template file', template_file), + ('Platform file', platform_file), + ('Helpers code file', helpers_file), + ('Suites dir', suites_dir)]: + if not os.path.exists(path): + raise IOError("ERROR: %s [%s] not found!" % (name, path)) + + snippets = {'generator_script': os.path.basename(__file__)} + read_code_from_input_files(platform_file, helpers_file, + out_data_file, snippets) + add_input_info(funcs_file, data_file, template_file, + c_file, snippets) + suite_dependencies, func_info = parse_function_file(funcs_file, snippets) + generate_intermediate_data_file(data_file, out_data_file, + suite_dependencies, func_info, snippets) + write_test_source_file(template_file, c_file, snippets) + + +def main(): + """ + Command line parser. 
+ + :return: + """ + parser = argparse.ArgumentParser( + description='Dynamically generate test suite code.') + + parser.add_argument("-f", "--functions-file", + dest="funcs_file", + help="Functions file", + metavar="FUNCTIONS_FILE", + required=True) + + parser.add_argument("-d", "--data-file", + dest="data_file", + help="Data file", + metavar="DATA_FILE", + required=True) + + parser.add_argument("-t", "--template-file", + dest="template_file", + help="Template file", + metavar="TEMPLATE_FILE", + required=True) + + parser.add_argument("-s", "--suites-dir", + dest="suites_dir", + help="Suites dir", + metavar="SUITES_DIR", + required=True) + + parser.add_argument("--helpers-file", + dest="helpers_file", + help="Helpers file", + metavar="HELPERS_FILE", + required=True) + + parser.add_argument("-p", "--platform-file", + dest="platform_file", + help="Platform code file", + metavar="PLATFORM_FILE", + required=True) + + parser.add_argument("-o", "--out-dir", + dest="out_dir", + help="Dir where generated code and scripts are copied", + metavar="OUT_DIR", + required=True) + + args = parser.parse_args() + + data_file_name = os.path.basename(args.data_file) + data_name = os.path.splitext(data_file_name)[0] + + out_c_file = os.path.join(args.out_dir, data_name + '.c') + out_data_file = os.path.join(args.out_dir, data_name + '.datax') + + out_c_file_dir = os.path.dirname(out_c_file) + out_data_file_dir = os.path.dirname(out_data_file) + for directory in [out_c_file_dir, out_data_file_dir]: + if not os.path.exists(directory): + os.makedirs(directory) + + generate_code(funcs_file=args.funcs_file, data_file=args.data_file, + template_file=args.template_file, + platform_file=args.platform_file, + helpers_file=args.helpers_file, suites_dir=args.suites_dir, + c_file=out_c_file, out_data_file=out_data_file) + + +if __name__ == "__main__": + try: + main() + except GeneratorInputError as err: + sys.exit("%s: input error: %s" % + (os.path.basename(sys.argv[0]), str(err))) diff --git 
a/scripts/test_generate_test_code.py b/scripts/test_generate_test_code.py new file mode 100755 index 000000000..abc46a729 --- /dev/null +++ b/scripts/test_generate_test_code.py @@ -0,0 +1,1915 @@ +#!/usr/bin/env python3 +# Unit test for generate_test_code.py +# +# Copyright The Mbed TLS Contributors +# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later + +""" +Unit tests for generate_test_code.py +""" + +from io import StringIO +from unittest import TestCase, main as unittest_main +from unittest.mock import patch + +from generate_test_code import gen_dependencies, gen_dependencies_one_line +from generate_test_code import gen_function_wrapper, gen_dispatch +from generate_test_code import parse_until_pattern, GeneratorInputError +from generate_test_code import parse_suite_dependencies +from generate_test_code import parse_function_dependencies +from generate_test_code import parse_function_arguments, parse_function_code +from generate_test_code import parse_functions, END_HEADER_REGEX +from generate_test_code import END_SUITE_HELPERS_REGEX, escaped_split +from generate_test_code import parse_test_data, gen_dep_check +from generate_test_code import gen_expression_check, write_dependencies +from generate_test_code import write_parameters, gen_suite_dep_checks +from generate_test_code import gen_from_test_data + + +class GenDep(TestCase): + """ + Test suite for function gen_dep() + """ + + def test_dependencies_list(self): + """ + Test that gen_dep() correctly creates dependencies for given + dependency list. 
+ :return: + """ + dependencies = ['DEP1', 'DEP2'] + dep_start, dep_end = gen_dependencies(dependencies) + preprocessor1, preprocessor2 = dep_start.splitlines() + endif1, endif2 = dep_end.splitlines() + self.assertEqual(preprocessor1, '#if defined(DEP1)', + 'Preprocessor generated incorrectly') + self.assertEqual(preprocessor2, '#if defined(DEP2)', + 'Preprocessor generated incorrectly') + self.assertEqual(endif1, '#endif /* DEP2 */', + 'Preprocessor generated incorrectly') + self.assertEqual(endif2, '#endif /* DEP1 */', + 'Preprocessor generated incorrectly') + + def test_disabled_dependencies_list(self): + """ + Test that gen_dep() correctly creates dependencies for given + dependency list. + :return: + """ + dependencies = ['!DEP1', '!DEP2'] + dep_start, dep_end = gen_dependencies(dependencies) + preprocessor1, preprocessor2 = dep_start.splitlines() + endif1, endif2 = dep_end.splitlines() + self.assertEqual(preprocessor1, '#if !defined(DEP1)', + 'Preprocessor generated incorrectly') + self.assertEqual(preprocessor2, '#if !defined(DEP2)', + 'Preprocessor generated incorrectly') + self.assertEqual(endif1, '#endif /* !DEP2 */', + 'Preprocessor generated incorrectly') + self.assertEqual(endif2, '#endif /* !DEP1 */', + 'Preprocessor generated incorrectly') + + def test_mixed_dependencies_list(self): + """ + Test that gen_dep() correctly creates dependencies for given + dependency list. 
+ :return: + """ + dependencies = ['!DEP1', 'DEP2'] + dep_start, dep_end = gen_dependencies(dependencies) + preprocessor1, preprocessor2 = dep_start.splitlines() + endif1, endif2 = dep_end.splitlines() + self.assertEqual(preprocessor1, '#if !defined(DEP1)', + 'Preprocessor generated incorrectly') + self.assertEqual(preprocessor2, '#if defined(DEP2)', + 'Preprocessor generated incorrectly') + self.assertEqual(endif1, '#endif /* DEP2 */', + 'Preprocessor generated incorrectly') + self.assertEqual(endif2, '#endif /* !DEP1 */', + 'Preprocessor generated incorrectly') + + def test_empty_dependencies_list(self): + """ + Test that gen_dep() correctly creates dependencies for given + dependency list. + :return: + """ + dependencies = [] + dep_start, dep_end = gen_dependencies(dependencies) + self.assertEqual(dep_start, '', 'Preprocessor generated incorrectly') + self.assertEqual(dep_end, '', 'Preprocessor generated incorrectly') + + def test_large_dependencies_list(self): + """ + Test that gen_dep() correctly creates dependencies for given + dependency list. + :return: + """ + dependencies = [] + count = 10 + for i in range(count): + dependencies.append('DEP%d' % i) + dep_start, dep_end = gen_dependencies(dependencies) + self.assertEqual(len(dep_start.splitlines()), count, + 'Preprocessor generated incorrectly') + self.assertEqual(len(dep_end.splitlines()), count, + 'Preprocessor generated incorrectly') + + +class GenDepOneLine(TestCase): + """ + Test Suite for testing gen_dependencies_one_line() + """ + + def test_dependencies_list(self): + """ + Test that gen_dep() correctly creates dependencies for given + dependency list. + :return: + """ + dependencies = ['DEP1', 'DEP2'] + dep_str = gen_dependencies_one_line(dependencies) + self.assertEqual(dep_str, '#if defined(DEP1) && defined(DEP2)', + 'Preprocessor generated incorrectly') + + def test_disabled_dependencies_list(self): + """ + Test that gen_dep() correctly creates dependencies for given + dependency list. 
+ :return: + """ + dependencies = ['!DEP1', '!DEP2'] + dep_str = gen_dependencies_one_line(dependencies) + self.assertEqual(dep_str, '#if !defined(DEP1) && !defined(DEP2)', + 'Preprocessor generated incorrectly') + + def test_mixed_dependencies_list(self): + """ + Test that gen_dep() correctly creates dependencies for given + dependency list. + :return: + """ + dependencies = ['!DEP1', 'DEP2'] + dep_str = gen_dependencies_one_line(dependencies) + self.assertEqual(dep_str, '#if !defined(DEP1) && defined(DEP2)', + 'Preprocessor generated incorrectly') + + def test_empty_dependencies_list(self): + """ + Test that gen_dep() correctly creates dependencies for given + dependency list. + :return: + """ + dependencies = [] + dep_str = gen_dependencies_one_line(dependencies) + self.assertEqual(dep_str, '', 'Preprocessor generated incorrectly') + + def test_large_dependencies_list(self): + """ + Test that gen_dep() correctly creates dependencies for given + dependency list. + :return: + """ + dependencies = [] + count = 10 + for i in range(count): + dependencies.append('DEP%d' % i) + dep_str = gen_dependencies_one_line(dependencies) + expected = '#if ' + ' && '.join(['defined(%s)' % + x for x in dependencies]) + self.assertEqual(dep_str, expected, + 'Preprocessor generated incorrectly') + + +class GenFunctionWrapper(TestCase): + """ + Test Suite for testing gen_function_wrapper() + """ + + def test_params_unpack(self): + """ + Test that params are properly unpacked in the function call. + + :return: + """ + code = gen_function_wrapper('test_a', '', ('a', 'b', 'c', 'd')) + expected = ''' +void test_a_wrapper( void ** params ) +{ + + test_a( a, b, c, d ); +} +''' + self.assertEqual(code, expected) + + def test_local(self): + """ + Test that params are properly unpacked in the function call. 
+ + :return: + """ + code = gen_function_wrapper('test_a', + 'int x = 1;', ('x', 'b', 'c', 'd')) + expected = ''' +void test_a_wrapper( void ** params ) +{ +int x = 1; + test_a( x, b, c, d ); +} +''' + self.assertEqual(code, expected) + + def test_empty_params(self): + """ + Test that params are properly unpacked in the function call. + + :return: + """ + code = gen_function_wrapper('test_a', '', ()) + expected = ''' +void test_a_wrapper( void ** params ) +{ + (void)params; + + test_a( ); +} +''' + self.assertEqual(code, expected) + + +class GenDispatch(TestCase): + """ + Test suite for testing gen_dispatch() + """ + + def test_dispatch(self): + """ + Test that dispatch table entry is generated correctly. + :return: + """ + code = gen_dispatch('test_a', ['DEP1', 'DEP2']) + expected = ''' +#if defined(DEP1) && defined(DEP2) + test_a_wrapper, +#else + NULL, +#endif +''' + self.assertEqual(code, expected) + + def test_empty_dependencies(self): + """ + Test empty dependency list. + :return: + """ + code = gen_dispatch('test_a', []) + expected = ''' + test_a_wrapper, +''' + self.assertEqual(code, expected) + + +class StringIOWrapper(StringIO): + """ + file like class to mock file object in tests. + """ + def __init__(self, file_name, data, line_no=0): + """ + Init file handle. + + :param file_name: + :param data: + :param line_no: + """ + super(StringIOWrapper, self).__init__(data) + self.line_no = line_no + self.name = file_name + + def next(self): + """ + Iterator method. This method overrides base class's + next method and extends the next method to count the line + numbers as each line is read. + + :return: Line read from file. + """ + parent = super(StringIOWrapper, self) + line = parent.__next__() + return line + + def readline(self, _length=0): + """ + Wrap the base class readline. 
+ + :param length: + :return: + """ + line = super(StringIOWrapper, self).readline() + if line is not None: + self.line_no += 1 + return line + + +class ParseUntilPattern(TestCase): + """ + Test Suite for testing parse_until_pattern(). + """ + + def test_suite_headers(self): + """ + Test that suite headers are parsed correctly. + + :return: + """ + data = '''#include "mbedtls/ecp.h" + +#define ECP_PF_UNKNOWN -1 +/* END_HEADER */ +''' + expected = '''#line 1 "test_suite_ut.function" +#include "mbedtls/ecp.h" + +#define ECP_PF_UNKNOWN -1 +''' + stream = StringIOWrapper('test_suite_ut.function', data, line_no=0) + headers = parse_until_pattern(stream, END_HEADER_REGEX) + self.assertEqual(headers, expected) + + def test_line_no(self): + """ + Test that #line is set to correct line no. in source .function file. + + :return: + """ + data = '''#include "mbedtls/ecp.h" + +#define ECP_PF_UNKNOWN -1 +/* END_HEADER */ +''' + offset_line_no = 5 + expected = '''#line %d "test_suite_ut.function" +#include "mbedtls/ecp.h" + +#define ECP_PF_UNKNOWN -1 +''' % (offset_line_no + 1) + stream = StringIOWrapper('test_suite_ut.function', data, + offset_line_no) + headers = parse_until_pattern(stream, END_HEADER_REGEX) + self.assertEqual(headers, expected) + + def test_no_end_header_comment(self): + """ + Test that InvalidFileFormat is raised when end header comment is + missing. + :return: + """ + data = '''#include "mbedtls/ecp.h" + +#define ECP_PF_UNKNOWN -1 + +''' + stream = StringIOWrapper('test_suite_ut.function', data) + self.assertRaises(GeneratorInputError, parse_until_pattern, stream, + END_HEADER_REGEX) + + +class ParseSuiteDependencies(TestCase): + """ + Test Suite for testing parse_suite_dependencies(). 
+    """
+
+    def test_suite_dependencies(self):
+        """
+        Test that suite dependencies are parsed correctly.
+        :return:
+        """
+        data = '''
+ * depends_on:MBEDTLS_ECP_C
+ * END_DEPENDENCIES
+ */
+'''
+        expected = ['MBEDTLS_ECP_C']
+        stream = StringIOWrapper('test_suite_ut.function', data)
+        dependencies = parse_suite_dependencies(stream)
+        self.assertEqual(dependencies, expected)
+
+    def test_no_end_dep_comment(self):
+        """
+        Test that InvalidFileFormat is raised when end dep comment is missing.
+        :return:
+        """
+        data = '''
+* depends_on:MBEDTLS_ECP_C
+'''
+        stream = StringIOWrapper('test_suite_ut.function', data)
+        self.assertRaises(GeneratorInputError, parse_suite_dependencies,
+                          stream)
+
+    def test_dependencies_split(self):
+        """
+        Test that dependencies are split on ':' and whitespace is stripped.
+        :return:
+        """
+        data = '''
+ * depends_on:MBEDTLS_ECP_C:A:B: C : D :F : G: !H
+ * END_DEPENDENCIES
+ */
+'''
+        expected = ['MBEDTLS_ECP_C', 'A', 'B', 'C', 'D', 'F', 'G', '!H']
+        stream = StringIOWrapper('test_suite_ut.function', data)
+        dependencies = parse_suite_dependencies(stream)
+        self.assertEqual(dependencies, expected)
+
+
+class ParseFuncDependencies(TestCase):
+    """
+    Test Suite for testing parse_function_dependencies()
+    """
+
+    def test_function_dependencies(self):
+        """
+        Test that parse_function_dependencies() correctly parses function
+        dependencies.
+        :return:
+        """
+        line = '/* BEGIN_CASE ' \
+               'depends_on:MBEDTLS_ENTROPY_NV_SEED:MBEDTLS_FS_IO */'
+        expected = ['MBEDTLS_ENTROPY_NV_SEED', 'MBEDTLS_FS_IO']
+        dependencies = parse_function_dependencies(line)
+        self.assertEqual(dependencies, expected)
+
+    def test_no_dependencies(self):
+        """
+        Test that parse_function_dependencies() correctly parses function
+        dependencies.
+        :return:
+        """
+        line = '/* BEGIN_CASE */'
+        dependencies = parse_function_dependencies(line)
+        self.assertEqual(dependencies, [])
+
+    def test_tolerance(self):
+        """
+        Test that parse_function_dependencies() correctly parses function
+        dependencies.
+ :return: + """ + line = '/* BEGIN_CASE depends_on:MBEDTLS_FS_IO: A : !B:C : F*/' + dependencies = parse_function_dependencies(line) + self.assertEqual(dependencies, ['MBEDTLS_FS_IO', 'A', '!B', 'C', 'F']) + + +class ParseFuncSignature(TestCase): + """ + Test Suite for parse_function_arguments(). + """ + + def test_int_and_char_params(self): + """ + Test int and char parameters parsing + :return: + """ + line = 'void entropy_threshold( char * a, int b, int result )' + args, local, arg_dispatch = parse_function_arguments(line) + self.assertEqual(args, ['char*', 'int', 'int']) + self.assertEqual(local, '') + self.assertEqual(arg_dispatch, + ['(char *) params[0]', + '((mbedtls_test_argument_t *) params[1])->sint', + '((mbedtls_test_argument_t *) params[2])->sint']) + + def test_hex_params(self): + """ + Test hex parameters parsing + :return: + """ + line = 'void entropy_threshold( char * a, data_t * h, int result )' + args, local, arg_dispatch = parse_function_arguments(line) + self.assertEqual(args, ['char*', 'hex', 'int']) + self.assertEqual(local, + ' data_t data1 = {(uint8_t *) params[1], ' + '((mbedtls_test_argument_t *) params[2])->len};\n') + self.assertEqual(arg_dispatch, ['(char *) params[0]', + '&data1', + '((mbedtls_test_argument_t *) params[3])->sint']) + + def test_unsupported_arg(self): + """ + Test unsupported argument type + :return: + """ + line = 'void entropy_threshold( char * a, data_t * h, unknown_t result )' + self.assertRaises(ValueError, parse_function_arguments, line) + + def test_empty_params(self): + """ + Test no parameters (nothing between parentheses). + :return: + """ + line = 'void entropy_threshold()' + args, local, arg_dispatch = parse_function_arguments(line) + self.assertEqual(args, []) + self.assertEqual(local, '') + self.assertEqual(arg_dispatch, []) + + def test_blank_params(self): + """ + Test no parameters (space between parentheses). 
+ :return: + """ + line = 'void entropy_threshold( )' + args, local, arg_dispatch = parse_function_arguments(line) + self.assertEqual(args, []) + self.assertEqual(local, '') + self.assertEqual(arg_dispatch, []) + + def test_void_params(self): + """ + Test no parameters (void keyword). + :return: + """ + line = 'void entropy_threshold(void)' + args, local, arg_dispatch = parse_function_arguments(line) + self.assertEqual(args, []) + self.assertEqual(local, '') + self.assertEqual(arg_dispatch, []) + + def test_void_space_params(self): + """ + Test no parameters (void with spaces). + :return: + """ + line = 'void entropy_threshold( void )' + args, local, arg_dispatch = parse_function_arguments(line) + self.assertEqual(args, []) + self.assertEqual(local, '') + self.assertEqual(arg_dispatch, []) + + +class ParseFunctionCode(TestCase): + """ + Test suite for testing parse_function_code() + """ + + def test_no_function(self): + """ + Test no test function found. + :return: + """ + data = ''' +No +test +function +''' + stream = StringIOWrapper('test_suite_ut.function', data) + err_msg = 'file: test_suite_ut.function - Test functions not found!' + self.assertRaisesRegex(GeneratorInputError, err_msg, + parse_function_code, stream, [], []) + + def test_no_end_case_comment(self): + """ + Test missing end case. + :return: + """ + data = ''' +void test_func() +{ +} +''' + stream = StringIOWrapper('test_suite_ut.function', data) + err_msg = r'file: test_suite_ut.function - '\ + 'end case pattern .*? not found!' 
+ self.assertRaisesRegex(GeneratorInputError, err_msg, + parse_function_code, stream, [], []) + + @patch("generate_test_code.parse_function_arguments") + def test_function_called(self, + parse_function_arguments_mock): + """ + Test parse_function_code() + :return: + """ + parse_function_arguments_mock.return_value = ([], '', []) + data = ''' +void test_func() +{ +} +''' + stream = StringIOWrapper('test_suite_ut.function', data) + self.assertRaises(GeneratorInputError, parse_function_code, + stream, [], []) + self.assertTrue(parse_function_arguments_mock.called) + parse_function_arguments_mock.assert_called_with('void test_func()\n') + + @patch("generate_test_code.gen_dispatch") + @patch("generate_test_code.gen_dependencies") + @patch("generate_test_code.gen_function_wrapper") + @patch("generate_test_code.parse_function_arguments") + def test_return(self, parse_function_arguments_mock, + gen_function_wrapper_mock, + gen_dependencies_mock, + gen_dispatch_mock): + """ + Test generated code. 
+ :return: + """ + parse_function_arguments_mock.return_value = ([], '', []) + gen_function_wrapper_mock.return_value = '' + gen_dependencies_mock.side_effect = gen_dependencies + gen_dispatch_mock.side_effect = gen_dispatch + data = ''' +void func() +{ + ba ba black sheep + have you any wool +} +/* END_CASE */ +''' + stream = StringIOWrapper('test_suite_ut.function', data) + name, arg, code, dispatch_code = parse_function_code(stream, [], []) + + self.assertTrue(parse_function_arguments_mock.called) + parse_function_arguments_mock.assert_called_with('void func()\n') + gen_function_wrapper_mock.assert_called_with('test_func', '', []) + self.assertEqual(name, 'test_func') + self.assertEqual(arg, []) + expected = '''#line 1 "test_suite_ut.function" + +void test_func(void) +{ + ba ba black sheep + have you any wool +exit: + ; +} +''' + self.assertEqual(code, expected) + self.assertEqual(dispatch_code, "\n test_func_wrapper,\n") + + @patch("generate_test_code.gen_dispatch") + @patch("generate_test_code.gen_dependencies") + @patch("generate_test_code.gen_function_wrapper") + @patch("generate_test_code.parse_function_arguments") + def test_with_exit_label(self, parse_function_arguments_mock, + gen_function_wrapper_mock, + gen_dependencies_mock, + gen_dispatch_mock): + """ + Test when exit label is present. 
+ :return: + """ + parse_function_arguments_mock.return_value = ([], '', []) + gen_function_wrapper_mock.return_value = '' + gen_dependencies_mock.side_effect = gen_dependencies + gen_dispatch_mock.side_effect = gen_dispatch + data = ''' +void func() +{ + ba ba black sheep + have you any wool +exit: + yes sir yes sir + 3 bags full +} +/* END_CASE */ +''' + stream = StringIOWrapper('test_suite_ut.function', data) + _, _, code, _ = parse_function_code(stream, [], []) + + expected = '''#line 1 "test_suite_ut.function" + +void test_func(void) +{ + ba ba black sheep + have you any wool +exit: + yes sir yes sir + 3 bags full +} +''' + self.assertEqual(code, expected) + + def test_non_void_function(self): + """ + Test invalid signature (non void). + :return: + """ + data = 'int entropy_threshold( char * a, data_t * h, int result )' + err_msg = 'file: test_suite_ut.function - Test functions not found!' + stream = StringIOWrapper('test_suite_ut.function', data) + self.assertRaisesRegex(GeneratorInputError, err_msg, + parse_function_code, stream, [], []) + + @patch("generate_test_code.gen_dispatch") + @patch("generate_test_code.gen_dependencies") + @patch("generate_test_code.gen_function_wrapper") + @patch("generate_test_code.parse_function_arguments") + def test_function_name_on_newline(self, parse_function_arguments_mock, + gen_function_wrapper_mock, + gen_dependencies_mock, + gen_dispatch_mock): + """ + Test with line break before the function name. 
+ :return: + """ + parse_function_arguments_mock.return_value = ([], '', []) + gen_function_wrapper_mock.return_value = '' + gen_dependencies_mock.side_effect = gen_dependencies + gen_dispatch_mock.side_effect = gen_dispatch + data = ''' +void + + +func() +{ + ba ba black sheep + have you any wool +exit: + yes sir yes sir + 3 bags full +} +/* END_CASE */ +''' + stream = StringIOWrapper('test_suite_ut.function', data) + _, _, code, _ = parse_function_code(stream, [], []) + + expected = '''#line 1 "test_suite_ut.function" + +void + + +test_func(void) +{ + ba ba black sheep + have you any wool +exit: + yes sir yes sir + 3 bags full +} +''' + self.assertEqual(code, expected) + + @patch("generate_test_code.gen_dispatch") + @patch("generate_test_code.gen_dependencies") + @patch("generate_test_code.gen_function_wrapper") + @patch("generate_test_code.parse_function_arguments") + def test_case_starting_with_comment(self, parse_function_arguments_mock, + gen_function_wrapper_mock, + gen_dependencies_mock, + gen_dispatch_mock): + """ + Test with comments before the function signature + :return: + """ + parse_function_arguments_mock.return_value = ([], '', []) + gen_function_wrapper_mock.return_value = '' + gen_dependencies_mock.side_effect = gen_dependencies + gen_dispatch_mock.side_effect = gen_dispatch + data = '''/* comment */ +/* more + * comment */ +// this is\\ +still \\ +a comment +void func() +{ + ba ba black sheep + have you any wool +exit: + yes sir yes sir + 3 bags full +} +/* END_CASE */ +''' + stream = StringIOWrapper('test_suite_ut.function', data) + _, _, code, _ = parse_function_code(stream, [], []) + + expected = '''#line 1 "test_suite_ut.function" + + + + + + +void test_func(void) +{ + ba ba black sheep + have you any wool +exit: + yes sir yes sir + 3 bags full +} +''' + self.assertEqual(code, expected) + + @patch("generate_test_code.gen_dispatch") + @patch("generate_test_code.gen_dependencies") + @patch("generate_test_code.gen_function_wrapper") + 
    @patch("generate_test_code.parse_function_arguments")
    def test_comment_in_prototype(self, parse_function_arguments_mock,
                                  gen_function_wrapper_mock,
                                  gen_dependencies_mock,
                                  gen_dispatch_mock):
        """
        Test with comments in the function prototype
        :return:
        """
        parse_function_arguments_mock.return_value = ([], '', [])
        gen_function_wrapper_mock.return_value = ''
        gen_dependencies_mock.side_effect = gen_dependencies
        gen_dispatch_mock.side_effect = gen_dispatch
        # The prototype carries a backslash-continued line comment and a
        # block comment containing an unbalanced ')'; both must be stripped
        # without confusing the argument-list parser.
        data = '''
void func( int x, // (line \\
                     comment)
           int y /* lone closing parenthesis) */ )
{
    ba ba black sheep
    have you any wool
exit:
    yes sir yes sir
    3 bags full
}
/* END_CASE */
'''
        stream = StringIOWrapper('test_suite_ut.function', data)
        _, _, code, _ = parse_function_code(stream, [], [])

        expected = '''#line 1 "test_suite_ut.function"

void test_func( int x,

           int y )
{
    ba ba black sheep
    have you any wool
exit:
    yes sir yes sir
    3 bags full
}
'''
        self.assertEqual(code, expected)

    @patch("generate_test_code.gen_dispatch")
    @patch("generate_test_code.gen_dependencies")
    @patch("generate_test_code.gen_function_wrapper")
    @patch("generate_test_code.parse_function_arguments")
    def test_line_comment_in_block_comment(self, parse_function_arguments_mock,
                                           gen_function_wrapper_mock,
                                           gen_dependencies_mock,
                                           gen_dispatch_mock):
        """
        Test with line comment in block comment.
        :return:
        """
        parse_function_arguments_mock.return_value = ([], '', [])
        gen_function_wrapper_mock.return_value = ''
        gen_dependencies_mock.side_effect = gen_dependencies
        gen_dispatch_mock.side_effect = gen_dispatch
        # A '//' inside a block comment must not start a line comment; the
        # whole '/* // */' is stripped from the prototype.
        data = '''
void func( int x /* // */ )
{
    ba ba black sheep
    have you any wool
exit:
    yes sir yes sir
    3 bags full
}
/* END_CASE */
'''
        stream = StringIOWrapper('test_suite_ut.function', data)
        _, _, code, _ = parse_function_code(stream, [], [])

        expected = '''#line 1 "test_suite_ut.function"

void test_func( int x )
{
    ba ba black sheep
    have you any wool
exit:
    yes sir yes sir
    3 bags full
}
'''
        self.assertEqual(code, expected)

    @patch("generate_test_code.gen_dispatch")
    @patch("generate_test_code.gen_dependencies")
    @patch("generate_test_code.gen_function_wrapper")
    @patch("generate_test_code.parse_function_arguments")
    def test_block_comment_in_line_comment(self, parse_function_arguments_mock,
                                           gen_function_wrapper_mock,
                                           gen_dependencies_mock,
                                           gen_dispatch_mock):
        """
        Test with block comment in line comment.
        :return:
        """
        parse_function_arguments_mock.return_value = ([], '', [])
        gen_function_wrapper_mock.return_value = ''
        gen_dependencies_mock.side_effect = gen_dependencies
        gen_dispatch_mock.side_effect = gen_dispatch
        # The '/*' inside a '//' comment must not open a block comment.
        data = '''
// /*
void func( int x )
{
    ba ba black sheep
    have you any wool
exit:
    yes sir yes sir
    3 bags full
}
/* END_CASE */
'''
        stream = StringIOWrapper('test_suite_ut.function', data)
        _, _, code, _ = parse_function_code(stream, [], [])

        expected = '''#line 1 "test_suite_ut.function"


void test_func( int x )
{
    ba ba black sheep
    have you any wool
exit:
    yes sir yes sir
    3 bags full
}
'''
        self.assertEqual(code, expected)


class ParseFunction(TestCase):
    """
    Test Suite for testing parse_functions()
    """

    @patch("generate_test_code.parse_until_pattern")
    def test_begin_header(self, parse_until_pattern_mock):
        """
        Test that begin header is checked and parse_until_pattern() is called.
        :return:
        """
        def stop(*_unused):
            """Stop when parse_until_pattern is called."""
            raise Exception
        parse_until_pattern_mock.side_effect = stop
        data = '''/* BEGIN_HEADER */
#include "mbedtls/ecp.h"

#define ECP_PF_UNKNOWN -1
/* END_HEADER */
'''
        stream = StringIOWrapper('test_suite_ut.function', data)
        # The mock raises immediately, so the stream must still be on the
        # BEGIN_HEADER line when parse_functions() aborts.
        self.assertRaises(Exception, parse_functions, stream)
        parse_until_pattern_mock.assert_called_with(stream, END_HEADER_REGEX)
        self.assertEqual(stream.line_no, 1)

    @patch("generate_test_code.parse_until_pattern")
    def test_begin_helper(self, parse_until_pattern_mock):
        """
        Test that begin helper is checked and parse_until_pattern() is called.
        :return:
        """
        def stop(*_unused):
            """Stop when parse_until_pattern is called."""
            raise Exception
        parse_until_pattern_mock.side_effect = stop
        # NOTE(review): '\n' inside this non-raw string is a real newline in
        # the helper body; the parser only cares about the BEGIN/END markers.
        data = '''/* BEGIN_SUITE_HELPERS */
void print_hello_world()
{
    printf("Hello World!\n");
}
/* END_SUITE_HELPERS */
'''
        stream = StringIOWrapper('test_suite_ut.function', data)
        self.assertRaises(Exception, parse_functions, stream)
        parse_until_pattern_mock.assert_called_with(stream,
                                                    END_SUITE_HELPERS_REGEX)
        self.assertEqual(stream.line_no, 1)

    @patch("generate_test_code.parse_suite_dependencies")
    def test_begin_dep(self, parse_suite_dependencies_mock):
        """
        Test that begin dep is checked and parse_suite_dependencies() is
        called.
        :return:
        """
        def stop(*_unused):
            """Stop when parse_until_pattern is called."""
            raise Exception
        parse_suite_dependencies_mock.side_effect = stop
        data = '''/* BEGIN_DEPENDENCIES
 * depends_on:MBEDTLS_ECP_C
 * END_DEPENDENCIES
 */
'''
        stream = StringIOWrapper('test_suite_ut.function', data)
        self.assertRaises(Exception, parse_functions, stream)
        parse_suite_dependencies_mock.assert_called_with(stream)
        self.assertEqual(stream.line_no, 1)

    @patch("generate_test_code.parse_function_dependencies")
    def test_begin_function_dep(self, func_mock):
        """
        Test that begin dep is checked and parse_function_dependencies() is
        called.
        :return:
        """
        def stop(*_unused):
            """Stop when parse_until_pattern is called."""
            raise Exception
        func_mock.side_effect = stop

        dependencies_str = '/* BEGIN_CASE ' \
            'depends_on:MBEDTLS_ENTROPY_NV_SEED:MBEDTLS_FS_IO */\n'
        data = '''%svoid test_func()
{
}
''' % dependencies_str
        stream = StringIOWrapper('test_suite_ut.function', data)
        self.assertRaises(Exception, parse_functions, stream)
        # The whole BEGIN_CASE line (with its dependency list) is handed to
        # parse_function_dependencies().
        func_mock.assert_called_with(dependencies_str)
        self.assertEqual(stream.line_no, 1)

    @patch("generate_test_code.parse_function_code")
    @patch("generate_test_code.parse_function_dependencies")
    def test_return(self, func_mock1, func_mock2):
        """
        Test that begin case is checked and parse_function_code() is called.
        :return:
        """
        func_mock1.return_value = []
        in_func_code = '''void test_func()
{
}
'''
        func_dispatch = '''
    test_func_wrapper,
'''
        # parse_function_code is mocked to return a fixed 4-tuple:
        # (function name, args, function code, dispatch code).
        func_mock2.return_value = 'test_func', [],\
            in_func_code, func_dispatch
        dependencies_str = '/* BEGIN_CASE ' \
            'depends_on:MBEDTLS_ENTROPY_NV_SEED:MBEDTLS_FS_IO */\n'
        data = '''%svoid test_func()
{
}
''' % dependencies_str
        stream = StringIOWrapper('test_suite_ut.function', data)
        suite_dependencies, dispatch_code, func_code, func_info = \
            parse_functions(stream)
        func_mock1.assert_called_with(dependencies_str)
        func_mock2.assert_called_with(stream, [], [])
        self.assertEqual(stream.line_no, 5)
        self.assertEqual(suite_dependencies, [])
        expected_dispatch_code = '''/* Function Id: 0 */

    test_func_wrapper,
'''
        self.assertEqual(dispatch_code, expected_dispatch_code)
        self.assertEqual(func_code, in_func_code)
        self.assertEqual(func_info, {'test_func': (0, [])})

    def test_parsing(self):
        """
        Test case parsing.
        :return:
        """
        data = '''/* BEGIN_HEADER */
#include "mbedtls/ecp.h"

#define ECP_PF_UNKNOWN -1
/* END_HEADER */

/* BEGIN_DEPENDENCIES
 * depends_on:MBEDTLS_ECP_C
 * END_DEPENDENCIES
 */

/* BEGIN_CASE depends_on:MBEDTLS_ENTROPY_NV_SEED:MBEDTLS_FS_IO */
void func1()
{
}
/* END_CASE */

/* BEGIN_CASE depends_on:MBEDTLS_ENTROPY_NV_SEED:MBEDTLS_FS_IO */
void func2()
{
}
/* END_CASE */
'''
        stream = StringIOWrapper('test_suite_ut.function', data)
        suite_dependencies, dispatch_code, func_code, func_info = \
            parse_functions(stream)
        self.assertEqual(stream.line_no, 23)
        self.assertEqual(suite_dependencies, ['MBEDTLS_ECP_C'])

        # Each function's dispatch entry is guarded by the suite dependency
        # plus its own BEGIN_CASE dependencies.
        expected_dispatch_code = '''/* Function Id: 0 */

#if defined(MBEDTLS_ECP_C) && defined(MBEDTLS_ENTROPY_NV_SEED) && defined(MBEDTLS_FS_IO)
    test_func1_wrapper,
#else
    NULL,
#endif
/* Function Id: 1 */

#if defined(MBEDTLS_ECP_C) && defined(MBEDTLS_ENTROPY_NV_SEED) && defined(MBEDTLS_FS_IO)
    test_func2_wrapper,
#else
    NULL,
#endif
'''
        self.assertEqual(dispatch_code, expected_dispatch_code)
        expected_func_code = '''#if defined(MBEDTLS_ECP_C)
#line 2 "test_suite_ut.function"
#include "mbedtls/ecp.h"

#define ECP_PF_UNKNOWN -1
#if defined(MBEDTLS_ENTROPY_NV_SEED)
#if defined(MBEDTLS_FS_IO)
#line 13 "test_suite_ut.function"
void test_func1(void)
{
exit:
    ;
}

void test_func1_wrapper( void ** params )
{
    (void)params;

    test_func1(  );
}
#endif /* MBEDTLS_FS_IO */
#endif /* MBEDTLS_ENTROPY_NV_SEED */
#if defined(MBEDTLS_ENTROPY_NV_SEED)
#if defined(MBEDTLS_FS_IO)
#line 19 "test_suite_ut.function"
void test_func2(void)
{
exit:
    ;
}

void test_func2_wrapper( void ** params )
{
    (void)params;

    test_func2(  );
}
#endif /* MBEDTLS_FS_IO */
#endif /* MBEDTLS_ENTROPY_NV_SEED */
#endif /* MBEDTLS_ECP_C */
'''
        self.assertEqual(func_code, expected_func_code)
        self.assertEqual(func_info, {'test_func1': (0, []),
                                     'test_func2': (1, [])})

    def test_same_function_name(self):
        """
        Test name conflict.
        :return:
        """
        # Two BEGIN_CASE blocks define the same function name; the
        # generator must reject the duplicate.
        data = '''/* BEGIN_HEADER */
#include "mbedtls/ecp.h"

#define ECP_PF_UNKNOWN -1
/* END_HEADER */

/* BEGIN_DEPENDENCIES
 * depends_on:MBEDTLS_ECP_C
 * END_DEPENDENCIES
 */

/* BEGIN_CASE depends_on:MBEDTLS_ENTROPY_NV_SEED:MBEDTLS_FS_IO */
void func()
{
}
/* END_CASE */

/* BEGIN_CASE depends_on:MBEDTLS_ENTROPY_NV_SEED:MBEDTLS_FS_IO */
void func()
{
}
/* END_CASE */
'''
        stream = StringIOWrapper('test_suite_ut.function', data)
        self.assertRaises(GeneratorInputError, parse_functions, stream)


class EscapedSplit(TestCase):
    """
    Test suite for testing escaped_split().
    Note: Since escaped_split() output is used to write back to the
    intermediate data file. Any escape characters in the input are
    retained in the output.
    """

    def test_invalid_input(self):
        """
        Test when input split character is not a character.
        :return:
        """
        self.assertRaises(ValueError, escaped_split, '', 'string')

    def test_empty_string(self):
        """
        Test empty string input.
        :return:
        """
        splits = escaped_split('', ':')
        self.assertEqual(splits, [])

    def test_no_escape(self):
        """
        Test with no escape character. The behaviour should be same as
        str.split()
        :return:
        """
        test_str = 'yahoo:google'
        splits = escaped_split(test_str, ':')
        self.assertEqual(splits, test_str.split(':'))

    def test_escaped_input(self):
        """
        Test input that has escaped delimiter.
        :return:
        """
        test_str = r'yahoo\:google:facebook'
        splits = escaped_split(test_str, ':')
        self.assertEqual(splits, [r'yahoo\:google', 'facebook'])

    def test_escaped_escape(self):
        """
        Test input that has escaped delimiter.
        :return:
        """
        # The escape character escapes itself, so the ':' after '\\' is a
        # real delimiter here.
        test_str = r'yahoo\\:google:facebook'
        splits = escaped_split(test_str, ':')
        self.assertEqual(splits, [r'yahoo\\', 'google', 'facebook'])

    def test_all_at_once(self):
        """
        Test input that has escaped delimiter.
+ :return: + """ + test_str = r'yahoo\\:google:facebook\:instagram\\:bbc\\:wikipedia' + splits = escaped_split(test_str, ':') + self.assertEqual(splits, [r'yahoo\\', r'google', + r'facebook\:instagram\\', + r'bbc\\', r'wikipedia']) + + +class ParseTestData(TestCase): + """ + Test suite for parse test data. + """ + + def test_parser(self): + """ + Test that tests are parsed correctly from data file. + :return: + """ + data = """ +Diffie-Hellman full exchange #1 +dhm_do_dhm:10:"23":10:"5" + +Diffie-Hellman full exchange #2 +dhm_do_dhm:10:"93450983094850938450983409623":10:"9345098304850938450983409622" + +Diffie-Hellman full exchange #3 +dhm_do_dhm:10:"9345098382739712938719287391879381271":10:"9345098792137312973297123912791271" + +Diffie-Hellman selftest +dhm_selftest: +""" + stream = StringIOWrapper('test_suite_ut.function', data) + # List of (name, function_name, dependencies, args) + tests = list(parse_test_data(stream)) + test1, test2, test3, test4 = tests + self.assertEqual(test1[0], 3) + self.assertEqual(test1[1], 'Diffie-Hellman full exchange #1') + self.assertEqual(test1[2], 'dhm_do_dhm') + self.assertEqual(test1[3], []) + self.assertEqual(test1[4], ['10', '"23"', '10', '"5"']) + + self.assertEqual(test2[0], 6) + self.assertEqual(test2[1], 'Diffie-Hellman full exchange #2') + self.assertEqual(test2[2], 'dhm_do_dhm') + self.assertEqual(test2[3], []) + self.assertEqual(test2[4], ['10', '"93450983094850938450983409623"', + '10', '"9345098304850938450983409622"']) + + self.assertEqual(test3[0], 9) + self.assertEqual(test3[1], 'Diffie-Hellman full exchange #3') + self.assertEqual(test3[2], 'dhm_do_dhm') + self.assertEqual(test3[3], []) + self.assertEqual(test3[4], ['10', + '"9345098382739712938719287391879381271"', + '10', + '"9345098792137312973297123912791271"']) + + self.assertEqual(test4[0], 12) + self.assertEqual(test4[1], 'Diffie-Hellman selftest') + self.assertEqual(test4[2], 'dhm_selftest') + self.assertEqual(test4[3], []) + self.assertEqual(test4[4], 
[]) + + def test_with_dependencies(self): + """ + Test that tests with dependencies are parsed. + :return: + """ + data = """ +Diffie-Hellman full exchange #1 +depends_on:YAHOO +dhm_do_dhm:10:"23":10:"5" + +Diffie-Hellman full exchange #2 +dhm_do_dhm:10:"93450983094850938450983409623":10:"9345098304850938450983409622" + +""" + stream = StringIOWrapper('test_suite_ut.function', data) + # List of (name, function_name, dependencies, args) + tests = list(parse_test_data(stream)) + test1, test2 = tests + self.assertEqual(test1[0], 4) + self.assertEqual(test1[1], 'Diffie-Hellman full exchange #1') + self.assertEqual(test1[2], 'dhm_do_dhm') + self.assertEqual(test1[3], ['YAHOO']) + self.assertEqual(test1[4], ['10', '"23"', '10', '"5"']) + + self.assertEqual(test2[0], 7) + self.assertEqual(test2[1], 'Diffie-Hellman full exchange #2') + self.assertEqual(test2[2], 'dhm_do_dhm') + self.assertEqual(test2[3], []) + self.assertEqual(test2[4], ['10', '"93450983094850938450983409623"', + '10', '"9345098304850938450983409622"']) + + def test_no_args(self): + """ + Test GeneratorInputError is raised when test function name and + args line is missing. + :return: + """ + data = """ +Diffie-Hellman full exchange #1 +depends_on:YAHOO + + +Diffie-Hellman full exchange #2 +dhm_do_dhm:10:"93450983094850938450983409623":10:"9345098304850938450983409622" + +""" + stream = StringIOWrapper('test_suite_ut.function', data) + err = None + try: + for _, _, _, _, _ in parse_test_data(stream): + pass + except GeneratorInputError as err: + self.assertEqual(type(err), GeneratorInputError) + + def test_incomplete_data(self): + """ + Test GeneratorInputError is raised when test function name + and args line is missing. 
+ :return: + """ + data = """ +Diffie-Hellman full exchange #1 +depends_on:YAHOO +""" + stream = StringIOWrapper('test_suite_ut.function', data) + err = None + try: + for _, _, _, _, _ in parse_test_data(stream): + pass + except GeneratorInputError as err: + self.assertEqual(type(err), GeneratorInputError) + + +class GenDepCheck(TestCase): + """ + Test suite for gen_dep_check(). It is assumed this function is + called with valid inputs. + """ + + def test_gen_dep_check(self): + """ + Test that dependency check code generated correctly. + :return: + """ + expected = """ + case 5: + { +#if defined(YAHOO) + ret = DEPENDENCY_SUPPORTED; +#else + ret = DEPENDENCY_NOT_SUPPORTED; +#endif + } + break;""" + out = gen_dep_check(5, 'YAHOO') + self.assertEqual(out, expected) + + def test_not_defined_dependency(self): + """ + Test dependency with !. + :return: + """ + expected = """ + case 5: + { +#if !defined(YAHOO) + ret = DEPENDENCY_SUPPORTED; +#else + ret = DEPENDENCY_NOT_SUPPORTED; +#endif + } + break;""" + out = gen_dep_check(5, '!YAHOO') + self.assertEqual(out, expected) + + def test_empty_dependency(self): + """ + Test invalid dependency input. + :return: + """ + self.assertRaises(GeneratorInputError, gen_dep_check, 5, '!') + + def test_negative_dep_id(self): + """ + Test invalid dependency input. + :return: + """ + self.assertRaises(GeneratorInputError, gen_dep_check, -1, 'YAHOO') + + +class GenExpCheck(TestCase): + """ + Test suite for gen_expression_check(). It is assumed this function + is called with valid inputs. + """ + + def test_gen_exp_check(self): + """ + Test that expression check code generated correctly. + :return: + """ + expected = """ + case 5: + { + *out_value = YAHOO; + } + break;""" + out = gen_expression_check(5, 'YAHOO') + self.assertEqual(out, expected) + + def test_invalid_expression(self): + """ + Test invalid expression input. 
        :return:
        """
        self.assertRaises(GeneratorInputError, gen_expression_check, 5, '')

    def test_negative_exp_id(self):
        """
        Test invalid expression id.
        :return:
        """
        self.assertRaises(GeneratorInputError, gen_expression_check,
                          -1, 'YAHOO')


class WriteDependencies(TestCase):
    """
    Test suite for testing write_dependencies.
    """

    def test_no_test_dependencies(self):
        """
        Test when test dependencies input is empty.
        :return:
        """
        stream = StringIOWrapper('test_suite_ut.data', '')
        unique_dependencies = []
        dep_check_code = write_dependencies(stream, [], unique_dependencies)
        self.assertEqual(dep_check_code, '')
        self.assertEqual(len(unique_dependencies), 0)
        self.assertEqual(stream.getvalue(), '')

    def test_unique_dep_ids(self):
        """
        Test that new dependencies are assigned increasing ids and that
        the intermediate data file line references those ids in order.
        :return:
        """
        stream = StringIOWrapper('test_suite_ut.data', '')
        unique_dependencies = []
        dep_check_code = write_dependencies(stream, ['DEP3', 'DEP2', 'DEP1'],
                                            unique_dependencies)
        expect_dep_check_code = '''
        case 0:
            {
#if defined(DEP3)
                ret = DEPENDENCY_SUPPORTED;
#else
                ret = DEPENDENCY_NOT_SUPPORTED;
#endif
            }
            break;
        case 1:
            {
#if defined(DEP2)
                ret = DEPENDENCY_SUPPORTED;
#else
                ret = DEPENDENCY_NOT_SUPPORTED;
#endif
            }
            break;
        case 2:
            {
#if defined(DEP1)
                ret = DEPENDENCY_SUPPORTED;
#else
                ret = DEPENDENCY_NOT_SUPPORTED;
#endif
            }
            break;'''
        self.assertEqual(dep_check_code, expect_dep_check_code)
        self.assertEqual(len(unique_dependencies), 3)
        self.assertEqual(stream.getvalue(), 'depends_on:0:1:2\n')

    def test_dep_id_repeat(self):
        """
        Test that previously seen dependencies reuse their existing ids
        instead of being assigned new ones.
        :return:
        """
        stream = StringIOWrapper('test_suite_ut.data', '')
        unique_dependencies = []
        dep_check_code = ''
        dep_check_code += write_dependencies(stream, ['DEP3', 'DEP2'],
                                             unique_dependencies)
        dep_check_code += write_dependencies(stream, ['DEP2', 'DEP1'],
                                             unique_dependencies)
        dep_check_code += write_dependencies(stream, ['DEP1', 'DEP3'],
                                             unique_dependencies)
        expect_dep_check_code = '''
        case 0:
            {
#if defined(DEP3)
                ret = DEPENDENCY_SUPPORTED;
#else
                ret = DEPENDENCY_NOT_SUPPORTED;
#endif
            }
            break;
        case 1:
            {
#if defined(DEP2)
                ret = DEPENDENCY_SUPPORTED;
#else
                ret = DEPENDENCY_NOT_SUPPORTED;
#endif
            }
            break;
        case 2:
            {
#if defined(DEP1)
                ret = DEPENDENCY_SUPPORTED;
#else
                ret = DEPENDENCY_NOT_SUPPORTED;
#endif
            }
            break;'''
        self.assertEqual(dep_check_code, expect_dep_check_code)
        self.assertEqual(len(unique_dependencies), 3)
        # Only the first occurrence generates check code; every data line
        # still references the dependency by its (reused) id.
        self.assertEqual(stream.getvalue(),
                         'depends_on:0:1\ndepends_on:1:2\ndepends_on:2:0\n')


class WriteParams(TestCase):
    """
    Test Suite for testing write_parameters().
    """

    def test_no_params(self):
        """
        Test with empty test_args
        :return:
        """
        stream = StringIOWrapper('test_suite_ut.data', '')
        unique_expressions = []
        expression_code = write_parameters(stream, [], [], unique_expressions)
        self.assertEqual(len(unique_expressions), 0)
        self.assertEqual(expression_code, '')
        self.assertEqual(stream.getvalue(), '\n')

    def test_no_exp_param(self):
        """
        Test when there is no macro or expression in the params.
        :return:
        """
        stream = StringIOWrapper('test_suite_ut.data', '')
        unique_expressions = []
        expression_code = write_parameters(stream, ['"Yahoo"', '"abcdef00"',
                                                    '0'],
                                           ['char*', 'hex', 'int'],
                                           unique_expressions)
        self.assertEqual(len(unique_expressions), 0)
        self.assertEqual(expression_code, '')
        self.assertEqual(stream.getvalue(),
                         ':char*:"Yahoo":hex:"abcdef00":int:0\n')

    def test_hex_format_int_param(self):
        """
        Test int parameter in hex format.
        :return:
        """
        stream = StringIOWrapper('test_suite_ut.data', '')
        unique_expressions = []
        expression_code = write_parameters(stream,
                                           ['"Yahoo"', '"abcdef00"', '0xAA'],
                                           ['char*', 'hex', 'int'],
                                           unique_expressions)
        # 0xAA is a plain integer literal, not an expression: nothing is
        # added to unique_expressions and the value passes through as-is.
        self.assertEqual(len(unique_expressions), 0)
        self.assertEqual(expression_code, '')
        self.assertEqual(stream.getvalue(),
                         ':char*:"Yahoo":hex:"abcdef00":int:0xAA\n')

    def test_with_exp_param(self):
        """
        Test when there is macro or expression in the params.
        :return:
        """
        stream = StringIOWrapper('test_suite_ut.data', '')
        unique_expressions = []
        expression_code = write_parameters(stream,
                                           ['"Yahoo"', '"abcdef00"', '0',
                                            'MACRO1', 'MACRO2', 'MACRO3'],
                                           ['char*', 'hex', 'int',
                                            'int', 'int', 'int'],
                                           unique_expressions)
        # Macros are collected in first-seen order and written to the data
        # file as 'exp:<id>' references.
        self.assertEqual(len(unique_expressions), 3)
        self.assertEqual(unique_expressions, ['MACRO1', 'MACRO2', 'MACRO3'])
        expected_expression_code = '''
        case 0:
            {
                *out_value = MACRO1;
            }
            break;
        case 1:
            {
                *out_value = MACRO2;
            }
            break;
        case 2:
            {
                *out_value = MACRO3;
            }
            break;'''
        self.assertEqual(expression_code, expected_expression_code)
        self.assertEqual(stream.getvalue(),
                         ':char*:"Yahoo":hex:"abcdef00":int:0:exp:0:exp:1'
                         ':exp:2\n')

    def test_with_repeat_calls(self):
        """
        Test when write_parameter() is called with same macro or expression.
        :return:
        """
        stream = StringIOWrapper('test_suite_ut.data', '')
        unique_expressions = []
        expression_code = ''
        # Macros repeated across calls must reuse their existing exp ids
        # (MACRO1..MACRO3 keep ids 0..2 in first-seen order).
        expression_code += write_parameters(stream,
                                            ['"Yahoo"', 'MACRO1', 'MACRO2'],
                                            ['char*', 'int', 'int'],
                                            unique_expressions)
        expression_code += write_parameters(stream,
                                            ['"abcdef00"', 'MACRO2', 'MACRO3'],
                                            ['hex', 'int', 'int'],
                                            unique_expressions)
        expression_code += write_parameters(stream,
                                            ['0', 'MACRO3', 'MACRO1'],
                                            ['int', 'int', 'int'],
                                            unique_expressions)
        self.assertEqual(len(unique_expressions), 3)
        self.assertEqual(unique_expressions, ['MACRO1', 'MACRO2', 'MACRO3'])
        expected_expression_code = '''
        case 0:
            {
                *out_value = MACRO1;
            }
            break;
        case 1:
            {
                *out_value = MACRO2;
            }
            break;
        case 2:
            {
                *out_value = MACRO3;
            }
            break;'''
        self.assertEqual(expression_code, expected_expression_code)
        expected_data_file = ''':char*:"Yahoo":exp:0:exp:1
:hex:"abcdef00":exp:1:exp:2
:int:0:exp:2:exp:0
'''
        self.assertEqual(stream.getvalue(), expected_data_file)


class GenTestSuiteDependenciesChecks(TestCase):
    """
    Test suite for testing gen_suite_dep_checks()
    """
    def test_empty_suite_dependencies(self):
        """
        Test with empty suite_dependencies list.

        :return:
        """
        # No suite dependencies: the code passes through unwrapped.
        dep_check_code, expression_code = \
            gen_suite_dep_checks([], 'DEP_CHECK_CODE', 'EXPRESSION_CODE')
        self.assertEqual(dep_check_code, 'DEP_CHECK_CODE')
        self.assertEqual(expression_code, 'EXPRESSION_CODE')

    def test_suite_dependencies(self):
        """
        Test with suite_dependencies list.

        :return:
        """
        # Both code fragments get wrapped in the suite-level #if guard.
        dep_check_code, expression_code = \
            gen_suite_dep_checks(['SUITE_DEP'], 'DEP_CHECK_CODE',
                                 'EXPRESSION_CODE')
        expected_dep_check_code = '''
#if defined(SUITE_DEP)
DEP_CHECK_CODE
#endif
'''
        expected_expression_code = '''
#if defined(SUITE_DEP)
EXPRESSION_CODE
#endif
'''
        self.assertEqual(dep_check_code, expected_dep_check_code)
        self.assertEqual(expression_code, expected_expression_code)

    def test_no_dep_no_exp(self):
        """
        Test when there are no dependency and expression code.
        :return:
        """
        dep_check_code, expression_code = gen_suite_dep_checks([], '', '')
        self.assertEqual(dep_check_code, '')
        self.assertEqual(expression_code, '')


class GenFromTestData(TestCase):
    """
    Test suite for gen_from_test_data()
    """

    # patch decorators inject mocks bottom-up: the innermost patch
    # (gen_suite_dep_checks) arrives as the first parameter.
    @staticmethod
    @patch("generate_test_code.write_dependencies")
    @patch("generate_test_code.write_parameters")
    @patch("generate_test_code.gen_suite_dep_checks")
    def test_intermediate_data_file(func_mock1,
                                    write_parameters_mock,
                                    write_dependencies_mock):
        """
        Test that intermediate data file is written with expected data.
        :return:
        """
        data = '''
My test
depends_on:DEP1
func1:0
'''
        data_f = StringIOWrapper('test_suite_ut.data', data)
        out_data_f = StringIOWrapper('test_suite_ut.datax', '')
        func_info = {'test_func1': (1, ('int',))}
        suite_dependencies = []
        # The mocks delegate to the real implementations so the call
        # arguments observed below are the real intermediate values.
        write_parameters_mock.side_effect = write_parameters
        write_dependencies_mock.side_effect = write_dependencies
        func_mock1.side_effect = gen_suite_dep_checks
        gen_from_test_data(data_f, out_data_f, func_info, suite_dependencies)
        write_dependencies_mock.assert_called_with(out_data_f,
                                                   ['DEP1'], ['DEP1'])
        write_parameters_mock.assert_called_with(out_data_f, ['0'],
                                                 ('int',), [])
        expected_dep_check_code = '''
        case 0:
            {
#if defined(DEP1)
                ret = DEPENDENCY_SUPPORTED;
#else
                ret = DEPENDENCY_NOT_SUPPORTED;
#endif
            }
            break;'''
        func_mock1.assert_called_with(
            suite_dependencies, expected_dep_check_code, '')

    def test_function_not_found(self):
        """
        Test that GeneratorInputError is raised when function info is not
        found.
        :return:
        """
        data = '''
My test
depends_on:DEP1
func1:0
'''
        data_f = StringIOWrapper('test_suite_ut.data', data)
        out_data_f = StringIOWrapper('test_suite_ut.datax', '')
        func_info = {'test_func2': (1, ('int',))}
        suite_dependencies = []
        self.assertRaises(GeneratorInputError, gen_from_test_data,
                          data_f, out_data_f, func_info, suite_dependencies)

    def test_different_func_args(self):
        """
        Test that GeneratorInputError is raised when the number of
        parameters and function args differ.
        :return:
        """
        data = '''
My test
depends_on:DEP1
func1:0
'''
        data_f = StringIOWrapper('test_suite_ut.data', data)
        out_data_f = StringIOWrapper('test_suite_ut.datax', '')
        func_info = {'test_func2': (1, ('int', 'hex'))}
        suite_dependencies = []
        self.assertRaises(GeneratorInputError, gen_from_test_data, data_f,
                          out_data_f, func_info, suite_dependencies)

    def test_output(self):
        """
        Test that intermediate data file is written with expected data.
        :return:
        """
        data = '''
My test 1
depends_on:DEP1
func1:0:0xfa:MACRO1:MACRO2

My test 2
depends_on:DEP1:DEP2
func2:"yahoo":88:MACRO1
'''
        data_f = StringIOWrapper('test_suite_ut.data', data)
        out_data_f = StringIOWrapper('test_suite_ut.datax', '')
        func_info = {'test_func1': (0, ('int', 'int', 'int', 'int')),
                     'test_func2': (1, ('char*', 'int', 'int'))}
        suite_dependencies = []
        dep_check_code, expression_code = \
            gen_from_test_data(data_f, out_data_f, func_info,
                               suite_dependencies)
        expected_dep_check_code = '''
        case 0:
            {
#if defined(DEP1)
                ret = DEPENDENCY_SUPPORTED;
#else
                ret = DEPENDENCY_NOT_SUPPORTED;
#endif
            }
            break;
        case 1:
            {
#if defined(DEP2)
                ret = DEPENDENCY_SUPPORTED;
#else
                ret = DEPENDENCY_NOT_SUPPORTED;
#endif
            }
            break;'''
        # Dependencies and macros are shared across tests: DEP1 and MACRO1
        # reuse ids 0 in the second test's lines.
        expected_data = '''My test 1
depends_on:0
0:int:0:int:0xfa:exp:0:exp:1

My test 2
depends_on:0:1
1:char*:"yahoo":int:88:exp:0

'''
        expected_expression_code = '''
        case 0:
            {
                *out_value = MACRO1;
            }
            break;
        case 1:
            {
                *out_value = MACRO2;
            }
            break;'''
        self.assertEqual(dep_check_code, expected_dep_check_code)
        self.assertEqual(out_data_f.getvalue(), expected_data)
        self.assertEqual(expression_code, expected_expression_code)


# Entry point: run every test suite in this module.
if __name__ == '__main__':
    unittest_main()