Initial check in

Bug: 137197907
diff --git a/src/llvm-project/clang/tools/scan-build-py/libscanbuild/__init__.py b/src/llvm-project/clang/tools/scan-build-py/libscanbuild/__init__.py
new file mode 100644
index 0000000..903207c
--- /dev/null
+++ b/src/llvm-project/clang/tools/scan-build-py/libscanbuild/__init__.py
@@ -0,0 +1,208 @@
+# -*- coding: utf-8 -*-
+#                     The LLVM Compiler Infrastructure
+#
+# This file is distributed under the University of Illinois Open Source
+# License. See LICENSE.TXT for details.
+""" This module is a collection of methods commonly used in this project. """
+import collections
+import functools
+import json
+import logging
+import os
+import os.path
+import re
+import shlex
+import subprocess
+import sys
+
+ENVIRONMENT_KEY = 'INTERCEPT_BUILD'
+
+Execution = collections.namedtuple('Execution', ['pid', 'cwd', 'cmd'])
+
+CtuConfig = collections.namedtuple('CtuConfig', ['collect', 'analyze', 'dir',
+                                                 'extdef_map_cmd'])
+
+
+def duplicate_check(method):
+    """ Predicate to detect duplicated entries.
+
+    A unique hash method can be used to detect duplicates. Entries are
+    represented as dictionaries, which have no default hash method.
+    This implementation uses a set to store the unique hash values.
+
+    This function returns a predicate which detects duplicate values. """
+
+    def predicate(entry):
+        entry_hash = predicate.unique(entry)
+        if entry_hash not in predicate.state:
+            predicate.state.add(entry_hash)
+            return False
+        return True
+
+    predicate.unique = method
+    predicate.state = set()
+    return predicate
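+
+# A minimal usage sketch of duplicate_check (illustrative; 'entries' and the
+# hashing lambda below are hypothetical): build a predicate from a hash
+# function, then use it to filter a stream of entries.
+#
+#     is_duplicate = duplicate_check(
+#         lambda entry: hash(json.dumps(entry, sort_keys=True)))
+#     unique_entries = [e for e in entries if not is_duplicate(e)]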
+
+
+def run_build(command, *args, **kwargs):
+    """ Run and report build command execution
+
+    :param command: array of tokens
+    :return: exit code of the process
+    """
+    environment = kwargs.get('env', os.environ)
+    logging.debug('run build %s, in environment: %s', command, environment)
+    exit_code = subprocess.call(command, *args, **kwargs)
+    logging.debug('build finished with exit code: %d', exit_code)
+    return exit_code
+
+
+def run_command(command, cwd=None):
+    """ Run a given command and report the execution.
+
+    :param command: array of tokens
+    :param cwd: the working directory where the command will be executed
+    :return: output of the command
+    """
+    def decode_when_needed(result):
+        """ check_output returns bytes or string depend on python version """
+        return result.decode('utf-8') if isinstance(result, bytes) else result
+
+    try:
+        directory = os.path.abspath(cwd) if cwd else os.getcwd()
+        logging.debug('exec command %s in %s', command, directory)
+        output = subprocess.check_output(command,
+                                         cwd=directory,
+                                         stderr=subprocess.STDOUT)
+        return decode_when_needed(output).splitlines()
+    except subprocess.CalledProcessError as ex:
+        ex.output = decode_when_needed(ex.output).splitlines()
+        raise ex
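+
+# A typical call (illustrative): run_command returns the captured output as a
+# list of lines, so run_command(['clang', '--version'])[0] would yield the
+# version banner line.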
+
+
+def reconfigure_logging(verbose_level):
+    """ Reconfigure logging level and format based on the verbose flag.
+
+    :param verbose_level: number of `-v` flags received by the command
+    :return: no return value
+    """
+    # Exit when nothing to do.
+    if verbose_level == 0:
+        return
+
+    root = logging.getLogger()
+    # Tune logging level.
+    level = logging.WARNING - min(logging.WARNING, (10 * verbose_level))
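+    # (-v maps to INFO, -vv to DEBUG, -vvv and beyond to NOTSET, which
+    # lets everything through.)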
+    root.setLevel(level)
+    # Be verbose with messages.
+    if verbose_level <= 3:
+        fmt_string = '%(name)s: %(levelname)s: %(message)s'
+    else:
+        fmt_string = '%(name)s: %(levelname)s: %(funcName)s: %(message)s'
+    handler = logging.StreamHandler(sys.stdout)
+    handler.setFormatter(logging.Formatter(fmt=fmt_string))
+    root.handlers = [handler]
+
+
+def command_entry_point(function):
+    """ Decorator for command entry methods.
+
+    The decorator initializes/shuts down logging, and guards against
+    programming errors (catches exceptions).
+
+    The decorated method can have arbitrary parameters; the return value
+    will be the exit code of the process. """
+
+    @functools.wraps(function)
+    def wrapper(*args, **kwargs):
+        """ Do housekeeping tasks and execute the wrapped method. """
+
+        try:
+            logging.basicConfig(format='%(name)s: %(message)s',
+                                level=logging.WARNING,
+                                stream=sys.stdout)
+            # This hack is to get the executable name as %(name).
+            logging.getLogger().name = os.path.basename(sys.argv[0])
+            return function(*args, **kwargs)
+        except KeyboardInterrupt:
+            logging.warning('Keyboard interrupt')
+            return 130  # Signal-received exit code, as used by bash.
+        except Exception:
+            logging.exception('Internal error.')
+            if logging.getLogger().isEnabledFor(logging.DEBUG):
+                logging.error("Please report this bug and attach the output "
+                              "to the bug report")
+            else:
+                logging.error("Please run this command again and turn on "
+                              "verbose mode (add '-vvvv' as argument).")
+            return 64  # An otherwise unused exit code for internal errors.
+        finally:
+            logging.shutdown()
+
+    return wrapper
+
+
+def compiler_wrapper(function):
+    """ Implements compiler wrapper base functionality.
+
+    A compiler wrapper executes the real compiler, then implements some
+    extra functionality, then returns with the real compiler's exit code.
+
+    :param function: the extra functionality that the wrapper wants to
+    perform on top of the compiler call. If it throws an exception, it
+    will be caught and logged.
+    :return: the exit code of the real compiler.
+
+    The :param function: will receive the following arguments:
+
+    :param result:       the exit code of the compilation.
+    :param execution:    the command executed by the wrapper. """
+
+    def is_cxx_compiler():
+        """ Find out was it a C++ compiler call. Compiler wrapper names
+        contain the compiler type. C++ compiler wrappers ends with `c++`,
+        but might have `.exe` extension on windows. """
+
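+        # e.g. 'analyze-c++' or 'intercept-c++.exe' match the pattern below,
+        # while 'analyze-cc' does not.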
+        wrapper_command = os.path.basename(sys.argv[0])
+        return re.match(r'(.+)c\+\+(.*)', wrapper_command)
+
+    def run_compiler(executable):
+        """ Execute compilation with the real compiler. """
+
+        command = executable + sys.argv[1:]
+        logging.debug('compilation: %s', command)
+        result = subprocess.call(command)
+        logging.debug('compilation exit code: %d', result)
+        return result
+
+    # Get relevant parameters from environment.
+    parameters = json.loads(os.environ[ENVIRONMENT_KEY])
+    reconfigure_logging(parameters['verbose'])
+    # Execute the requested compilation. Do crash if anything goes wrong.
+    cxx = is_cxx_compiler()
+    compiler = parameters['cxx'] if cxx else parameters['cc']
+    result = run_compiler(compiler)
+    # Call the wrapped method and ignore its return value.
+    try:
+        call = Execution(
+            pid=os.getpid(),
+            cwd=os.getcwd(),
+            cmd=['c++' if cxx else 'cc'] + sys.argv[1:])
+        function(result, call)
+    except:
+        logging.exception('Compiler wrapper failed to complete.')
+    finally:
+        # Always return the real compiler exit code.
+        return result
+
+
+def wrapper_environment(args):
+    """ Set up environment for interpose compiler wrapper."""
+
+    return {
+        ENVIRONMENT_KEY: json.dumps({
+            'verbose': args.verbose,
+            'cc': shlex.split(args.cc),
+            'cxx': shlex.split(args.cxx)
+        })
+    }
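+
+# The resulting environment carries the wrapper configuration as JSON, e.g.
+# (illustrative values):
+#     INTERCEPT_BUILD='{"verbose": 2, "cc": ["cc"], "cxx": ["c++"]}'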
diff --git a/src/llvm-project/clang/tools/scan-build-py/libscanbuild/analyze.py b/src/llvm-project/clang/tools/scan-build-py/libscanbuild/analyze.py
new file mode 100644
index 0000000..ab8ea62
--- /dev/null
+++ b/src/llvm-project/clang/tools/scan-build-py/libscanbuild/analyze.py
@@ -0,0 +1,781 @@
+# -*- coding: utf-8 -*-
+#                     The LLVM Compiler Infrastructure
+#
+# This file is distributed under the University of Illinois Open Source
+# License. See LICENSE.TXT for details.
+""" This module implements the 'scan-build' command API.
+
+Running the static analyzer against a build is done in multiple steps:
+
+ -- Intercept: capture the compilation command during the build,
+ -- Analyze:   run the analyzer against the captured commands,
+ -- Report:    create a report from the analyzer outputs.  """
+
+import re
+import os
+import os.path
+import json
+import logging
+import multiprocessing
+import tempfile
+import functools
+import subprocess
+import contextlib
+import datetime
+import shutil
+import glob
+from collections import defaultdict
+
+from libscanbuild import command_entry_point, compiler_wrapper, \
+    wrapper_environment, run_build, run_command, CtuConfig
+from libscanbuild.arguments import parse_args_for_scan_build, \
+    parse_args_for_analyze_build
+from libscanbuild.intercept import capture
+from libscanbuild.report import document
+from libscanbuild.compilation import split_command, classify_source, \
+    compiler_language
+from libscanbuild.clang import get_version, get_arguments, get_triple_arch
+from libscanbuild.shell import decode
+
+__all__ = ['scan_build', 'analyze_build', 'analyze_compiler_wrapper']
+
+COMPILER_WRAPPER_CC = 'analyze-cc'
+COMPILER_WRAPPER_CXX = 'analyze-c++'
+
+CTU_EXTDEF_MAP_FILENAME = 'externalDefMap.txt'
+CTU_TEMP_DEFMAP_FOLDER = 'tmpExternalDefMaps'
+
+
+@command_entry_point
+def scan_build():
+    """ Entry point for scan-build command. """
+
+    args = parse_args_for_scan_build()
+    # will re-assign the report directory as new output
+    with report_directory(args.output, args.keep_empty) as args.output:
+        # Run against a build command. There are cases when an analyzer run
+        # is not required, but we still need to set up everything for the
+        # wrappers, because 'configure' needs to capture the CC/CXX values
+        # for the Makefile.
+        if args.intercept_first:
+            # Run build command with intercept module.
+            exit_code = capture(args)
+            # Run the analyzer against the captured commands.
+            if need_analyzer(args.build):
+                govern_analyzer_runs(args)
+        else:
+            # Run build command and analyzer with compiler wrappers.
+            environment = setup_environment(args)
+            exit_code = run_build(args.build, env=environment)
+        # Cover report generation and bug counting.
+        number_of_bugs = document(args)
+        # Set exit status as it was requested.
+        return number_of_bugs if args.status_bugs else exit_code
+
+
+@command_entry_point
+def analyze_build():
+    """ Entry point for analyze-build command. """
+
+    args = parse_args_for_analyze_build()
+    # will re-assign the report directory as new output
+    with report_directory(args.output, args.keep_empty) as args.output:
+        # Run the analyzer against a compilation db.
+        govern_analyzer_runs(args)
+        # Cover report generation and bug counting.
+        number_of_bugs = document(args)
+        # Set exit status as it was requested.
+        return number_of_bugs if args.status_bugs else 0
+
+
+def need_analyzer(args):
+    """ Check the intent of the build command.
+
+    When the static analyzer runs against the project's configure step,
+    it should be silent: there is no need to run the analyzer or to
+    generate a report.
+
+    Running `scan-build` against the configure step might still be
+    necessary when compiler wrappers are used. That's the moment when the
+    build setup checks the compiler and captures its location for the
+    build process. """
+
+    return len(args) and not re.search('configure|autogen', args[0])
+
+
+def prefix_with(constant, pieces):
+    """ From a sequence create another sequence where every second element
+    is from the original sequence and the odd elements are the prefix.
+
+    eg.: prefix_with(0, [1,2,3]) creates [0, 1, 0, 2, 0, 3] """
+
+    return [elem for piece in pieces for elem in [constant, piece]]
+
+
+def get_ctu_config_from_args(args):
+    """ CTU configuration is created from the chosen phases and dir. """
+
+    return (
+        CtuConfig(collect=args.ctu_phases.collect,
+                  analyze=args.ctu_phases.analyze,
+                  dir=args.ctu_dir,
+                  extdef_map_cmd=args.extdef_map_cmd)
+        if hasattr(args, 'ctu_phases') and hasattr(args.ctu_phases, 'dir')
+        else CtuConfig(collect=False, analyze=False, dir='', extdef_map_cmd=''))
+
+
+def get_ctu_config_from_json(ctu_conf_json):
+    """ CTU configuration is created from the chosen phases and dir. """
+
+    ctu_config = json.loads(ctu_conf_json)
+    # Recover namedtuple from json when coming from analyze-cc or analyze-c++
+    return CtuConfig(collect=ctu_config[0],
+                     analyze=ctu_config[1],
+                     dir=ctu_config[2],
+                     extdef_map_cmd=ctu_config[3])
+
+
+def create_global_ctu_extdef_map(extdef_map_lines):
+    """ Takes iterator of individual external definition maps and creates a
+    global map keeping only unique names. We leave conflicting names out of
+    CTU.
+
+    :param extdef_map_lines: Contains the id of a definition (mangled name) and
+    the originating source (the corresponding AST file) name.
+    :type extdef_map_lines: Iterator of str.
+    :returns: Mangled name - AST file pairs.
+    :rtype: List of (str, str) tuples.
+    """
+
+    mangled_to_asts = defaultdict(set)
+
+    for line in extdef_map_lines:
+        mangled_name, ast_file = line.strip().split(' ', 1)
+        mangled_to_asts[mangled_name].add(ast_file)
+
+    mangled_ast_pairs = []
+
+    for mangled_name, ast_files in mangled_to_asts.items():
+        if len(ast_files) == 1:
+            mangled_ast_pairs.append((mangled_name, next(iter(ast_files))))
+
+    return mangled_ast_pairs
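+
+# A sketch of the mapping (illustrative mangled names and paths):
+#
+#     lines = ['_Z3foov /tmp/a.c.ast', '_Z3barv /tmp/a.c.ast',
+#              '_Z3foov /tmp/b.c.ast']
+#     create_global_ctu_extdef_map(lines)  # -> [('_Z3barv', '/tmp/a.c.ast')]
+#
+# '_Z3foov' is dropped because it is defined in two different AST files.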
+
+
+def merge_ctu_extdef_maps(ctudir):
+    """ Merge individual external definition maps into a global one.
+
+    As the collect phase runs in parallel on multiple threads, each
+    compilation unit is mapped separately into a temporary file in
+    CTU_TEMP_DEFMAP_FOLDER. These definition maps contain the mangled names
+    and the source (the AST generated from the source) which holds their
+    definition. These files are merged at the end into a global map file:
+    CTU_EXTDEF_MAP_FILENAME."""
+
+    def generate_extdef_map_lines(extdefmap_dir):
+        """ Iterate over all lines of input files in a determined order. """
+
+        files = glob.glob(os.path.join(extdefmap_dir, '*'))
+        files.sort()
+        for filename in files:
+            with open(filename, 'r') as in_file:
+                for line in in_file:
+                    yield line
+
+    def write_global_map(arch, mangled_ast_pairs):
+        """ Write (mangled name, ast file) pairs into final file. """
+
+        extern_defs_map_file = os.path.join(ctudir, arch,
+                                            CTU_EXTDEF_MAP_FILENAME)
+        with open(extern_defs_map_file, 'w') as out_file:
+            for mangled_name, ast_file in mangled_ast_pairs:
+                out_file.write('%s %s\n' % (mangled_name, ast_file))
+
+    triple_arches = glob.glob(os.path.join(ctudir, '*'))
+    for triple_path in triple_arches:
+        if os.path.isdir(triple_path):
+            triple_arch = os.path.basename(triple_path)
+            extdefmap_dir = os.path.join(ctudir, triple_arch,
+                                         CTU_TEMP_DEFMAP_FOLDER)
+
+            extdef_map_lines = generate_extdef_map_lines(extdefmap_dir)
+            mangled_ast_pairs = create_global_ctu_extdef_map(extdef_map_lines)
+            write_global_map(triple_arch, mangled_ast_pairs)
+
+            # Remove all temporary files
+            shutil.rmtree(extdefmap_dir, ignore_errors=True)
+
+
+def run_analyzer_parallel(args):
+    """ Runs the analyzer against the given compilation database. """
+
+    def exclude(filename):
+        """ Return true when any excluded directory prefix the filename. """
+        return any(re.match(r'^' + directory, filename)
+                   for directory in args.excludes)
+
+    consts = {
+        'clang': args.clang,
+        'output_dir': args.output,
+        'output_format': args.output_format,
+        'output_failures': args.output_failures,
+        'direct_args': analyzer_params(args),
+        'force_debug': args.force_debug,
+        'ctu': get_ctu_config_from_args(args)
+    }
+
+    logging.debug('run analyzer against compilation database')
+    with open(args.cdb, 'r') as handle:
+        generator = (dict(cmd, **consts)
+                     for cmd in json.load(handle) if not exclude(cmd['file']))
+        # when verbose output is requested, execute sequentially
+        pool = multiprocessing.Pool(1 if args.verbose > 2 else None)
+        for current in pool.imap_unordered(run, generator):
+            if current is not None:
+                # display error message from the static analyzer
+                for line in current['error_output']:
+                    logging.info(line.rstrip())
+        pool.close()
+        pool.join()
+
+
+def govern_analyzer_runs(args):
+    """ Governs multiple runs in CTU mode or runs once in normal mode. """
+
+    ctu_config = get_ctu_config_from_args(args)
+    # If we do a CTU collect (1st phase) we remove all previous collection
+    # data first.
+    if ctu_config.collect:
+        shutil.rmtree(ctu_config.dir, ignore_errors=True)
+
+    # If the user asked for both the collect (1st) and analyze (2nd) phases,
+    # we do an all-in-one run where we deliberately remove collection data
+    # before and also after the run. If the user asks for only a single
+    # phase, the data is left in place so that multiple analyze runs can
+    # reuse the data gathered by a single collection run.
+    if ctu_config.collect and ctu_config.analyze:
+        # The CTU strings come from args.ctu_dir and extdef_map_cmd,
+        # so we can leave them empty here.
+        args.ctu_phases = CtuConfig(collect=True, analyze=False,
+                                    dir='', extdef_map_cmd='')
+        run_analyzer_parallel(args)
+        merge_ctu_extdef_maps(ctu_config.dir)
+        args.ctu_phases = CtuConfig(collect=False, analyze=True,
+                                    dir='', extdef_map_cmd='')
+        run_analyzer_parallel(args)
+        shutil.rmtree(ctu_config.dir, ignore_errors=True)
+    else:
+        # Single runs (collect or analyze) are launched from here.
+        run_analyzer_parallel(args)
+        if ctu_config.collect:
+            merge_ctu_extdef_maps(ctu_config.dir)
+
+
+def setup_environment(args):
+    """ Set up environment for build command to interpose compiler wrapper. """
+
+    environment = dict(os.environ)
+    environment.update(wrapper_environment(args))
+    environment.update({
+        'CC': COMPILER_WRAPPER_CC,
+        'CXX': COMPILER_WRAPPER_CXX,
+        'ANALYZE_BUILD_CLANG': args.clang if need_analyzer(args.build) else '',
+        'ANALYZE_BUILD_REPORT_DIR': args.output,
+        'ANALYZE_BUILD_REPORT_FORMAT': args.output_format,
+        'ANALYZE_BUILD_REPORT_FAILURES': 'yes' if args.output_failures else '',
+        'ANALYZE_BUILD_PARAMETERS': ' '.join(analyzer_params(args)),
+        'ANALYZE_BUILD_FORCE_DEBUG': 'yes' if args.force_debug else '',
+        'ANALYZE_BUILD_CTU': json.dumps(get_ctu_config_from_args(args))
+    })
+    return environment
+
+
+@command_entry_point
+def analyze_compiler_wrapper():
+    """ Entry point for `analyze-cc` and `analyze-c++` compiler wrappers. """
+
+    return compiler_wrapper(analyze_compiler_wrapper_impl)
+
+
+def analyze_compiler_wrapper_impl(result, execution):
+    """ Implements analyzer compiler wrapper functionality. """
+
+    # Don't run the analyzer when compilation fails, or when not requested.
+    if result or not os.getenv('ANALYZE_BUILD_CLANG'):
+        return
+
+    # Check whether this is a compilation at all.
+    compilation = split_command(execution.cmd)
+    if compilation is None:
+        return
+    # collect the needed parameters from environment, crash when missing
+    parameters = {
+        'clang': os.getenv('ANALYZE_BUILD_CLANG'),
+        'output_dir': os.getenv('ANALYZE_BUILD_REPORT_DIR'),
+        'output_format': os.getenv('ANALYZE_BUILD_REPORT_FORMAT'),
+        'output_failures': os.getenv('ANALYZE_BUILD_REPORT_FAILURES'),
+        'direct_args': os.getenv('ANALYZE_BUILD_PARAMETERS',
+                                 '').split(' '),
+        'force_debug': os.getenv('ANALYZE_BUILD_FORCE_DEBUG'),
+        'directory': execution.cwd,
+        'command': [execution.cmd[0], '-c'] + compilation.flags,
+        'ctu': get_ctu_config_from_json(os.getenv('ANALYZE_BUILD_CTU'))
+    }
+    # call static analyzer against the compilation
+    for source in compilation.files:
+        parameters.update({'file': source})
+        logging.debug('analyzer parameters %s', parameters)
+        current = run(parameters)
+        # display error message from the static analyzer
+        if current is not None:
+            for line in current['error_output']:
+                logging.info(line.rstrip())
+
+
[email protected]
+def report_directory(hint, keep):
+    """ Responsible for the report directory.
+
+    hint -- could specify the parent directory of the output directory.
+    keep -- a boolean value to keep or delete the empty report directory. """
+
+    stamp_format = 'scan-build-%Y-%m-%d-%H-%M-%S-%f-'
+    stamp = datetime.datetime.now().strftime(stamp_format)
+    parent_dir = os.path.abspath(hint)
+    if not os.path.exists(parent_dir):
+        os.makedirs(parent_dir)
+    name = tempfile.mkdtemp(prefix=stamp, dir=parent_dir)
+
+    logging.info('Report directory created: %s', name)
+
+    try:
+        yield name
+    finally:
+        if os.listdir(name):
+            msg = "Run 'scan-view %s' to examine bug reports."
+            keep = True
+        else:
+            if keep:
+                msg = "Report directory '%s' contains no report, but kept."
+            else:
+                msg = "Removing directory '%s' because it contains no report."
+        logging.warning(msg, name)
+
+        if not keep:
+            os.rmdir(name)
+
+
+def analyzer_params(args):
+    """ A group of command line arguments can mapped to command
+    line arguments of the analyzer. This method generates those. """
+
+    result = []
+
+    if args.store_model:
+        result.append('-analyzer-store={0}'.format(args.store_model))
+    if args.constraints_model:
+        result.append('-analyzer-constraints={0}'.format(
+            args.constraints_model))
+    if args.internal_stats:
+        result.append('-analyzer-stats')
+    if args.analyze_headers:
+        result.append('-analyzer-opt-analyze-headers')
+    if args.stats:
+        result.append('-analyzer-checker=debug.Stats')
+    if args.maxloop:
+        result.extend(['-analyzer-max-loop', str(args.maxloop)])
+    if args.output_format:
+        result.append('-analyzer-output={0}'.format(args.output_format))
+    if args.analyzer_config:
+        result.extend(['-analyzer-config', args.analyzer_config])
+    if args.verbose >= 4:
+        result.append('-analyzer-display-progress')
+    if args.plugins:
+        result.extend(prefix_with('-load', args.plugins))
+    if args.enable_checker:
+        checkers = ','.join(args.enable_checker)
+        result.extend(['-analyzer-checker', checkers])
+    if args.disable_checker:
+        checkers = ','.join(args.disable_checker)
+        result.extend(['-analyzer-disable-checker', checkers])
+
+    return prefix_with('-Xclang', result)
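+
+# For example (hypothetical arguments), '--maxloop 8' would be emitted as
+#     ['-Xclang', '-analyzer-max-loop', '-Xclang', '8']
+# so each analyzer flag is routed through the clang driver with '-Xclang'.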
+
+
+def require(required):
+    """ Decorator for checking the required values in state.
+
+    It checks the required attributes in the passed state and stops when
+    any of those is missing. """
+
+    def decorator(function):
+        @functools.wraps(function)
+        def wrapper(*args, **kwargs):
+            for key in required:
+                if key not in args[0]:
+                    raise KeyError('{0} not passed to {1}'.format(
+                        key, function.__name__))
+
+            return function(*args, **kwargs)
+
+        return wrapper
+
+    return decorator
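+
+# Usage sketch (illustrative): guard a chained method on its dictionary keys.
+#
+#     @require(['file'])
+#     def step(opts):
+#         ...  # opts['file'] is guaranteed to be present here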
+
+
+@require(['command',  # entry from compilation database
+          'directory',  # entry from compilation database
+          'file',  # entry from compilation database
+          'clang',  # clang executable name (and path)
+          'direct_args',  # arguments from command line
+          'force_debug',  # kill non debug macros
+          'output_dir',  # where generated report files shall go
+          'output_format',  # 'plist', 'html', both, or 'plist-multi-file'
+          'output_failures',  # generate crash reports or not
+          'ctu'])  # ctu control options
+def run(opts):
+    """ Entry point to run (or not) static analyzer against a single entry
+    of the compilation database.
+
+    This complex task is decomposed into smaller methods which call each
+    other in a chain. If the analysis is not possible, the given method
+    just returns and breaks the chain.
+
+    The passed parameter is a Python dictionary. Each method first checks
+    that the needed parameters were received. (This is done by the
+    'require' decorator. It's like an 'assert' to check the contract
+    between the caller and the called method.) """
+
+    try:
+        command = opts.pop('command')
+        command = command if isinstance(command, list) else decode(command)
+        logging.debug("Run analyzer against '%s'", command)
+        opts.update(classify_parameters(command))
+
+        return arch_check(opts)
+    except Exception:
+        logging.error("Problem occurred during analyzis.", exc_info=1)
+        return None
+
+
+@require(['clang', 'directory', 'flags', 'file', 'output_dir', 'language',
+          'error_output', 'exit_code'])
+def report_failure(opts):
+    """ Create report when analyzer failed.
+
+    The major report is the preprocessor output. The output filename is
+    generated randomly. The compiler output is also captured into a
+    '.stderr.txt' file, and some more execution context is saved into an
+    '.info.txt' file. """
+
+    def extension():
+        """ Generate preprocessor file extension. """
+
+        mapping = {'objective-c++': '.mii', 'objective-c': '.mi', 'c++': '.ii'}
+        return mapping.get(opts['language'], '.i')
+
+    def destination():
+        """ Creates failures directory if not exits yet. """
+
+        failures_dir = os.path.join(opts['output_dir'], 'failures')
+        if not os.path.isdir(failures_dir):
+            os.makedirs(failures_dir)
+        return failures_dir
+
+    # Classify the error type: when Clang is terminated by a signal, it's
+    # a 'Crash'. (The Python subprocess Popen.returncode is negative when
+    # the child is terminated by a signal.) Everything else is an
+    # 'Other Error'.
+    error = 'crash' if opts['exit_code'] < 0 else 'other_error'
+    # Create preprocessor output file name. (This is blindly following the
+    # Perl implementation.)
+    (handle, name) = tempfile.mkstemp(suffix=extension(),
+                                      prefix='clang_' + error + '_',
+                                      dir=destination())
+    os.close(handle)
+    # Execute Clang again, but only to generate the preprocessor output.
+    cwd = opts['directory']
+    cmd = get_arguments(
+        [opts['clang'], '-fsyntax-only', '-E'
+         ] + opts['flags'] + [opts['file'], '-o', name], cwd)
+    run_command(cmd, cwd=cwd)
+    # write general information about the crash
+    with open(name + '.info.txt', 'w') as handle:
+        handle.write(opts['file'] + os.linesep)
+        handle.write(error.title().replace('_', ' ') + os.linesep)
+        handle.write(' '.join(cmd) + os.linesep)
+        handle.write(' '.join(os.uname()) + os.linesep)
+        handle.write(get_version(opts['clang']))
+    # write the captured output too
+    with open(name + '.stderr.txt', 'w') as handle:
+        handle.writelines(opts['error_output'])
+
+
+@require(['clang', 'directory', 'flags', 'direct_args', 'file', 'output_dir',
+          'output_format'])
+def run_analyzer(opts, continuation=report_failure):
+    """ It assembles the analysis command line and executes it. Capture the
+    output of the analysis and returns with it. If failure reports are
+    requested, it calls the continuation to generate it. """
+
+    def target():
+        """ Creates output file name for reports. """
+        if opts['output_format'] in {
+                'plist',
+                'plist-html',
+                'plist-multi-file'}:
+            (handle, name) = tempfile.mkstemp(prefix='report-',
+                                              suffix='.plist',
+                                              dir=opts['output_dir'])
+            os.close(handle)
+            return name
+        return opts['output_dir']
+
+    try:
+        cwd = opts['directory']
+        cmd = get_arguments([opts['clang'], '--analyze'] +
+                            opts['direct_args'] + opts['flags'] +
+                            [opts['file'], '-o', target()],
+                            cwd)
+        output = run_command(cmd, cwd=cwd)
+        return {'error_output': output, 'exit_code': 0}
+    except subprocess.CalledProcessError as ex:
+        result = {'error_output': ex.output, 'exit_code': ex.returncode}
+        if opts.get('output_failures', False):
+            opts.update(result)
+            continuation(opts)
+        return result
+
+
+def extdef_map_list_src_to_ast(extdef_src_list):
+    """ Turns textual external definition map list with source files into an
+    external definition map list with ast files. """
+
+    extdef_ast_list = []
+    for extdef_src_txt in extdef_src_list:
+        mangled_name, path = extdef_src_txt.split(" ", 1)
+        # Normalize path on windows as well
+        path = os.path.splitdrive(path)[1]
+        # Make relative path out of absolute
+        path = path[1:] if path[0] == os.sep else path
+        ast_path = os.path.join("ast", path + ".ast")
+        extdef_ast_list.append(mangled_name + " " + ast_path)
+    return extdef_ast_list
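+
+# Illustrative transformation (hypothetical name and path), on a POSIX system:
+#     '_Z3foov /src/a.c'  ->  '_Z3foov ast/src/a.c.ast'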
+
+
+@require(['clang', 'directory', 'flags', 'direct_args', 'file', 'ctu'])
+def ctu_collect_phase(opts):
+    """ Preprocess source by generating all data needed by CTU analysis. """
+
+    def generate_ast(triple_arch):
+        """ Generates ASTs for the current compilation command. """
+
+        args = opts['direct_args'] + opts['flags']
+        ast_joined_path = os.path.join(opts['ctu'].dir, triple_arch, 'ast',
+                                       os.path.realpath(opts['file'])[1:] +
+                                       '.ast')
+        ast_path = os.path.abspath(ast_joined_path)
+        ast_dir = os.path.dirname(ast_path)
+        if not os.path.isdir(ast_dir):
+            try:
+                os.makedirs(ast_dir)
+            except OSError:
+                # In case another process has already created it.
+                pass
+        ast_command = [opts['clang'], '-emit-ast']
+        ast_command.extend(args)
+        ast_command.append('-w')
+        ast_command.append(opts['file'])
+        ast_command.append('-o')
+        ast_command.append(ast_path)
+        logging.debug("Generating AST using '%s'", ast_command)
+        run_command(ast_command, cwd=opts['directory'])
+
+    def map_extdefs(triple_arch):
+        """ Generate external definition map file for the current source. """
+
+        args = opts['direct_args'] + opts['flags']
+        extdefmap_command = [opts['ctu'].extdef_map_cmd]
+        extdefmap_command.append(opts['file'])
+        extdefmap_command.append('--')
+        extdefmap_command.extend(args)
+        logging.debug("Generating external definition map using '%s'",
+                      extdefmap_command)
+        extdef_src_list = run_command(extdefmap_command, cwd=opts['directory'])
+        extdef_ast_list = extdef_map_list_src_to_ast(extdef_src_list)
+        extern_defs_map_folder = os.path.join(opts['ctu'].dir, triple_arch,
+                                              CTU_TEMP_DEFMAP_FOLDER)
+        if not os.path.isdir(extern_defs_map_folder):
+            try:
+                os.makedirs(extern_defs_map_folder)
+            except OSError:
+                # In case another process has already created it.
+                pass
+        if extdef_ast_list:
+            with tempfile.NamedTemporaryFile(mode='w',
+                                             dir=extern_defs_map_folder,
+                                             delete=False) as out_file:
+                out_file.write("\n".join(extdef_ast_list) + "\n")
+
+    cwd = opts['directory']
+    cmd = [opts['clang'], '--analyze'] + opts['direct_args'] + opts['flags'] \
+        + [opts['file']]
+    triple_arch = get_triple_arch(cmd, cwd)
+    generate_ast(triple_arch)
+    map_extdefs(triple_arch)
+
+
+@require(['ctu'])
+def dispatch_ctu(opts, continuation=run_analyzer):
+    """ Execute only one phase of 2 phases of CTU if needed. """
+
+    ctu_config = opts['ctu']
+
+    if ctu_config.collect or ctu_config.analyze:
+        assert ctu_config.collect != ctu_config.analyze
+        if ctu_config.collect:
+            return ctu_collect_phase(opts)
+        if ctu_config.analyze:
+            cwd = opts['directory']
+            cmd = [opts['clang'], '--analyze'] + opts['direct_args'] \
+                + opts['flags'] + [opts['file']]
+            triarch = get_triple_arch(cmd, cwd)
+            ctu_options = ['ctu-dir=' + os.path.join(ctu_config.dir, triarch),
+                           'experimental-enable-naive-ctu-analysis=true']
+            analyzer_options = prefix_with('-analyzer-config', ctu_options)
+            direct_options = prefix_with('-Xanalyzer', analyzer_options)
+            opts['direct_args'].extend(direct_options)
+
+    return continuation(opts)
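+
+# In the analyze phase the options assembled above reach the analyzer as
+# (illustrative, for an 'x86_64' triple):
+#     -Xanalyzer -analyzer-config -Xanalyzer ctu-dir=<ctu-dir>/x86_64
+#     -Xanalyzer -analyzer-config
+#     -Xanalyzer experimental-enable-naive-ctu-analysis=true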
+
+
+@require(['flags', 'force_debug'])
+def filter_debug_flags(opts, continuation=dispatch_ctu):
+    """ Filter out nondebug macros when requested. """
+
+    if opts.pop('force_debug'):
+        # lazy implementation just append an undefine macro at the end
+        opts.update({'flags': opts['flags'] + ['-UNDEBUG']})
+
+    return continuation(opts)
+
+
+@require(['language', 'compiler', 'file', 'flags'])
+def language_check(opts, continuation=filter_debug_flags):
+    """ Find out the language from command line parameters or file name
+    extension. The decision also influenced by the compiler invocation. """
+
+    accepted = frozenset({
+        'c', 'c++', 'objective-c', 'objective-c++', 'c-cpp-output',
+        'c++-cpp-output', 'objective-c-cpp-output'
+    })
+
+    # language can be given as a parameter...
+    language = opts.pop('language')
+    compiler = opts.pop('compiler')
+    # ... or find out from source file extension
+    if language is None and compiler is not None:
+        language = classify_source(opts['file'], compiler == 'c')
+
+    if language is None:
+        logging.debug('skip analysis, language not known')
+        return None
+    elif language not in accepted:
+        logging.debug('skip analysis, language not supported')
+        return None
+    else:
+        logging.debug('analysis, language: %s', language)
+        opts.update({'language': language,
+                     'flags': ['-x', language] + opts['flags']})
+        return continuation(opts)
+
+
+@require(['arch_list', 'flags'])
+def arch_check(opts, continuation=language_check):
+    """ Do run analyzer through one of the given architectures. """
+
+    disabled = frozenset({'ppc', 'ppc64'})
+
+    received_list = opts.pop('arch_list')
+    if received_list:
+        # filter out disabled architectures and -arch switches
+        filtered_list = [a for a in received_list if a not in disabled]
+        if filtered_list:
+            # There should be only one arch given (or the same one multiple
+            # times). If multiple different archs are given, they would not
+            # change the pre-processing step, and that's the only pass we
+            # have before running the analyzer.
+            current = filtered_list.pop()
+            logging.debug('analysis, on arch: %s', current)
+
+            opts.update({'flags': ['-arch', current] + opts['flags']})
+            return continuation(opts)
+        else:
+            logging.debug('skip analysis, no supported arch found')
+            return None
+    else:
+        logging.debug('analysis, on default arch')
+        return continuation(opts)
+
+
+# To get good results from the static analyzer, certain compiler options
+# shall be omitted. The compiler flag filtering only affects the static
+# analyzer run.
+#
+# Keys are option names; values are the number of following options to skip.
+IGNORED_FLAGS = {
+    '-c': 0,  # compile option will be overwritten
+    '-fsyntax-only': 0,  # static analyzer option will be overwritten
+    '-o': 1,  # will set up own output file
+    # flags below are inherited from the perl implementation.
+    '-g': 0,
+    '-save-temps': 0,
+    '-install_name': 1,
+    '-exported_symbols_list': 1,
+    '-current_version': 1,
+    '-compatibility_version': 1,
+    '-init': 1,
+    '-e': 1,
+    '-seg1addr': 1,
+    '-bundle_loader': 1,
+    '-multiply_defined': 1,
+    '-sectorder': 3,
+    '--param': 1,
+    '--serialize-diagnostics': 1
+}
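+
+# For example (hypothetical command), in 'cc -c foo.c -o foo.o -g' all of
+# '-c', '-o foo.o' and '-g' are dropped by the filtering below, and 'foo.c'
+# is recognized as a source file rather than a flag.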
+
+
+def classify_parameters(command):
+    """ Prepare compiler flags (filters some and add others) and take out
+    language (-x) and architecture (-arch) flags for future processing. """
+
+    result = {
+        'flags': [],  # the filtered compiler flags
+        'arch_list': [],  # list of architecture flags
+        'language': None,  # compilation language, None, if not specified
+        'compiler': compiler_language(command)  # 'c' or 'c++'
+    }
+
+    # iterate on the compile options
+    args = iter(command[1:])
+    for arg in args:
+        # take arch flags into a separate basket
+        if arg == '-arch':
+            result['arch_list'].append(next(args))
+        # take language
+        elif arg == '-x':
+            result['language'] = next(args)
+        # parameters which look like source files are not flags
+        elif re.match(r'^[^-].+', arg) and classify_source(arg):
+            pass
+        # ignore some flags
+        elif arg in IGNORED_FLAGS:
+            count = IGNORED_FLAGS[arg]
+            for _ in range(count):
+                next(args)
+        # we don't care about extra warnings, but we should suppress ones
+        # that we don't want to see.
+        elif re.match(r'^-W.+', arg) and not re.match(r'^-Wno-.+', arg):
+            pass
+        # and consider everything else as compilation flag.
+        else:
+            result['flags'].append(arg)
+
+    return result
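+
+# An illustrative call (hypothetical command line):
+#     classify_parameters(['g++', '-x', 'c++', '-O2', 'a.cpp'])
+# would yield {'flags': ['-O2'], 'arch_list': [], 'language': 'c++',
+# 'compiler': 'c++'}.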
diff --git a/src/llvm-project/clang/tools/scan-build-py/libscanbuild/arguments.py b/src/llvm-project/clang/tools/scan-build-py/libscanbuild/arguments.py
new file mode 100644
index 0000000..58c56d2
--- /dev/null
+++ b/src/llvm-project/clang/tools/scan-build-py/libscanbuild/arguments.py
@@ -0,0 +1,502 @@
+# -*- coding: utf-8 -*-
+#                     The LLVM Compiler Infrastructure
+#
+# This file is distributed under the University of Illinois Open Source
+# License. See LICENSE.TXT for details.
+""" This module parses and validates arguments for command-line interfaces.
+
+It uses the argparse module to create the command line parser. (This
+library has been part of the standard Python library since 3.2, and was
+backported to 2.7, but not earlier.)
+
+It also implements basic validation methods, related to the command.
+Validations are mostly calling specific help methods, or mangling values.
+"""
+from __future__ import absolute_import, division, print_function
+
+import os
+import sys
+import argparse
+import logging
+import tempfile
+from libscanbuild import reconfigure_logging, CtuConfig
+from libscanbuild.clang import get_checkers, is_ctu_capable
+
+__all__ = ['parse_args_for_intercept_build', 'parse_args_for_analyze_build',
+           'parse_args_for_scan_build']
+
+
+def parse_args_for_intercept_build():
+    """ Parse and validate command-line arguments for intercept-build. """
+
+    parser = create_intercept_parser()
+    args = parser.parse_args()
+
+    reconfigure_logging(args.verbose)
+    logging.debug('Raw arguments %s', sys.argv)
+
+    # short validation logic
+    if not args.build:
+        parser.error(message='missing build command')
+
+    logging.debug('Parsed arguments: %s', args)
+    return args
+
+
+def parse_args_for_analyze_build():
+    """ Parse and validate command-line arguments for analyze-build. """
+
+    from_build_command = False
+    parser = create_analyze_parser(from_build_command)
+    args = parser.parse_args()
+
+    reconfigure_logging(args.verbose)
+    logging.debug('Raw arguments %s', sys.argv)
+
+    normalize_args_for_analyze(args, from_build_command)
+    validate_args_for_analyze(parser, args, from_build_command)
+    logging.debug('Parsed arguments: %s', args)
+    return args
+
+
+def parse_args_for_scan_build():
+    """ Parse and validate command-line arguments for scan-build. """
+
+    from_build_command = True
+    parser = create_analyze_parser(from_build_command)
+    args = parser.parse_args()
+
+    reconfigure_logging(args.verbose)
+    logging.debug('Raw arguments %s', sys.argv)
+
+    normalize_args_for_analyze(args, from_build_command)
+    validate_args_for_analyze(parser, args, from_build_command)
+    logging.debug('Parsed arguments: %s', args)
+    return args
+
+
+def normalize_args_for_analyze(args, from_build_command):
+    """ Normalize parsed arguments for analyze-build and scan-build.
+
+    :param args: Parsed argument object. (Will be mutated.)
+    :param from_build_command: Boolean value that tells whether the command
+    is supposed to run the analyzer against a build command or a
+    compilation database. """
+
+    # make plugins always a list. (it might be None when not specified.)
+    if args.plugins is None:
+        args.plugins = []
+
+    # make exclude directory list unique and absolute.
+    uniq_excludes = set(os.path.abspath(entry) for entry in args.excludes)
+    args.excludes = list(uniq_excludes)
+
+    # Because the code is shared between all tools, some commonly used
+    # methods expect certain arguments to be present. So, instead of
+    # querying the args object about the presence of a flag, we fake it
+    # here, to make those methods more readable. (It's an arguable choice;
+    # we took it only for arguments which have a good default value.)
+    if from_build_command:
+        # Add the cdb parameter invisibly to make the report module work.
+        args.cdb = 'compile_commands.json'
+
+    # Make ctu_dir an absolute path, as it is needed inside clang.
+    if not from_build_command and hasattr(args, 'ctu_phases') \
+            and hasattr(args.ctu_phases, 'dir'):
+        args.ctu_dir = os.path.abspath(args.ctu_dir)
+
+
+def validate_args_for_analyze(parser, args, from_build_command):
+    """ Command line parsing is done by the argparse module, but semantic
+    validation still needs to be done. This method is doing it for
+    analyze-build and scan-build commands.
+
+    :param parser: The command line parser object.
+    :param args: Parsed argument object.
+    :param from_build_command: Boolean value that tells whether the command
+    is supposed to run the analyzer against a build command or a
+    compilation database.
+    :return: No return value, but this call might throw when validation
+    fails. """
+
+    if args.help_checkers_verbose:
+        print_checkers(get_checkers(args.clang, args.plugins))
+        parser.exit(status=0)
+    elif args.help_checkers:
+        print_active_checkers(get_checkers(args.clang, args.plugins))
+        parser.exit(status=0)
+    elif from_build_command and not args.build:
+        parser.error(message='missing build command')
+    elif not from_build_command and not os.path.exists(args.cdb):
+        parser.error(message='compilation database is missing')
+
+    # If the user wants CTU mode
+    if not from_build_command and hasattr(args, 'ctu_phases') \
+            and hasattr(args.ctu_phases, 'dir'):
+        # If CTU analyze_only, the input directory should exist
+        if args.ctu_phases.analyze and not args.ctu_phases.collect \
+                and not os.path.exists(args.ctu_dir):
+            parser.error(message='missing CTU directory')
+        # Check CTU capability via checking clang-extdef-mapping
+        if not is_ctu_capable(args.extdef_map_cmd):
+            parser.error(message="""This version of clang does not support CTU
+            functionality or clang-extdef-mapping command not found.""")
+
+
+def create_intercept_parser():
+    """ Creates a parser for command-line arguments to 'intercept'. """
+
+    parser = create_default_parser()
+    parser_add_cdb(parser)
+
+    parser_add_prefer_wrapper(parser)
+    parser_add_compilers(parser)
+
+    advanced = parser.add_argument_group('advanced options')
+    group = advanced.add_mutually_exclusive_group()
+    group.add_argument(
+        '--append',
+        action='store_true',
+        help="""Extend existing compilation database with new entries.
+        Duplicate entries are detected and excluded from the final output.
+        The output is not continuously updated; it is written once the
+        build command has finished. """)
+
+    parser.add_argument(
+        dest='build', nargs=argparse.REMAINDER, help="""Command to run.""")
+    return parser
+
+
+def create_analyze_parser(from_build_command):
+    """ Creates a parser for command-line arguments to 'analyze'. """
+
+    parser = create_default_parser()
+
+    if from_build_command:
+        parser_add_prefer_wrapper(parser)
+        parser_add_compilers(parser)
+
+        parser.add_argument(
+            '--intercept-first',
+            action='store_true',
+            help="""Run the build commands first, intercept compiler
+            calls and then run the static analyzer afterwards.
+            Generally speaking it has better coverage on build commands.
+            With '--override-compiler' it uses compiler wrappers, but does
+            not run the analyzer until the build is finished.""")
+    else:
+        parser_add_cdb(parser)
+
+    parser.add_argument(
+        '--status-bugs',
+        action='store_true',
+        help="""The exit status of '%(prog)s' is the same as the executed
+        build command. This option ignores the build exit status and sets to
+        be non zero if it found potential bugs or zero otherwise.""")
+    parser.add_argument(
+        '--exclude',
+        metavar='<directory>',
+        dest='excludes',
+        action='append',
+        default=[],
+        help="""Do not run static analyzer against files found in this
+        directory. (You can specify this option multiple times.)
+        Could be useful when project contains 3rd party libraries.""")
+
+    output = parser.add_argument_group('output control options')
+    output.add_argument(
+        '--output',
+        '-o',
+        metavar='<path>',
+        default=tempfile.gettempdir(),
+        help="""Specifies the output directory for analyzer reports.
+        Subdirectory will be created if default directory is targeted.""")
+    output.add_argument(
+        '--keep-empty',
+        action='store_true',
+        help="""Don't remove the build results directory even if no issues
+        were reported.""")
+    output.add_argument(
+        '--html-title',
+        metavar='<title>',
+        help="""Specify the title used on generated HTML pages.
+        If not specified, a default title will be used.""")
+    format_group = output.add_mutually_exclusive_group()
+    format_group.add_argument(
+        '--plist',
+        '-plist',
+        dest='output_format',
+        const='plist',
+        default='html',
+        action='store_const',
+        help="""Cause the results as a set of .plist files.""")
+    format_group.add_argument(
+        '--plist-html',
+        '-plist-html',
+        dest='output_format',
+        const='plist-html',
+        default='html',
+        action='store_const',
+        help="""Cause the results as a set of .html and .plist files.""")
+    format_group.add_argument(
+        '--plist-multi-file',
+        '-plist-multi-file',
+        dest='output_format',
+        const='plist-multi-file',
+        default='html',
+        action='store_const',
+        help="""Cause the results as a set of .plist files with extra
+        information on related files.""")
+
+    advanced = parser.add_argument_group('advanced options')
+    advanced.add_argument(
+        '--use-analyzer',
+        metavar='<path>',
+        dest='clang',
+        default='clang',
+        help="""'%(prog)s' uses the 'clang' executable relative to itself for
+        static analysis. One can override this behavior with this option by
+        using the 'clang' packaged with Xcode (on OS X) or from the PATH.""")
+    advanced.add_argument(
+        '--no-failure-reports',
+        '-no-failure-reports',
+        dest='output_failures',
+        action='store_false',
+        help="""Do not create a 'failures' subdirectory that includes analyzer
+        crash reports and preprocessed source files.""")
+    advanced.add_argument(
+        '--analyze-headers',
+        action='store_true',
+        help="""Also analyze functions in #included files. By default, such
+        functions are skipped unless they are called by functions within the
+        main source file.""")
+    advanced.add_argument(
+        '--stats',
+        '-stats',
+        action='store_true',
+        help="""Generates visitation statistics for the project.""")
+    advanced.add_argument(
+        '--internal-stats',
+        action='store_true',
+        help="""Generate internal analyzer statistics.""")
+    advanced.add_argument(
+        '--maxloop',
+        '-maxloop',
+        metavar='<loop count>',
+        type=int,
+        help="""Specify the number of times a block can be visited before
+        giving up. Increase for more comprehensive coverage at a cost of
+        speed.""")
+    advanced.add_argument(
+        '--store',
+        '-store',
+        metavar='<model>',
+        dest='store_model',
+        choices=['region', 'basic'],
+        help="""Specify the store model used by the analyzer. 'region'
+        specifies a field- sensitive store model. 'basic' which is far less
+        precise but can more quickly analyze code. 'basic' was the default
+        store model for checker-0.221 and earlier.""")
+    advanced.add_argument(
+        '--constraints',
+        '-constraints',
+        metavar='<model>',
+        dest='constraints_model',
+        choices=['range', 'basic'],
+        help="""Specify the constraint engine used by the analyzer. Specifying
+        'basic' uses a simpler, less powerful constraint model used by
+        checker-0.160 and earlier.""")
+    advanced.add_argument(
+        '--analyzer-config',
+        '-analyzer-config',
+        metavar='<options>',
+        help="""Provide options to pass through to the analyzer's
+        -analyzer-config flag. Several options are separated with comma:
+        'key1=val1,key2=val2'
+
+        Available options:
+            stable-report-filename=true or false (default)
+
+        Switch the page naming to:
+        report-<filename>-<function/method name>-<id>.html
+        instead of report-XXXXXX.html""")
+    advanced.add_argument(
+        '--force-analyze-debug-code',
+        dest='force_debug',
+        action='store_true',
+        help="""Tells analyzer to enable assertions in code even if they were
+        disabled during compilation, enabling more precise results.""")
+
+    plugins = parser.add_argument_group('checker options')
+    plugins.add_argument(
+        '--load-plugin',
+        '-load-plugin',
+        metavar='<plugin library>',
+        dest='plugins',
+        action='append',
+        help="""Loading external checkers using the clang plugin interface.""")
+    plugins.add_argument(
+        '--enable-checker',
+        '-enable-checker',
+        metavar='<checker name>',
+        action=AppendCommaSeparated,
+        help="""Enable specific checker.""")
+    plugins.add_argument(
+        '--disable-checker',
+        '-disable-checker',
+        metavar='<checker name>',
+        action=AppendCommaSeparated,
+        help="""Disable specific checker.""")
+    plugins.add_argument(
+        '--help-checkers',
+        action='store_true',
+        help="""A default group of checkers is run unless explicitly disabled.
+        Exactly which checkers constitute the default group is a function of
+        the operating system in use. These can be printed with this flag.""")
+    plugins.add_argument(
+        '--help-checkers-verbose',
+        action='store_true',
+        help="""Print all available checkers and mark the enabled ones.""")
+
+    if from_build_command:
+        parser.add_argument(
+            dest='build', nargs=argparse.REMAINDER, help="""Command to run.""")
+    else:
+        ctu = parser.add_argument_group('cross translation unit analysis')
+        ctu_mutex_group = ctu.add_mutually_exclusive_group()
+        ctu_mutex_group.add_argument(
+            '--ctu',
+            action='store_const',
+            const=CtuConfig(collect=True, analyze=True,
+                            dir='', extdef_map_cmd=''),
+            dest='ctu_phases',
+            help="""Perform cross translation unit (ctu) analysis (both collect
+            and analyze phases) using default <ctu-dir> for temporary output.
+            At the end of the analysis, the temporary directory is removed.""")
+        ctu.add_argument(
+            '--ctu-dir',
+            metavar='<ctu-dir>',
+            dest='ctu_dir',
+            default='ctu-dir',
+            help="""Defines the temporary directory used between ctu
+            phases.""")
+        ctu_mutex_group.add_argument(
+            '--ctu-collect-only',
+            action='store_const',
+            const=CtuConfig(collect=True, analyze=False,
+                            dir='', extdef_map_cmd=''),
+            dest='ctu_phases',
+            help="""Perform only the collect phase of ctu.
+            Keep <ctu-dir> for further use.""")
+        ctu_mutex_group.add_argument(
+            '--ctu-analyze-only',
+            action='store_const',
+            const=CtuConfig(collect=False, analyze=True,
+                            dir='', extdef_map_cmd=''),
+            dest='ctu_phases',
+            help="""Perform only the analyze phase of ctu. <ctu-dir> should be
+            present and will not be removed after analysis.""")
+        ctu.add_argument(
+            '--use-extdef-map-cmd',
+            metavar='<path>',
+            dest='extdef_map_cmd',
+            default='clang-extdef-mapping',
+            help="""'%(prog)s' uses the 'clang-extdef-mapping' executable
+            relative to itself for generating external definition maps for
+            static analysis. One can override this behavior with this option
+            by using the 'clang-extdef-mapping' packaged with Xcode (on OS X)
+            or from the PATH.""")
+    return parser
+
+
+def create_default_parser():
+    """ Creates command line parser for all build wrapper commands. """
+
+    parser = argparse.ArgumentParser(
+        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
+
+    parser.add_argument(
+        '--verbose',
+        '-v',
+        action='count',
+        default=0,
+        help="""Enable verbose output from '%(prog)s'. A second, third and
+        fourth flags increases verbosity.""")
+    return parser
+
+
+def parser_add_cdb(parser):
+    parser.add_argument(
+        '--cdb',
+        metavar='<file>',
+        default="compile_commands.json",
+        help="""The JSON compilation database.""")
+
+
+def parser_add_prefer_wrapper(parser):
+    parser.add_argument(
+        '--override-compiler',
+        action='store_true',
+        help="""Always resort to the compiler wrapper even when better
+        intercept methods are available.""")
+
+
+def parser_add_compilers(parser):
+    parser.add_argument(
+        '--use-cc',
+        metavar='<path>',
+        dest='cc',
+        default=os.getenv('CC', 'cc'),
+        help="""When '%(prog)s' analyzes a project by interposing a compiler
+        wrapper, which executes a real compiler for compilation and do other
+        tasks (record the compiler invocation). Because of this interposing,
+        '%(prog)s' does not know what compiler your project normally uses.
+        Instead, it simply overrides the CC environment variable, and guesses
+        your default compiler.
+
+        If you need '%(prog)s' to use a specific compiler for *compilation*
+        then you can use this option to specify a path to that compiler.""")
+    parser.add_argument(
+        '--use-c++',
+        metavar='<path>',
+        dest='cxx',
+        default=os.getenv('CXX', 'c++'),
+        help="""This is the same as "--use-cc" but for C++ code.""")
+
+
+class AppendCommaSeparated(argparse.Action):
+    """ argparse Action class to support multiple comma separated lists. """
+
+    def __call__(self, __parser, namespace, values, __option_string):
+        # getattr(obj, attr, default) does not return the default here,
+        # because argparse has already initialized the attribute to None
+        if getattr(namespace, self.dest, None) is None:
+            setattr(namespace, self.dest, [])
+        # the attribute is a list now, so it can be used as expected
+        actual = getattr(namespace, self.dest)
+        actual.extend(values.split(','))
+        setattr(namespace, self.dest, actual)
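+
+    # Illustrative usage (the option name is hypothetical): for an option
+    # registered with action=AppendCommaSeparated, the command line
+    #   --enable-checker a,b --enable-checker c
+    # leaves namespace.<dest> == ['a', 'b', 'c'].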
+
+
+def print_active_checkers(checkers):
+    """ Print active checkers to stdout. """
+
+    for name in sorted(name for name, (_, active) in checkers.items()
+                       if active):
+        print(name)
+
+
+def print_checkers(checkers):
+    """ Print verbose checker help to stdout. """
+
+    print('')
+    print('available checkers:')
+    print('')
+    for name in sorted(checkers.keys()):
+        description, active = checkers[name]
+        prefix = '+' if active else ' '
+        if len(name) > 30:
+            print(' {0} {1}'.format(prefix, name))
+            print(' ' * 35 + description)
+        else:
+            print(' {0} {1: <30}  {2}'.format(prefix, name, description))
+    print('')
+    print('NOTE: "+" indicates that an analysis is enabled by default.')
+    print('')
diff --git a/src/llvm-project/clang/tools/scan-build-py/libscanbuild/clang.py b/src/llvm-project/clang/tools/scan-build-py/libscanbuild/clang.py
new file mode 100644
index 0000000..0cbfdb6
--- /dev/null
+++ b/src/llvm-project/clang/tools/scan-build-py/libscanbuild/clang.py
@@ -0,0 +1,179 @@
+# -*- coding: utf-8 -*-
+#                     The LLVM Compiler Infrastructure
+#
+# This file is distributed under the University of Illinois Open Source
+# License. See LICENSE.TXT for details.
+""" This module is responsible for the Clang executable.
+
+Since the Clang command line interface is rich, but this project uses only
+a subset of it, it makes sense to create function-specific wrappers. """
+
+import subprocess
+import re
+from libscanbuild import run_command
+from libscanbuild.shell import decode
+
+__all__ = ['get_version', 'get_arguments', 'get_checkers', 'is_ctu_capable',
+           'get_triple_arch']
+
+# regex for activated checker
+ACTIVE_CHECKER_PATTERN = re.compile(r'^-analyzer-checker=(.*)$')
+
+
+def get_version(clang):
+    """ Returns the compiler version as string.
+
+    :param clang:   the compiler we are using
+    :return:        the version string printed to stderr """
+
+    output = run_command([clang, '-v'])
+    # the relevant version info is in the first line
+    return output[0]
+
+
+def get_arguments(command, cwd):
+    """ Capture Clang invocation.
+
+    :param command: the compilation command
+    :param cwd:     the current working directory
+    :return:        the detailed front-end invocation command """
+
+    cmd = command[:]
+    cmd.insert(1, '-###')
+
+    output = run_command(cmd, cwd=cwd)
+    # The relevant information is in the last line of the output.
+    # Don't check whether finding the last line fails; indexing would raise anyway.
+    last_line = output[-1]
+    if re.search(r'clang(.*): error:', last_line):
+        raise Exception(last_line)
+    return decode(last_line)
+
+
+def get_active_checkers(clang, plugins):
+    """ Get the active checker list.
+
+    :param clang:   the compiler we are using
+    :param plugins: list of plugins which were requested by the user
+    :return:        list of checker names which are active
+
+    To get the default checkers, we ask Clang to print how this compilation
+    would be executed and take the enabled checkers from the arguments. As
+    input file we specify stdin and pass only the language information. """
+
+    def get_active_checkers_for(language):
+        """ Returns a list of active checkers for the given language. """
+
+        load_args = [arg
+                     for plugin in plugins
+                     for arg in ['-Xclang', '-load', '-Xclang', plugin]]
+        cmd = [clang, '--analyze'] + load_args + ['-x', language, '-']
+        return [ACTIVE_CHECKER_PATTERN.match(arg).group(1)
+                for arg in get_arguments(cmd, '.')
+                if ACTIVE_CHECKER_PATTERN.match(arg)]
+
+    result = set()
+    for language in ['c', 'c++', 'objective-c', 'objective-c++']:
+        result.update(get_active_checkers_for(language))
+    return frozenset(result)
+
+
+def is_active(checkers):
+    """ Returns a method, which classifies the checker active or not,
+    based on the received checker name list. """
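+
+    # For example (illustrative): is_active(['core'])('core.DivideZero')
+    # returns True, while is_active(['core'])('unix.Malloc') returns False.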
+
+    def predicate(checker):
+        """ Returns True if the given checker is active. """
+
+        return any(pattern.match(checker) for pattern in predicate.patterns)
+
+    predicate.patterns = [re.compile(r'^' + a + r'(\.|$)') for a in checkers]
+    return predicate
+
+
+def parse_checkers(stream):
+    """ Parse clang -analyzer-checker-help output.
+
+    Below the line 'CHECKERS:' come the name/description pairs. Most of
+    them fit on a single line, but checkers with long names have the name
+    and the description on separate lines.
+
+    The checker name is always prefixed with two space characters and
+    contains no whitespace. It is followed either by a newline (when the
+    name is too long) or by more space characters, and then by the
+    description of the checker. The description ends with a newline
+    character.
+
+    :param stream:  list of lines to parse
+    :return:        generator of tuples
+
+    (<checker name>, <checker description>) """
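+
+    # Illustrative input shape (not verbatim clang output):
+    #   CHECKERS:
+    #     core.DivideZero        Check for division by zero
+    #     alpha.core.ASuitablyLongCheckerName
+    #                            Description printed on a separate line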
+
+    lines = iter(stream)
+    # find checkers header
+    for line in lines:
+        if re.match(r'^CHECKERS:', line):
+            break
+    # find entries
+    state = None
+    for line in lines:
+        if state and not re.match(r'^\s\s\S', line):
+            yield (state, line.strip())
+            state = None
+        elif re.match(r'^\s\s\S+$', line.rstrip()):
+            state = line.strip()
+        else:
+            pattern = re.compile(r'^\s\s(?P<key>\S*)\s*(?P<value>.*)')
+            match = pattern.match(line.rstrip())
+            if match:
+                current = match.groupdict()
+                yield (current['key'], current['value'])
+
+
+def get_checkers(clang, plugins):
+    """ Get all the available checkers from default and from the plugins.
+
+    :param clang:   the compiler we are using
+    :param plugins: list of plugins which were requested by the user
+    :return:        a dictionary of all available checkers and their status
+
+    {<checker name>: (<checker description>, <is active by default>)} """
+
+    load = [elem for plugin in plugins for elem in ['-load', plugin]]
+    cmd = [clang, '-cc1'] + load + ['-analyzer-checker-help']
+
+    lines = run_command(cmd)
+
+    is_active_checker = is_active(get_active_checkers(clang, plugins))
+
+    checkers = {
+        name: (description, is_active_checker(name))
+        for name, description in parse_checkers(lines)
+    }
+    if not checkers:
+        raise Exception('Could not query Clang for available checkers.')
+
+    return checkers
+
+
+def is_ctu_capable(extdef_map_cmd):
+    """ Detects if the current (or given) clang and external definition mapping
+    executables are CTU compatible. """
+
+    try:
+        run_command([extdef_map_cmd, '-version'])
+    except (OSError, subprocess.CalledProcessError):
+        return False
+    return True
+
+
+def get_triple_arch(command, cwd):
+    """Returns the architecture part of the target triple for the given
+    compilation command. """
+
+    cmd = get_arguments(command, cwd)
+    try:
+        separator = cmd.index("-triple")
+        return cmd[separator + 1]
+    except (IndexError, ValueError):
+        return ""
diff --git a/src/llvm-project/clang/tools/scan-build-py/libscanbuild/compilation.py b/src/llvm-project/clang/tools/scan-build-py/libscanbuild/compilation.py
new file mode 100644
index 0000000..ef906fa
--- /dev/null
+++ b/src/llvm-project/clang/tools/scan-build-py/libscanbuild/compilation.py
@@ -0,0 +1,141 @@
+# -*- coding: utf-8 -*-
+#                     The LLVM Compiler Infrastructure
+#
+# This file is distributed under the University of Illinois Open Source
+# License. See LICENSE.TXT for details.
+""" This module is responsible for to parse a compiler invocation. """
+
+import re
+import os
+import collections
+
+__all__ = ['split_command', 'classify_source', 'compiler_language']
+
+# Map of ignored compiler options for compilation database creation.
+# The map is used in the `split_command` method, which ignores and
+# classifies parameters. Please note that these are not the only
+# parameters which might be ignored.
+#
+# Keys are the option names; values are the number of arguments to skip.
+IGNORED_FLAGS = {
+    # compile-only flag, ignored because the creator of the compilation
+    # database will set it explicitly.
+    '-c': 0,
+    # dependency-file generation flags, ignored because they would cause
+    # duplicate entries in the output (the only difference being these
+    # flags). This is an actual finding from users who suffered longer
+    # execution times caused by the duplicates.
+    '-MD': 0,
+    '-MMD': 0,
+    '-MG': 0,
+    '-MP': 0,
+    '-MF': 1,
+    '-MT': 1,
+    '-MQ': 1,
+    # linker options, ignored because the compilation database will contain
+    # compilation commands only, so the compiler would ignore these flags
+    # anyway. The benefit of dropping them is more readable output.
+    '-static': 0,
+    '-shared': 0,
+    '-s': 0,
+    '-rdynamic': 0,
+    '-l': 1,
+    '-L': 1,
+    '-u': 1,
+    '-z': 1,
+    '-T': 1,
+    '-Xlinker': 1
+}
+
+# Known C/C++ compiler executable name patterns
+COMPILER_PATTERNS = frozenset([
+    re.compile(r'^(intercept-|analyze-|)c(c|\+\+)$'),
+    re.compile(r'^([^-]*-)*[mg](cc|\+\+)(-\d+(\.\d+){0,2})?$'),
+    re.compile(r'^([^-]*-)*clang(\+\+)?(-\d+(\.\d+){0,2})?$'),
+    re.compile(r'^llvm-g(cc|\+\+)$'),
+])
+
+
+def split_command(command):
+    """ Returns a value when the command is a compilation, None otherwise.
+
+    The value on success is a named tuple with the following attributes:
+
+        files:    list of source files
+        flags:    list of compile options
+        compiler: string value of 'c' or 'c++' """
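+
+    # For example (illustrative): ['cc', '-c', '-Wall', 'main.c', '-o',
+    # 'main.o'] is split into compiler='c', files=['main.c'] and
+    # flags=['-Wall', '-o', 'main.o'].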
+
+    # the result of this method
+    result = collections.namedtuple('Compilation',
+                                    ['compiler', 'flags', 'files'])
+    result.compiler = compiler_language(command)
+    result.flags = []
+    result.files = []
+    # quit right now, if the program was not a C/C++ compiler
+    if not result.compiler:
+        return None
+    # iterate on the compile options
+    args = iter(command[1:])
+    for arg in args:
+        # quit when compilation pass is not involved
+        if arg in {'-E', '-S', '-cc1', '-M', '-MM', '-###'}:
+            return None
+        # ignore some flags
+        elif arg in IGNORED_FLAGS:
+            count = IGNORED_FLAGS[arg]
+            for _ in range(count):
+                next(args)
+        elif re.match(r'^-(l|L|Wl,).+', arg):
+            pass
+        # the arguments of these options could look like file names,
+        # so take them together as compile options
+        elif arg in {'-D', '-I'}:
+            result.flags.extend([arg, next(args)])
+        # a parameter which looks like a source file is taken as one...
+        elif re.match(r'^[^-].+', arg) and classify_source(arg):
+            result.files.append(arg)
+        # and consider everything else a compile option.
+        else:
+            result.flags.append(arg)
+    # do extra check on number of source files
+    return result if result.files else None
+
+
+def classify_source(filename, c_compiler=True):
+    """ Return the language from file name extension. """
+
+    mapping = {
+        '.c': 'c' if c_compiler else 'c++',
+        '.i': 'c-cpp-output' if c_compiler else 'c++-cpp-output',
+        '.ii': 'c++-cpp-output',
+        '.m': 'objective-c',
+        '.mi': 'objective-c-cpp-output',
+        '.mm': 'objective-c++',
+        '.mii': 'objective-c++-cpp-output',
+        '.C': 'c++',
+        '.cc': 'c++',
+        '.CC': 'c++',
+        '.cp': 'c++',
+        '.cpp': 'c++',
+        '.cxx': 'c++',
+        '.c++': 'c++',
+        '.C++': 'c++',
+        '.txx': 'c++'
+    }
+
+    __, extension = os.path.splitext(os.path.basename(filename))
+    return mapping.get(extension)
+
+
+def compiler_language(command):
+    """ A predicate to decide the command is a compiler call or not.
+
+    Returns 'c' or 'c++' when it match. None otherwise. """
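+
+    # For example (illustrative): ['gcc'] -> 'c', ['g++-7'] -> 'c++',
+    # ['ld'] -> None.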
+
+    cplusplus = re.compile(r'^(.+)(\+\+)(-.+|)$')
+
+    if command:
+        executable = os.path.basename(command[0])
+        if any(pattern.match(executable) for pattern in COMPILER_PATTERNS):
+            return 'c++' if cplusplus.match(executable) else 'c'
+    return None
diff --git a/src/llvm-project/clang/tools/scan-build-py/libscanbuild/intercept.py b/src/llvm-project/clang/tools/scan-build-py/libscanbuild/intercept.py
new file mode 100644
index 0000000..b9bf9e9
--- /dev/null
+++ b/src/llvm-project/clang/tools/scan-build-py/libscanbuild/intercept.py
@@ -0,0 +1,263 @@
+# -*- coding: utf-8 -*-
+#                     The LLVM Compiler Infrastructure
+#
+# This file is distributed under the University of Illinois Open Source
+# License. See LICENSE.TXT for details.
+""" This module is responsible to capture the compiler invocation of any
+build process. The result of that should be a compilation database.
+
+This implementation is using the LD_PRELOAD or DYLD_INSERT_LIBRARIES
+mechanisms provided by the dynamic linker. The related library is implemented
+in C language and can be found under 'libear' directory.
+
+The 'libear' library is capturing all child process creation and logging the
+relevant information about it into separate files in a specified directory.
+The parameter of this process is the output directory name, where the report
+files shall be placed. This parameter is passed as an environment variable.
+
+The module also implements compiler wrappers to intercept the compiler calls.
+
+The module implements the build command execution and the post-processing of
+the output files, which will condensates into a compilation database. """
+
+import sys
+import subprocess
+import os
+import os.path
+import re
+import itertools
+import json
+import glob
+import logging
+from libear import build_libear, TemporaryDirectory
+from libscanbuild import command_entry_point, compiler_wrapper, \
+    wrapper_environment, run_command, run_build
+from libscanbuild import duplicate_check
+from libscanbuild.compilation import split_command
+from libscanbuild.arguments import parse_args_for_intercept_build
+from libscanbuild.shell import encode, decode
+
+__all__ = ['capture', 'intercept_build', 'intercept_compiler_wrapper']
+
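+# ASCII group, record and unit separator control characters, used as field
+# delimiters in the trace files written by libear and the wrapper commands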
+GS = chr(0x1d)
+RS = chr(0x1e)
+US = chr(0x1f)
+
+COMPILER_WRAPPER_CC = 'intercept-cc'
+COMPILER_WRAPPER_CXX = 'intercept-c++'
+TRACE_FILE_EXTENSION = '.cmd'  # same as in ear.c
+WRAPPER_ONLY_PLATFORMS = frozenset({'win32', 'cygwin'})
+
+
+@command_entry_point
+def intercept_build():
+    """ Entry point for 'intercept-build' command. """
+
+    args = parse_args_for_intercept_build()
+    return capture(args)
+
+
+def capture(args):
+    """ The entry point of build command interception. """
+
+    def post_processing(commands):
+        """ To make a compilation database, it needs to filter out commands
+        which are not compiler calls. Needs to find the source file name
+        from the arguments. And do shell escaping on the command.
+
+        To support incremental builds, it is desired to read elements from
+        an existing compilation database from a previous run. These elements
+        shall be merged with the new elements. """
+
+        # create entries from the current run
+        current = itertools.chain.from_iterable(
+            # create a sequence of entry generators from the exec calls
+            format_entry(command) for command in commands)
+        # read entries from previous run
+        if 'append' in args and args.append and os.path.isfile(args.cdb):
+            with open(args.cdb) as handle:
+                previous = iter(json.load(handle))
+        else:
+            previous = iter([])
+        # filter out duplicate entries from both
+        duplicate = duplicate_check(entry_hash)
+        return (entry
+                for entry in itertools.chain(previous, current)
+                if os.path.exists(entry['file']) and not duplicate(entry))
+
+    with TemporaryDirectory(prefix='intercept-') as tmp_dir:
+        # run the build command
+        environment = setup_environment(args, tmp_dir)
+        exit_code = run_build(args.build, env=environment)
+        # read the intercepted exec calls
+        exec_traces = itertools.chain.from_iterable(
+            parse_exec_trace(os.path.join(tmp_dir, filename))
+            for filename in sorted(glob.iglob(os.path.join(tmp_dir, '*.cmd'))))
+        # do post processing
+        entries = post_processing(exec_traces)
+        # dump the compilation database
+        with open(args.cdb, 'w+') as handle:
+            json.dump(list(entries), handle, sort_keys=True, indent=4)
+        return exit_code
+
+
+def setup_environment(args, destination):
+    """ Sets up the environment for the build command.
+
+    It sets the required environment variables so that the exec calls made
+    by the given command will be logged by the preloaded 'libear' library
+    or by the 'wrapper' programs. """
+
+    c_compiler = args.cc if 'cc' in args else 'cc'
+    cxx_compiler = args.cxx if 'cxx' in args else 'c++'
+
+    libear_path = None if args.override_compiler or is_preload_disabled(
+        sys.platform) else build_libear(c_compiler, destination)
+
+    environment = dict(os.environ)
+    environment.update({'INTERCEPT_BUILD_TARGET_DIR': destination})
+
+    if not libear_path:
+        logging.debug('intercept gonna use compiler wrappers')
+        environment.update(wrapper_environment(args))
+        environment.update({
+            'CC': COMPILER_WRAPPER_CC,
+            'CXX': COMPILER_WRAPPER_CXX
+        })
+    elif sys.platform == 'darwin':
+        logging.debug('intercept gonna preload libear on OSX')
+        environment.update({
+            'DYLD_INSERT_LIBRARIES': libear_path,
+            'DYLD_FORCE_FLAT_NAMESPACE': '1'
+        })
+    else:
+        logging.debug('intercept gonna preload libear on UNIX')
+        environment.update({'LD_PRELOAD': libear_path})
+
+    return environment
+
+
+@command_entry_point
+def intercept_compiler_wrapper():
+    """ Entry point for `intercept-cc` and `intercept-c++`. """
+
+    return compiler_wrapper(intercept_compiler_wrapper_impl)
+
+
+def intercept_compiler_wrapper_impl(_, execution):
+    """ Implement intercept compiler wrapper functionality.
+
+    It generates an execution report into the target directory, whose
+    name is taken from an environment variable. """
+
+    message_prefix = 'execution report might be incomplete: %s'
+
+    target_dir = os.getenv('INTERCEPT_BUILD_TARGET_DIR')
+    if not target_dir:
+        logging.warning(message_prefix, 'missing target directory')
+        return
+    # write current execution info to the pid file
+    try:
+        target_file_name = str(os.getpid()) + TRACE_FILE_EXTENSION
+        target_file = os.path.join(target_dir, target_file_name)
+        logging.debug('writing execution report to: %s', target_file)
+        write_exec_trace(target_file, execution)
+    except IOError:
+        logging.warning(message_prefix, 'io problem')
+
+
+def write_exec_trace(filename, entry):
+    """ Write execution report file.
+
+    This method shall be kept in sync with the execution report writer in the
+    interception library. Each entry in the file is a record delimited by
+    the ASCII separator characters GS, RS and US defined above.
+
+    :param filename:    path to the output execution trace file,
+    :param entry:       the Execution object to append to that file. """
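+
+    # Record layout, as written by the code below: one record per exec call,
+    #   pid RS pid RS 'wrapper' RS cwd RS cmd GS
+    # where cmd is the argv tokens joined (and terminated) by US.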
+
+    with open(filename, 'ab') as handler:
+        pid = str(entry.pid)
+        command = US.join(entry.cmd) + US
+        content = RS.join([pid, pid, 'wrapper', entry.cwd, command]) + GS
+        handler.write(content.encode('utf-8'))
+
+
+def parse_exec_trace(filename):
+    """ Parse the file generated by the 'libear' preloaded library.
+
+    The given filename points to a file which contains the basic report
+    generated by the interception library or by a wrapper command. A single
+    report file _might_ contain info about multiple process creations. """
+
+    logging.debug('parse exec trace file: %s', filename)
+    with open(filename, 'r') as handler:
+        content = handler.read()
+        for group in filter(bool, content.split(GS)):
+            records = group.split(RS)
+            yield {
+                'pid': records[0],
+                'ppid': records[1],
+                'function': records[2],
+                'directory': records[3],
+                'command': records[4].split(US)[:-1]
+            }
+
+
+def format_entry(exec_trace):
+    """ Generate the desired fields for compilation database entries. """
+
+    def abspath(cwd, name):
+        """ Create normalized absolute path from input filename. """
+        fullname = name if os.path.isabs(name) else os.path.join(cwd, name)
+        return os.path.normpath(fullname)
+
+    logging.debug('format this command: %s', exec_trace['command'])
+    compilation = split_command(exec_trace['command'])
+    if compilation:
+        for source in compilation.files:
+            compiler = 'c++' if compilation.compiler == 'c++' else 'cc'
+            command = [compiler, '-c'] + compilation.flags + [source]
+            logging.debug('formatted as: %s', command)
+            yield {
+                'directory': exec_trace['directory'],
+                'command': encode(command),
+                'file': abspath(exec_trace['directory'], source)
+            }
+
+
+def is_preload_disabled(platform):
+    """ Library-based interposition will fail silently if SIP is enabled,
+    so this should be detected. You can detect whether SIP is enabled on
+    Darwin by checking whether (1) there is a binary called 'csrutil' in
+    the path and, if so, (2) whether the output of executing 'csrutil status'
+    contains 'System Integrity Protection status: enabled'.
+
+    :param platform: name of the platform (returned by sys.platform),
+    :return: True if library preload will be blocked by the dynamic linker. """
+
+    if platform in WRAPPER_ONLY_PLATFORMS:
+        return True
+    elif platform == 'darwin':
+        command = ['csrutil', 'status']
+        pattern = re.compile(r'System Integrity Protection status:\s+enabled')
+        try:
+            return any(pattern.match(line) for line in run_command(command))
+        except (OSError, subprocess.CalledProcessError):
+            return False
+    else:
+        return False
+
+
+def entry_hash(entry):
+    """ Implement unique hash method for compilation database entries. """
+
+    # for a faster lookup in the set, the filename is reversed
+    filename = entry['file'][::-1]
+    # for a faster lookup in the set, the directory is reversed
+    directory = entry['directory'][::-1]
+    # On OS X the 'cc' and 'c++' compilers are wrappers for
+    # 'clang' therefore both call would be logged. To avoid
+    # this the hash does not contain the first word of the
+    # command.
+    command = ' '.join(decode(entry['command'])[1:])
+
+    return '<>'.join([filename, directory, command])
diff --git a/src/llvm-project/clang/tools/scan-build-py/libscanbuild/report.py b/src/llvm-project/clang/tools/scan-build-py/libscanbuild/report.py
new file mode 100644
index 0000000..b3753c1
--- /dev/null
+++ b/src/llvm-project/clang/tools/scan-build-py/libscanbuild/report.py
@@ -0,0 +1,506 @@
+# -*- coding: utf-8 -*-
+#                     The LLVM Compiler Infrastructure
+#
+# This file is distributed under the University of Illinois Open Source
+# License. See LICENSE.TXT for details.
+""" This module is responsible to generate 'index.html' for the report.
+
+The input for this step is the output directory, where individual reports
+could be found. It parses those reports and generates 'index.html'. """
+
+import re
+import os
+import os.path
+import sys
+import shutil
+import plistlib
+import glob
+import json
+import logging
+import datetime
+from libscanbuild import duplicate_check
+from libscanbuild.clang import get_version
+
+__all__ = ['document']
+
+
+def document(args):
+    """ Generates cover report and returns the number of bugs/crashes. """
+
+    html_reports_available = args.output_format in {'html', 'plist-html'}
+
+    logging.debug('count crashes and bugs')
+    crash_count = sum(1 for _ in read_crashes(args.output))
+    bug_counter = create_counters()
+    for bug in read_bugs(args.output, html_reports_available):
+        bug_counter(bug)
+    result = crash_count + bug_counter.total
+
+    if html_reports_available and result:
+        use_cdb = os.path.exists(args.cdb)
+
+        logging.debug('generate index.html file')
+        # common prefix for source files, to have shorter paths
+        prefix = commonprefix_from(args.cdb) if use_cdb else os.getcwd()
+        # assemble the cover from multiple fragments
+        fragments = []
+        try:
+            if bug_counter.total:
+                fragments.append(bug_summary(args.output, bug_counter))
+                fragments.append(bug_report(args.output, prefix))
+            if crash_count:
+                fragments.append(crash_report(args.output, prefix))
+            assemble_cover(args, prefix, fragments)
+            # copy additional files to the report
+            copy_resource_files(args.output)
+            if use_cdb:
+                shutil.copy(args.cdb, args.output)
+        finally:
+            for fragment in fragments:
+                os.remove(fragment)
+    return result
+
+
+def assemble_cover(args, prefix, fragments):
+    """ Put together the fragments into a final report. """
+
+    import getpass
+    import socket
+
+    if args.html_title is None:
+        args.html_title = os.path.basename(prefix) + ' - analyzer results'
+
+    with open(os.path.join(args.output, 'index.html'), 'w') as handle:
+        indent = 0
+        handle.write(reindent("""
+        |<!DOCTYPE html>
+        |<html>
+        |  <head>
+        |    <title>{html_title}</title>
+        |    <link type="text/css" rel="stylesheet" href="scanview.css"/>
+        |    <script type='text/javascript' src="sorttable.js"></script>
+        |    <script type='text/javascript' src='selectable.js'></script>
+        |  </head>""", indent).format(html_title=args.html_title))
+        handle.write(comment('SUMMARYENDHEAD'))
+        handle.write(reindent("""
+        |  <body>
+        |    <h1>{html_title}</h1>
+        |    <table>
+        |      <tr><th>User:</th><td>{user_name}@{host_name}</td></tr>
+        |      <tr><th>Working Directory:</th><td>{current_dir}</td></tr>
+        |      <tr><th>Command Line:</th><td>{cmd_args}</td></tr>
+        |      <tr><th>Clang Version:</th><td>{clang_version}</td></tr>
+        |      <tr><th>Date:</th><td>{date}</td></tr>
+        |    </table>""", indent).format(html_title=args.html_title,
+                                         user_name=getpass.getuser(),
+                                         host_name=socket.gethostname(),
+                                         current_dir=prefix,
+                                         cmd_args=' '.join(sys.argv),
+                                         clang_version=get_version(args.clang),
+                                         date=datetime.datetime.today(
+                                         ).strftime('%c')))
+        for fragment in fragments:
+            # copy the content of fragments
+            with open(fragment, 'r') as input_handle:
+                shutil.copyfileobj(input_handle, handle)
+        handle.write(reindent("""
+        |  </body>
+        |</html>""", indent))
+
+
+def bug_summary(output_dir, bug_counter):
+    """ Bug summary is a HTML table to give a better overview of the bugs. """
+
+    name = os.path.join(output_dir, 'summary.html.fragment')
+    with open(name, 'w') as handle:
+        indent = 4
+        handle.write(reindent("""
+        |<h2>Bug Summary</h2>
+        |<table>
+        |  <thead>
+        |    <tr>
+        |      <td>Bug Type</td>
+        |      <td>Quantity</td>
+        |      <td class="sorttable_nosort">Display?</td>
+        |    </tr>
+        |  </thead>
+        |  <tbody>""", indent))
+        handle.write(reindent("""
+        |    <tr style="font-weight:bold">
+        |      <td class="SUMM_DESC">All Bugs</td>
+        |      <td class="Q">{0}</td>
+        |      <td>
+        |        <center>
+        |          <input checked type="checkbox" id="AllBugsCheck"
+        |                 onClick="CopyCheckedStateToCheckButtons(this);"/>
+        |        </center>
+        |      </td>
+        |    </tr>""", indent).format(bug_counter.total))
+        for category, types in bug_counter.categories.items():
+            handle.write(reindent("""
+        |    <tr>
+        |      <th>{0}</th><th colspan=2></th>
+        |    </tr>""", indent).format(category))
+            for bug_type in types.values():
+                handle.write(reindent("""
+        |    <tr>
+        |      <td class="SUMM_DESC">{bug_type}</td>
+        |      <td class="Q">{bug_count}</td>
+        |      <td>
+        |        <center>
+        |          <input checked type="checkbox"
+        |                 onClick="ToggleDisplay(this,'{bug_type_class}');"/>
+        |        </center>
+        |      </td>
+        |    </tr>""", indent).format(**bug_type))
+        handle.write(reindent("""
+        |  </tbody>
+        |</table>""", indent))
+        handle.write(comment('SUMMARYBUGEND'))
+    return name
+
+
+def bug_report(output_dir, prefix):
+    """ Creates a fragment from the analyzer reports. """
+
+    pretty = prettify_bug(prefix, output_dir)
+    bugs = (pretty(bug) for bug in read_bugs(output_dir, True))
+
+    name = os.path.join(output_dir, 'bugs.html.fragment')
+    with open(name, 'w') as handle:
+        indent = 4
+        handle.write(reindent("""
+        |<h2>Reports</h2>
+        |<table class="sortable" style="table-layout:automatic">
+        |  <thead>
+        |    <tr>
+        |      <td>Bug Group</td>
+        |      <td class="sorttable_sorted">
+        |        Bug Type
+        |        <span id="sorttable_sortfwdind">&nbsp;&#x25BE;</span>
+        |      </td>
+        |      <td>File</td>
+        |      <td>Function/Method</td>
+        |      <td class="Q">Line</td>
+        |      <td class="Q">Path Length</td>
+        |      <td class="sorttable_nosort"></td>
+        |    </tr>
+        |  </thead>
+        |  <tbody>""", indent))
+        handle.write(comment('REPORTBUGCOL'))
+        for current in bugs:
+            handle.write(reindent("""
+        |    <tr class="{bug_type_class}">
+        |      <td class="DESC">{bug_category}</td>
+        |      <td class="DESC">{bug_type}</td>
+        |      <td>{bug_file}</td>
+        |      <td class="DESC">{bug_function}</td>
+        |      <td class="Q">{bug_line}</td>
+        |      <td class="Q">{bug_path_length}</td>
+        |      <td><a href="{report_file}#EndPath">View Report</a></td>
+        |    </tr>""", indent).format(**current))
+            handle.write(comment('REPORTBUG', {'id': current['report_file']}))
+        handle.write(reindent("""
+        |  </tbody>
+        |</table>""", indent))
+        handle.write(comment('REPORTBUGEND'))
+    return name
+
+
+def crash_report(output_dir, prefix):
+    """ Creates a fragment from the compiler crashes. """
+
+    pretty = prettify_crash(prefix, output_dir)
+    crashes = (pretty(crash) for crash in read_crashes(output_dir))
+
+    name = os.path.join(output_dir, 'crashes.html.fragment')
+    with open(name, 'w') as handle:
+        indent = 4
+        handle.write(reindent("""
+        |<h2>Analyzer Failures</h2>
+        |<p>The analyzer had problems processing the following files:</p>
+        |<table>
+        |  <thead>
+        |    <tr>
+        |      <td>Problem</td>
+        |      <td>Source File</td>
+        |      <td>Preprocessed File</td>
+        |      <td>STDERR Output</td>
+        |    </tr>
+        |  </thead>
+        |  <tbody>""", indent))
+        for current in crashes:
+            handle.write(reindent("""
+        |    <tr>
+        |      <td>{problem}</td>
+        |      <td>{source}</td>
+        |      <td><a href="{file}">preprocessor output</a></td>
+        |      <td><a href="{stderr}">analyzer std err</a></td>
+        |    </tr>""", indent).format(**current))
+            handle.write(comment('REPORTPROBLEM', current))
+        handle.write(reindent("""
+        |  </tbody>
+        |</table>""", indent))
+        handle.write(comment('REPORTCRASHES'))
+    return name
+
+
+def read_crashes(output_dir):
+    """ Generate a unique sequence of crashes from given output directory. """
+
+    return (parse_crash(filename)
+            for filename in glob.iglob(os.path.join(output_dir, 'failures',
+                                                    '*.info.txt')))
+
+
+def read_bugs(output_dir, html):
+    # type: (str, bool) -> Generator[Dict[str, Any], None, None]
+    """ Generate a unique sequence of bugs from given output directory.
+
+    Duplicates can be in a project if the same module was compiled multiple
+    times with different compiler options. These would be better to show in
+    the final report (cover) only once. """
+
+    def empty(file_name):
+        return os.stat(file_name).st_size == 0
+
+    duplicate = duplicate_check(
+        lambda bug: '{bug_line}.{bug_path_length}:{bug_file}'.format(**bug))
+
+    # get the right parser for the job.
+    parser = parse_bug_html if html else parse_bug_plist
+    # get the input files, which are not empty.
+    pattern = os.path.join(output_dir, '*.html' if html else '*.plist')
+    bug_files = (file for file in glob.iglob(pattern) if not empty(file))
+
+    for bug_file in bug_files:
+        for bug in parser(bug_file):
+            if not duplicate(bug):
+                yield bug
+
+
+def parse_bug_plist(filename):
+    """ Returns the generator of bugs from a single .plist file. """
+
+    content = plistlib.readPlist(filename)
+    files = content.get('files')
+    for bug in content.get('diagnostics', []):
+        if len(files) <= int(bug['location']['file']):
+            logging.warning('Parsing bug from "%s" failed', filename)
+            continue
+
+        yield {
+            'result': filename,
+            'bug_type': bug['type'],
+            'bug_category': bug['category'],
+            'bug_line': int(bug['location']['line']),
+            'bug_path_length': int(bug['location']['col']),
+            'bug_file': files[int(bug['location']['file'])]
+        }
+
+
+def parse_bug_html(filename):
+    """ Parse out the bug information from HTML output. """
+
+    patterns = [re.compile(r'<!-- BUGTYPE (?P<bug_type>.*) -->$'),
+                re.compile(r'<!-- BUGFILE (?P<bug_file>.*) -->$'),
+                re.compile(r'<!-- BUGPATHLENGTH (?P<bug_path_length>.*) -->$'),
+                re.compile(r'<!-- BUGLINE (?P<bug_line>.*) -->$'),
+                re.compile(r'<!-- BUGCATEGORY (?P<bug_category>.*) -->$'),
+                re.compile(r'<!-- BUGDESC (?P<bug_description>.*) -->$'),
+                re.compile(r'<!-- FUNCTIONNAME (?P<bug_function>.*) -->$')]
+    endsign = re.compile(r'<!-- BUGMETAEND -->')
+
+    bug = {
+        'report_file': filename,
+        'bug_function': 'n/a',  # compatibility with < clang-3.5
+        'bug_category': 'Other',
+        'bug_line': 0,
+        'bug_path_length': 1
+    }
+
+    with open(filename) as handler:
+        for line in handler.readlines():
+            # do not read the file further
+            if endsign.match(line):
+                break
+            # search for the right lines
+            for regex in patterns:
+                match = regex.match(line.strip())
+                if match:
+                    bug.update(match.groupdict())
+                    break
+
+    encode_value(bug, 'bug_line', int)
+    encode_value(bug, 'bug_path_length', int)
+
+    yield bug
+
+
+def parse_crash(filename):
+    """ Parse out the crash information from the report file. """
+
+    match = re.match(r'(.*)\.info\.txt', filename)
+    name = match.group(1) if match else None
+    with open(filename, mode='rb') as handler:
+        # this is a workaround for Windows reading '\r\n' as new lines.
+        lines = [line.decode().rstrip() for line in handler.readlines()]
+        return {
+            'source': lines[0],
+            'problem': lines[1],
+            'file': name,
+            'info': name + '.info.txt',
+            'stderr': name + '.stderr.txt'
+        }
+
+
+def category_type_name(bug):
+    """ Create a new bug attribute from bug by category and type.
+
+    The result will be used as CSS class selector in the final report. """
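+
+    # For example (illustrative): category 'Logic error' and type
+    # 'Division by zero' become 'bt_logic_error_division_by_zero'.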
+
+    def smash(key):
+        """ Make value ready to be HTML attribute value. """
+
+        return bug.get(key, '').lower().replace(' ', '_').replace("'", '')
+
+    return escape('bt_' + smash('bug_category') + '_' + smash('bug_type'))
+
+
+def create_counters():
+    """ Create counters for bug statistics.
+
+    Two entries are maintained: 'total' is an integer which represents the
+    number of bugs, and 'categories' is a two-level categorisation of bug
+    counters. The first level is the 'bug category', the second is the
+    'bug type'. Each entry in this classification is a dictionary of
+    'bug_type', 'bug_type_class' and 'bug_count'. """
+
+    def predicate(bug):
+        bug_category = bug['bug_category']
+        bug_type = bug['bug_type']
+        current_category = predicate.categories.get(bug_category, dict())
+        current_type = current_category.get(bug_type, {
+            'bug_type': bug_type,
+            'bug_type_class': category_type_name(bug),
+            'bug_count': 0
+        })
+        current_type.update({'bug_count': current_type['bug_count'] + 1})
+        current_category.update({bug_type: current_type})
+        predicate.categories.update({bug_category: current_category})
+        predicate.total += 1
+
+    predicate.total = 0
+    predicate.categories = dict()
+    return predicate
+
+
+def prettify_bug(prefix, output_dir):
+    def predicate(bug):
+        """ Make safe this values to embed into HTML. """
+
+        bug['bug_type_class'] = category_type_name(bug)
+
+        encode_value(bug, 'bug_file', lambda x: escape(chop(prefix, x)))
+        encode_value(bug, 'bug_category', escape)
+        encode_value(bug, 'bug_type', escape)
+        encode_value(bug, 'report_file', lambda x: escape(chop(output_dir, x)))
+        return bug
+
+    return predicate
+
+
+def prettify_crash(prefix, output_dir):
+    def predicate(crash):
+        """ Make safe this values to embed into HTML. """
+
+        encode_value(crash, 'source', lambda x: escape(chop(prefix, x)))
+        encode_value(crash, 'problem', escape)
+        encode_value(crash, 'file', lambda x: escape(chop(output_dir, x)))
+        encode_value(crash, 'info', lambda x: escape(chop(output_dir, x)))
+        encode_value(crash, 'stderr', lambda x: escape(chop(output_dir, x)))
+        return crash
+
+    return predicate
+
+
+def copy_resource_files(output_dir):
+    """ Copy the javascript and css files to the report directory. """
+
+    this_dir = os.path.dirname(os.path.realpath(__file__))
+    for resource in os.listdir(os.path.join(this_dir, 'resources')):
+        shutil.copy(os.path.join(this_dir, 'resources', resource), output_dir)
+
+
+def encode_value(container, key, encode):
+    """ Run 'encode' on 'container[key]' value and update it. """
+
+    if key in container:
+        value = encode(container[key])
+        container.update({key: value})
+
+
+def chop(prefix, filename):
+    """ Create 'filename' from '/prefix/filename' """
+
+    return filename if not len(prefix) else os.path.relpath(filename, prefix)
+
+
+def escape(text):
+    """ Paranoid HTML escape method. (Python version independent) """
+
+    escape_table = {
+        '&': '&amp;',
+        '"': '&quot;',
+        "'": '&apos;',
+        '>': '&gt;',
+        '<': '&lt;'
+    }
+    return ''.join(escape_table.get(c, c) for c in text)
+
+
+def reindent(text, indent):
+    """ Utility function to format html output and keep indentation. """
+
+    result = ''
+    for line in text.splitlines():
+        if len(line.strip()):
+            result += ' ' * indent + line.split('|')[1] + os.linesep
+    return result
+
+
+def comment(name, opts=dict()):
+    """ Utility function to format meta information as comment. """
+
+    attributes = ''
+    for key, value in opts.items():
+        attributes += ' {0}="{1}"'.format(key, value)
+
+    return '<!-- {0}{1} -->{2}'.format(name, attributes, os.linesep)
+
+
+def commonprefix_from(filename):
+    """ Create file prefix from a compilation database entries. """
+
+    with open(filename, 'r') as handle:
+        return commonprefix(item['file'] for item in json.load(handle))
+
+
+def commonprefix(files):
+    """ Fixed version of os.path.commonprefix.
+
+    :param files: list of file names.
+    :return: the longest path prefix that is a prefix of all files. """
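+
+    # For example (illustrative, assuming '/tmp/a' does not itself exist as
+    # a directory): ['/tmp/ab/x.c', '/tmp/ac/y.c'] -> '/tmp', whereas
+    # os.path.commonprefix alone would return the bogus '/tmp/a'.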
+    result = None
+    for current in files:
+        if result is not None:
+            result = os.path.commonprefix([result, current])
+        else:
+            result = current
+
+    if result is None:
+        return ''
+    elif not os.path.isdir(result):
+        return os.path.dirname(result)
+    else:
+        return os.path.abspath(result)
diff --git a/src/llvm-project/clang/tools/scan-build-py/libscanbuild/resources/scanview.css b/src/llvm-project/clang/tools/scan-build-py/libscanbuild/resources/scanview.css
new file mode 100644
index 0000000..cf8a5a6
--- /dev/null
+++ b/src/llvm-project/clang/tools/scan-build-py/libscanbuild/resources/scanview.css
@@ -0,0 +1,62 @@
+body { color:#000000; background-color:#ffffff }
+body { font-family: Helvetica, sans-serif; font-size:9pt }
+h1 { font-size: 14pt; }
+h2 { font-size: 12pt; }
+table { font-size:9pt }
+table { border-spacing: 0px; border: 1px solid black }
+th, table thead {
+  background-color:#eee; color:#666666;
+  font-weight: bold; cursor: default;
+  text-align:center;
+  font-weight: bold; font-family: Verdana;
+  white-space:nowrap;
+}
+.W { font-size:0px }
+th, td { padding:5px; padding-left:8px; text-align:left }
+td.SUMM_DESC { padding-left:12px }
+td.DESC { white-space:pre }
+td.Q { text-align:right }
+td { text-align:left }
+tbody.scrollContent { overflow:auto }
+
+table.form_group {
+    background-color: #ccc;
+    border: 1px solid #333;
+    padding: 2px;
+}
+
+table.form_inner_group {
+    background-color: #ccc;
+    border: 1px solid #333;
+    padding: 0px;
+}
+
+table.form {
+    background-color: #999;
+    border: 1px solid #333;
+    padding: 2px;
+}
+
+td.form_label {
+    text-align: right;
+    vertical-align: top;
+}
+/* For one line entries */
+td.form_clabel {
+    text-align: right;
+    vertical-align: center;
+}
+td.form_value {
+    text-align: left;
+    vertical-align: top;
+}
+td.form_submit {
+    text-align: right;
+    vertical-align: top;
+}
+
+h1.SubmitFail {
+    color: #f00;
+}
+h1.SubmitOk {
+}
diff --git a/src/llvm-project/clang/tools/scan-build-py/libscanbuild/resources/selectable.js b/src/llvm-project/clang/tools/scan-build-py/libscanbuild/resources/selectable.js
new file mode 100644
index 0000000..53f6a8d
--- /dev/null
+++ b/src/llvm-project/clang/tools/scan-build-py/libscanbuild/resources/selectable.js
@@ -0,0 +1,47 @@
+function SetDisplay(RowClass, DisplayVal)
+{
+  var Rows = document.getElementsByTagName("tr");
+  for ( var i = 0 ; i < Rows.length; ++i ) {
+    if (Rows[i].className == RowClass) {
+      Rows[i].style.display = DisplayVal;
+    }
+  }
+}
+
+function CopyCheckedStateToCheckButtons(SummaryCheckButton) {
+  var Inputs = document.getElementsByTagName("input");
+  for ( var i = 0 ; i < Inputs.length; ++i ) {
+    if (Inputs[i].type == "checkbox") {
+      if(Inputs[i] != SummaryCheckButton) {
+        Inputs[i].checked = SummaryCheckButton.checked;
+        Inputs[i].onclick();
+	  }
+    }
+  }
+}
+
+function returnObjById( id ) {
+    if (document.getElementById)
+        var returnVar = document.getElementById(id);
+    else if (document.all)
+        var returnVar = document.all[id];
+    else if (document.layers)
+        var returnVar = document.layers[id];
+    return returnVar;
+}
+
+var NumUnchecked = 0;
+
+function ToggleDisplay(CheckButton, ClassName) {
+  if (CheckButton.checked) {
+    SetDisplay(ClassName, "");
+    if (--NumUnchecked == 0) {
+      returnObjById("AllBugsCheck").checked = true;
+    }
+  }
+  else {
+    SetDisplay(ClassName, "none");
+    NumUnchecked++;
+    returnObjById("AllBugsCheck").checked = false;
+  }
+}
diff --git a/src/llvm-project/clang/tools/scan-build-py/libscanbuild/resources/sorttable.js b/src/llvm-project/clang/tools/scan-build-py/libscanbuild/resources/sorttable.js
new file mode 100644
index 0000000..32faa07
--- /dev/null
+++ b/src/llvm-project/clang/tools/scan-build-py/libscanbuild/resources/sorttable.js
@@ -0,0 +1,492 @@
+/*
+  SortTable
+  version 2
+  7th April 2007
+  Stuart Langridge, http://www.kryogenix.org/code/browser/sorttable/
+
+  Instructions:
+  Download this file
+  Add <script src="sorttable.js"></script> to your HTML
+  Add class="sortable" to any table you'd like to make sortable
+  Click on the headers to sort
+
+  Thanks to many, many people for contributions and suggestions.
+  Licenced as X11: http://www.kryogenix.org/code/browser/licence.html
+  This basically means: do what you want with it.
+*/
+
+
+var stIsIE = /*@cc_on!@*/false;
+
+sorttable = {
+  init: function() {
+    // quit if this function has already been called
+    if (arguments.callee.done) return;
+    // flag this function so we don't do the same thing twice
+    arguments.callee.done = true;
+    // kill the timer
+    if (_timer) clearInterval(_timer);
+
+    if (!document.createElement || !document.getElementsByTagName) return;
+
+    sorttable.DATE_RE = /^(\d\d?)[\/\.-](\d\d?)[\/\.-]((\d\d)?\d\d)$/;
+
+    forEach(document.getElementsByTagName('table'), function(table) {
+      if (table.className.search(/\bsortable\b/) != -1) {
+        sorttable.makeSortable(table);
+      }
+    });
+
+  },
+
+  makeSortable: function(table) {
+    if (table.getElementsByTagName('thead').length == 0) {
+      // table doesn't have a tHead. Since it should have, create one and
+      // put the first table row in it.
+      the = document.createElement('thead');
+      the.appendChild(table.rows[0]);
+      table.insertBefore(the,table.firstChild);
+    }
+    // Safari doesn't support table.tHead, sigh
+    if (table.tHead == null) table.tHead = table.getElementsByTagName('thead')[0];
+
+    if (table.tHead.rows.length != 1) return; // can't cope with two header rows
+
+    // Sorttable v1 put rows with a class of "sortbottom" at the bottom (as
+    // "total" rows, for example). This is B&R, since what you're supposed
+    // to do is put them in a tfoot. So, if there are sortbottom rows,
+    // for backward compatibility, move them to tfoot (creating it if needed).
+    sortbottomrows = [];
+    for (var i=0; i<table.rows.length; i++) {
+      if (table.rows[i].className.search(/\bsortbottom\b/) != -1) {
+        sortbottomrows[sortbottomrows.length] = table.rows[i];
+      }
+    }
+    if (sortbottomrows) {
+      if (table.tFoot == null) {
+        // table doesn't have a tfoot. Create one.
+        tfo = document.createElement('tfoot');
+        table.appendChild(tfo);
+      }
+      for (var i=0; i<sortbottomrows.length; i++) {
+        tfo.appendChild(sortbottomrows[i]);
+      }
+      delete sortbottomrows;
+    }
+
+    // work through each column and calculate its type
+    headrow = table.tHead.rows[0].cells;
+    for (var i=0; i<headrow.length; i++) {
+      // manually override the type with a sorttable_type attribute
+      if (!headrow[i].className.match(/\bsorttable_nosort\b/)) { // skip this col
+        mtch = headrow[i].className.match(/\bsorttable_([a-z0-9]+)\b/);
+        if (mtch) { override = mtch[1]; }
+	      if (mtch && typeof sorttable["sort_"+override] == 'function') {
+	        headrow[i].sorttable_sortfunction = sorttable["sort_"+override];
+	      } else {
+	        headrow[i].sorttable_sortfunction = sorttable.guessType(table,i);
+	      }
+	      // make it clickable to sort
+	      headrow[i].sorttable_columnindex = i;
+	      headrow[i].sorttable_tbody = table.tBodies[0];
+	      dean_addEvent(headrow[i],"click", function(e) {
+
+          if (this.className.search(/\bsorttable_sorted\b/) != -1) {
+            // if we're already sorted by this column, just
+            // reverse the table, which is quicker
+            sorttable.reverse(this.sorttable_tbody);
+            this.className = this.className.replace('sorttable_sorted',
+                                                    'sorttable_sorted_reverse');
+            this.removeChild(document.getElementById('sorttable_sortfwdind'));
+            sortrevind = document.createElement('span');
+            sortrevind.id = "sorttable_sortrevind";
+            sortrevind.innerHTML = stIsIE ? '&nbsp<font face="webdings">5</font>' : '&nbsp;&#x25B4;';
+            this.appendChild(sortrevind);
+            return;
+          }
+          if (this.className.search(/\bsorttable_sorted_reverse\b/) != -1) {
+            // if we're already sorted by this column in reverse, just
+            // re-reverse the table, which is quicker
+            sorttable.reverse(this.sorttable_tbody);
+            this.className = this.className.replace('sorttable_sorted_reverse',
+                                                    'sorttable_sorted');
+            this.removeChild(document.getElementById('sorttable_sortrevind'));
+            sortfwdind = document.createElement('span');
+            sortfwdind.id = "sorttable_sortfwdind";
+            sortfwdind.innerHTML = stIsIE ? '&nbsp<font face="webdings">6</font>' : '&nbsp;&#x25BE;';
+            this.appendChild(sortfwdind);
+            return;
+          }
+
+          // remove sorttable_sorted classes
+          theadrow = this.parentNode;
+          forEach(theadrow.childNodes, function(cell) {
+            if (cell.nodeType == 1) { // an element
+              cell.className = cell.className.replace('sorttable_sorted_reverse','');
+              cell.className = cell.className.replace('sorttable_sorted','');
+            }
+          });
+          sortfwdind = document.getElementById('sorttable_sortfwdind');
+          if (sortfwdind) { sortfwdind.parentNode.removeChild(sortfwdind); }
+          sortrevind = document.getElementById('sorttable_sortrevind');
+          if (sortrevind) { sortrevind.parentNode.removeChild(sortrevind); }
+
+          this.className += ' sorttable_sorted';
+          sortfwdind = document.createElement('span');
+          sortfwdind.id = "sorttable_sortfwdind";
+          sortfwdind.innerHTML = stIsIE ? '&nbsp<font face="webdings">6</font>' : '&nbsp;&#x25BE;';
+          this.appendChild(sortfwdind);
+
+	        // build an array to sort. This is a Schwartzian transform thing,
+	        // i.e., we "decorate" each row with the actual sort key,
+	        // sort based on the sort keys, and then put the rows back in order
+	        // which is a lot faster because you only do getInnerText once per row
+	        row_array = [];
+	        col = this.sorttable_columnindex;
+	        rows = this.sorttable_tbody.rows;
+	        for (var j=0; j<rows.length; j++) {
+	          row_array[row_array.length] = [sorttable.getInnerText(rows[j].cells[col]), rows[j]];
+	        }
+	        /* If you want a stable sort, uncomment the following line */
+	        sorttable.shaker_sort(row_array, this.sorttable_sortfunction);
+	        /* and comment out this one */
+	        //row_array.sort(this.sorttable_sortfunction);
+
+	        tb = this.sorttable_tbody;
+	        for (var j=0; j<row_array.length; j++) {
+	          tb.appendChild(row_array[j][1]);
+	        }
+
+	        delete row_array;
+	      });
+	    }
+    }
+  },
+
+  guessType: function(table, column) {
+    // guess the type of a column based on its first non-blank row
+    sortfn = sorttable.sort_alpha;
+    for (var i=0; i<table.tBodies[0].rows.length; i++) {
+      text = sorttable.getInnerText(table.tBodies[0].rows[i].cells[column]);
+      if (text != '') {
+        if (text.match(/^-?[£$¤]?[\d,.]+%?$/)) {
+          return sorttable.sort_numeric;
+        }
+        // check for a date: dd/mm/yyyy or dd/mm/yy
+        // can have / or . or - as separator
+        // can be mm/dd as well
+        possdate = text.match(sorttable.DATE_RE)
+        if (possdate) {
+          // looks like a date
+          first = parseInt(possdate[1]);
+          second = parseInt(possdate[2]);
+          if (first > 12) {
+            // definitely dd/mm
+            return sorttable.sort_ddmm;
+          } else if (second > 12) {
+            return sorttable.sort_mmdd;
+          } else {
+            // looks like a date, but we can't tell which, so assume
+            // that it's dd/mm (English imperialism!) and keep looking
+            sortfn = sorttable.sort_ddmm;
+          }
+        }
+      }
+    }
+    return sortfn;
+  },
+
+  getInnerText: function(node) {
+    // gets the text we want to use for sorting for a cell.
+    // strips leading and trailing whitespace.
+    // this is *not* a generic getInnerText function; it's special to sorttable.
+    // for example, you can override the cell text with a customkey attribute.
+    // it also gets .value for <input> fields.
+
+    hasInputs = (typeof node.getElementsByTagName == 'function') &&
+                 node.getElementsByTagName('input').length;
+
+    if (node.getAttribute("sorttable_customkey") != null) {
+      return node.getAttribute("sorttable_customkey");
+    }
+    else if (typeof node.textContent != 'undefined' && !hasInputs) {
+      return node.textContent.replace(/^\s+|\s+$/g, '');
+    }
+    else if (typeof node.innerText != 'undefined' && !hasInputs) {
+      return node.innerText.replace(/^\s+|\s+$/g, '');
+    }
+    else if (typeof node.text != 'undefined' && !hasInputs) {
+      return node.text.replace(/^\s+|\s+$/g, '');
+    }
+    else {
+      switch (node.nodeType) {
+        case 3:
+          if (node.nodeName.toLowerCase() == 'input') {
+            return node.value.replace(/^\s+|\s+$/g, '');
+          }
+        case 4:
+          return node.nodeValue.replace(/^\s+|\s+$/g, '');
+          break;
+        case 1:
+        case 11:
+          var innerText = '';
+          for (var i = 0; i < node.childNodes.length; i++) {
+            innerText += sorttable.getInnerText(node.childNodes[i]);
+          }
+          return innerText.replace(/^\s+|\s+$/g, '');
+          break;
+        default:
+          return '';
+      }
+    }
+  },
+
+  reverse: function(tbody) {
+    // reverse the rows in a tbody
+    newrows = [];
+    for (var i=0; i<tbody.rows.length; i++) {
+      newrows[newrows.length] = tbody.rows[i];
+    }
+    for (var i=newrows.length-1; i>=0; i--) {
+       tbody.appendChild(newrows[i]);
+    }
+    delete newrows;
+  },
+
+  /* sort functions
+     each sort function takes two parameters, a and b
+     you are comparing a[0] and b[0] */
+  sort_numeric: function(a,b) {
+    var aa = parseFloat(a[0].replace(/[^0-9.-]/g,''));
+    if (isNaN(aa)) aa = 0;
+    var bb = parseFloat(b[0].replace(/[^0-9.-]/g,''));
+    if (isNaN(bb)) bb = 0;
+    return aa-bb;
+  },
+  sort_alpha: function(a,b) {
+    if (a[0]==b[0]) return 0;
+    if (a[0]<b[0]) return -1;
+    return 1;
+  },
+  sort_ddmm: function(a,b) {
+    var mtch = a[0].match(sorttable.DATE_RE);
+    var y = mtch[3], m = mtch[2], d = mtch[1];
+    if (m.length == 1) m = '0'+m;
+    if (d.length == 1) d = '0'+d;
+    var dt1 = y+m+d;
+    mtch = b[0].match(sorttable.DATE_RE);
+    y = mtch[3]; m = mtch[2]; d = mtch[1];
+    if (m.length == 1) m = '0'+m;
+    if (d.length == 1) d = '0'+d;
+    var dt2 = y+m+d;
+    if (dt1==dt2) return 0;
+    if (dt1<dt2) return -1;
+    return 1;
+  },
+  sort_mmdd: function(a,b) {
+    var mtch = a[0].match(sorttable.DATE_RE);
+    var y = mtch[3], d = mtch[2], m = mtch[1];
+    if (m.length == 1) m = '0'+m;
+    if (d.length == 1) d = '0'+d;
+    var dt1 = y+m+d;
+    mtch = b[0].match(sorttable.DATE_RE);
+    y = mtch[3]; d = mtch[2]; m = mtch[1];
+    if (m.length == 1) m = '0'+m;
+    if (d.length == 1) d = '0'+d;
+    var dt2 = y+m+d;
+    if (dt1==dt2) return 0;
+    if (dt1<dt2) return -1;
+    return 1;
+  },
+
+  shaker_sort: function(list, comp_func) {
+    // A stable sort function to allow multi-level sorting of data
+    // see: http://en.wikipedia.org/wiki/Cocktail_sort
+    // thanks to Joseph Nahmias
+    var b = 0;
+    var t = list.length - 1;
+    var swap = true;
+
+    while(swap) {
+        swap = false;
+        for(var i = b; i < t; ++i) {
+            if ( comp_func(list[i], list[i+1]) > 0 ) {
+                var q = list[i]; list[i] = list[i+1]; list[i+1] = q;
+                swap = true;
+            }
+        } // for
+        t--;
+
+        if (!swap) break;
+
+        for(var i = t; i > b; --i) {
+            if ( comp_func(list[i], list[i-1]) < 0 ) {
+                var q = list[i]; list[i] = list[i-1]; list[i-1] = q;
+                swap = true;
+            }
+        } // for
+        b++;
+
+    } // while(swap)
+  }
+}
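
The dd/mm-versus-mm/dd logic in guessType above hinges on a single decisive signal: a date component greater than 12. When no row in the column provides one, the code assumes day-first ordering and keeps scanning. A minimal Python re-statement of that heuristic, for illustration only (the regex is an approximation of sorttable.DATE_RE, which is defined earlier in the JS file):

    import re

    # Approximation of sorttable.DATE_RE; the exact pattern is hypothetical here.
    DATE_RE = re.compile(r'^(\d\d?)[/.\-](\d\d?)[/.\-]((\d\d)?\d\d)$')

    def guess_date_order(text):
        """ Return 'ddmm', 'mmdd' or None for a single cell value. """
        match = DATE_RE.match(text.strip())
        if not match:
            return None
        first, second = int(match.group(1)), int(match.group(2))
        if first > 12:
            return 'ddmm'  # e.g. 13/01/2019 can only be day-first
        if second > 12:
            return 'mmdd'  # e.g. 01/13/2019 can only be month-first
        return 'ddmm'      # ambiguous: default to day-first, as the JS does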
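
Likewise, shaker_sort exists because Array.prototype.sort was not guaranteed to be stable in the engines of the day, and stability is what makes sorting by column B and then by column A behave as a multi-level sort. The same cocktail sort in Python, purely as a sketch (Python's built-in sorted() is already stable, so in practice you would use that):

    def shaker_sort(items, key):
        """ Stable cocktail sort: equal keys keep their original order. """
        items = list(items)
        bottom, top, swapped = 0, len(items) - 1, True
        while swapped:
            swapped = False
            for i in range(bottom, top):      # forward pass floats the max up
                if key(items[i]) > key(items[i + 1]):
                    items[i], items[i + 1] = items[i + 1], items[i]
                    swapped = True
            top -= 1
            if not swapped:
                break
            for i in range(top, bottom, -1):  # backward pass sinks the min down
                if key(items[i]) < key(items[i - 1]):
                    items[i], items[i - 1] = items[i - 1], items[i]
                    swapped = True
            bottom += 1
        return items

Because only strictly out-of-order neighbors are swapped, sorting rows first by a date key and then by a name key leaves equal names ordered by date, which is exactly the behavior the JS relies on.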
+
+/* ******************************************************************
+   Supporting functions: bundled here to avoid depending on a library
+   ****************************************************************** */
+
+// Dean Edwards/Matthias Miller/John Resig
+
+/* for Mozilla/Opera9 */
+if (document.addEventListener) {
+    document.addEventListener("DOMContentLoaded", sorttable.init, false);
+}
+
+/* for Internet Explorer */
+/*@cc_on @*/
+/*@if (@_win32)
+    document.write("<script id=__ie_onload defer src=javascript:void(0)><\/script>");
+    var script = document.getElementById("__ie_onload");
+    script.onreadystatechange = function() {
+        if (this.readyState == "complete") {
+            sorttable.init(); // call the onload handler
+        }
+    };
+/*@end @*/
+
+/* for Safari */
+if (/WebKit/i.test(navigator.userAgent)) { // sniff
+    var _timer = setInterval(function() {
+        if (/loaded|complete/.test(document.readyState)) {
+            sorttable.init(); // call the onload handler
+        }
+    }, 10);
+}
+
+/* for other browsers */
+window.onload = sorttable.init;
+
+// written by Dean Edwards, 2005
+// with input from Tino Zijdel, Matthias Miller, Diego Perini
+
+// http://dean.edwards.name/weblog/2005/10/add-event/
+
+function dean_addEvent(element, type, handler) {
+	if (element.addEventListener) {
+		element.addEventListener(type, handler, false);
+	} else {
+		// assign each event handler a unique ID
+		if (!handler.$$guid) handler.$$guid = dean_addEvent.guid++;
+		// create a hash table of event types for the element
+		if (!element.events) element.events = {};
+		// create a hash table of event handlers for each element/event pair
+		var handlers = element.events[type];
+		if (!handlers) {
+			handlers = element.events[type] = {};
+			// store the existing event handler (if there is one)
+			if (element["on" + type]) {
+				handlers[0] = element["on" + type];
+			}
+		}
+		// store the event handler in the hash table
+		handlers[handler.$$guid] = handler;
+		// assign a global event handler to do all the work
+		element["on" + type] = handleEvent;
+	}
+};
+// a counter used to create unique IDs
+dean_addEvent.guid = 1;
+
+function removeEvent(element, type, handler) {
+	if (element.removeEventListener) {
+		element.removeEventListener(type, handler, false);
+	} else {
+		// delete the event handler from the hash table
+		if (element.events && element.events[type]) {
+			delete element.events[type][handler.$$guid];
+		}
+	}
+};
+
+function handleEvent(event) {
+	var returnValue = true;
+	// grab the event object (IE uses a global event object)
+	event = event || fixEvent(((this.ownerDocument || this.document || this).parentWindow || window).event);
+	// get a reference to the hash table of event handlers
+	var handlers = this.events[event.type];
+	// execute each event handler
+	for (var i in handlers) {
+		this.$$handleEvent = handlers[i];
+		if (this.$$handleEvent(event) === false) {
+			returnValue = false;
+		}
+	}
+	return returnValue;
+};
+
+function fixEvent(event) {
+	// add W3C standard event methods
+	event.preventDefault = fixEvent.preventDefault;
+	event.stopPropagation = fixEvent.stopPropagation;
+	return event;
+};
+fixEvent.preventDefault = function() {
+	this.returnValue = false;
+};
+fixEvent.stopPropagation = function() {
+	this.cancelBubble = true;
+};
+
+// Dean's forEach: http://dean.edwards.name/base/forEach.js
+/*
+	forEach, version 1.0
+	Copyright 2006, Dean Edwards
+	License: http://www.opensource.org/licenses/mit-license.php
+*/
+
+// array-like enumeration
+if (!Array.forEach) { // mozilla already supports this
+	Array.forEach = function(array, block, context) {
+		for (var i = 0; i < array.length; i++) {
+			block.call(context, array[i], i, array);
+		}
+	};
+}
+
+// generic enumeration
+Function.prototype.forEach = function(object, block, context) {
+	for (var key in object) {
+		if (typeof this.prototype[key] == "undefined") {
+			block.call(context, object[key], key, object);
+		}
+	}
+};
+
+// character enumeration
+String.forEach = function(string, block, context) {
+	Array.forEach(string.split(""), function(chr, index) {
+		block.call(context, chr, index, string);
+	});
+};
+
+// globally resolve forEach enumeration
+var forEach = function(object, block, context) {
+	if (object) {
+		var resolve = Object; // default
+		if (object instanceof Function) {
+			// functions have a "length" property
+			resolve = Function;
+		} else if (object.forEach instanceof Function) {
+			// the object implements a custom forEach method so use that
+			object.forEach(block, context);
+			return;
+		} else if (typeof object == "string") {
+			// the object is a string
+			resolve = String;
+		} else if (typeof object.length == "number") {
+			// the object is array-like
+			resolve = Array;
+		}
+		resolve.forEach(object, block, context);
+	}
+};
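
The forEach wrapper above is hand-rolled type dispatch: inspect the object at run time and pick the matching enumeration strategy. For comparison only (none of this is in the patch), the same shape falls out of functools.singledispatch in Python:

    from functools import singledispatch

    @singledispatch
    def for_each(obj, block):
        """ Generic enumeration: assume a dict-like object. """
        for key, value in obj.items():
            block(value, key)

    @for_each.register(str)
    def _(obj, block):
        # character enumeration, like String.forEach above
        for index, char in enumerate(obj):
            block(char, index)

    @for_each.register(list)
    def _(obj, block):
        # array-like enumeration, like Array.forEach above
        for index, item in enumerate(obj):
            block(item, index)

    for_each('abc', lambda value, key: print(key, value))  # 0 a / 1 b / 2 c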
diff --git a/src/llvm-project/clang/tools/scan-build-py/libscanbuild/shell.py b/src/llvm-project/clang/tools/scan-build-py/libscanbuild/shell.py
new file mode 100644
index 0000000..a575946
--- /dev/null
+++ b/src/llvm-project/clang/tools/scan-build-py/libscanbuild/shell.py
@@ -0,0 +1,66 @@
+# -*- coding: utf-8 -*-
+#                     The LLVM Compiler Infrastructure
+#
+# This file is distributed under the University of Illinois Open Source
+# License. See LICENSE.TXT for details.
+""" This module implements basic shell escaping/unescaping methods. """
+
+import re
+import shlex
+
+__all__ = ['encode', 'decode']
+
+
+def encode(command):
+    """ Takes a command as a list of arguments and returns a single string. """
+
+    def needs_quote(word):
+        """ Returns True if the argument needs to be protected by quotes.
+
+        A previous implementation used shlex.split, but that is not suited
+        to this job. The current one walks through the string with a small
+        state machine. """
+
+        reserved = {' ', '$', '%', '&', '(', ')', '[', ']', '{', '}', '*', '|',
+                    '<', '>', '@', '?', '!'}
+        # States: 0 = unquoted, 1 = just saw a backslash, 2 = inside double
+        # quotes, 3 = inside single quotes.
+        state = 0
+        for current in word:
+            if state == 0 and current in reserved:
+                return True
+            elif state == 0 and current == '\\':
+                state = 1
+            elif state == 1 and current in reserved | {'\\'}:
+                state = 0
+            elif state == 0 and current == '"':
+                state = 2
+            elif state == 2 and current == '"':
+                state = 0
+            elif state == 0 and current == "'":
+                state = 3
+            elif state == 3 and current == "'":
+                state = 0
+        # A non-zero final state means an unterminated escape or quote.
+        return state != 0
+
+    def escape(word):
+        """ Quote and escape the argument when needed. """
+
+        table = {'\\': '\\\\', '"': '\\"'}
+        escaped = ''.join([table.get(c, c) for c in word])
+
+        return '"' + escaped + '"' if needs_quote(word) else escaped
+
+    return " ".join([escape(arg) for arg in command])
+
+
+def decode(string):
+    """ Takes a command string and returns it as a list of arguments. """
+
+    def unescape(arg):
+        """ Gets rid of the quoting and escaping characters. """
+
+        if len(arg) >= 2 and arg[0] == arg[-1] and arg[0] == '"':
+            arg = arg[1:-1]
+            return re.sub(r'\\(["\\])', r'\1', arg)
+        return re.sub(r'\\([\\ $%&\(\)\[\]\{\}\*|<>@?!])', r'\1', arg)
+
+    return [unescape(arg) for arg in shlex.split(string)]
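
A short round-trip shows the contract between the two functions: encode produces a string that decode (and a POSIX shell) splits back into the original argument list. Hypothetical interpreter session, not part of the patch:

    >>> from libscanbuild.shell import encode, decode
    >>> encode(['clang', '-D', 'NAME=value with space'])
    'clang -D "NAME=value with space"'
    >>> decode('clang -D "NAME=value with space"')
    ['clang', '-D', 'NAME=value with space']

Note the asymmetry in the implementation: decode leans on shlex.split for tokenizing, while encode relies on its own needs_quote state machine, so the pair agrees with POSIX quoting rules rather than simply mirroring each other.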