code_coverage: introduce strategies to improve coverage
After some investigation, it turns out we were often not running the
whole corpus against a given fuzzer, but stopping at the first crash.
These crashes happen frequently across the corpora of all fuzzing
engines (libfuzzer, fuzzilli and centipede), so we were missing most
of the coverage for the existing fuzzers.

This change introduces fallback strategies (running the corpus in
chunks, then individual test cases) while still keeping the running
time as low as possible by skipping outlier targets (those that fail
on almost every run).
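
For example, with the new chunking constants (MAX_FILES_PER_CHUNK = 80
and MIN_CHUNK_NUMBER = 10), a 500-file corpus is run as 10 chunks of 50
files, while a 2000-file corpus is run as 25 chunks of 80 files.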
Bug: 409238890
Change-Id: Id76573cd12ee24ae0c6fb0ea8c3d24fb2cd6478a
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/6439355
Commit-Queue: Paul Semel <[email protected]>
Reviewed-by: Ali Hijazi <[email protected]>
Cr-Commit-Position: refs/heads/main@{#1445333}
NOKEYCHECK=True
GitOrigin-RevId: af8d064c5e1136059c9410153b05c52558a07f12
diff --git a/run_all_fuzzers.py b/run_all_fuzzers.py
index 2754e6c..4162fdd 100644
--- a/run_all_fuzzers.py
+++ b/run_all_fuzzers.py
@@ -13,20 +13,26 @@
import dataclasses
import glob
import json
+import logging
import math
import os
+import pathlib
import subprocess
import sys
+import shutil
import tempfile
from multiprocessing import Process, Manager, cpu_count, Pool
from typing import Mapping, Sequence, Optional
-WHOLE_CORPUS_RETRIES = 2
WHOLE_CORPUS_TIMEOUT_SECS = 1200
INDIVIDUAL_TESTCASE_TIMEOUT_SECS = 60
INDIVIDUAL_TESTCASES_MAX_TO_TRY = 500
INDIVIDUAL_TESTCASES_SUCCESSES_NEEDED = 100
+MAX_FILES_PER_CHUNK = 80
+CHUNK_EXECUTION_TIMEOUT = 400
+MIN_FILES_FOR_CHUNK_STRATEGY = 30
+MIN_CHUNK_NUMBER = 10
LIBFUZZER = 'libfuzzer'
CENTIPEDE = 'centipede'
@@ -146,19 +152,31 @@
class FuzzilliRunner(CmdRunner):
"""Runs a given target with Fuzzilli.
"""
- corpus_files: Sequence[str]
def run_full_corpus(self, env: Mapping[str, str], timeout: float,
annotation: str, corpus_dir: Optional[str]) -> bool:
- # We are not reading the whole directory, since this might generate too
- # long command lines, but we're rather using the corpus_files we were
- # passed as arguments.
- if not corpus_dir:
- corpus_dir = ""
- return self._run_command(
- self.cmd +
- [os.path.join(corpus_dir, file) for file in self.corpus_files], env,
- timeout, annotation)
+    # Running the full corpus is not supported for the `d8` runner, so we
+    # simply return False so that callers fall back to `run_testcases`
+    # instead.
+ return False
+
+ def run_testcases(self, env: Mapping[str, str], timeout: float,
+ annotation: str, testcases: Sequence[str]) -> bool:
+    # This is the tricky part: running test cases one by one takes a huge
+    # amount of time, and the chunk strategy already splits the initial target
+    # chunks into even smaller ones. We therefore run as many of the given
+    # test cases as possible in a single invocation and always report success.
+ super().run_testcases(env, timeout, annotation, testcases)
+ return True
+
+
+class ChromeRunner(CmdRunner):
+ """Runs chrome. This needs special handling because the run will always fail,
+ but we still want to consider the run successful.
+ """
+
+ def run_full_corpus(self, env, timeout, annotation, corpus_dir):
+ super().run_full_corpus(env, timeout, annotation, corpus_dir)
+ return True
def _profdata_merge(inputs: Sequence[str], output: str) -> bool:
@@ -180,7 +198,7 @@
return True
except Exception as e:
# TODO(crbug.com/328849489: investigate failures
- print("profdata merge failed, treating this target as failed")
+ logging.warning("profdata merge failed, treating this target as failed")
finally:
for f in inputs:
if os.path.exists(f):
@@ -201,7 +219,7 @@
Returns:
True iff the command ran successfully.
"""
- print(f"Trying command: {cmd} ({annotation})")
+ logging.debug(f"Trying command: {cmd} ({annotation})")
try:
subprocess.run(cmd,
env=env,
@@ -211,11 +229,12 @@
return True
except Exception as e:
if type(e) == subprocess.TimeoutExpired:
- print(f"Command {cmd!s} ({annotation}) timed out " +
- f"after {e.timeout!s} seconds")
+ logging.warning(f"Command {cmd!s} ({annotation}) timed out " +
+ f"after {e.timeout!s} seconds")
else:
- print(f"Command {cmd!s} ({annotation}) return code: " +
- f"{e.returncode!s}\nStdout:\n{e.output}\nStderr:\n{e.stderr}")
+ logging.warning(
+ f"Command {cmd!s} ({annotation}) return code: " +
+ f"{e.returncode!s}\nStdout:\n{e.output}\nStderr:\n{e.stderr}")
return False
@@ -242,6 +261,235 @@
return [f for f in glob.iglob(pattern) if os.path.getsize(f) > 0]
+def _accumulated_profdata_merge(inputs: Sequence[str], profdata: str) -> bool:
+ """Accumulate profdata from inputs and potentially existing profdata file
+ into profdata file itself. `inputs` file will be deleted independently of the
+ function result. If this function fails and `profdata` file exists, its
+ contents will be preserved.
+
+ Args:
+ inputs: a sequence of input files.
+ profdata: the resulting profdata file (may or may not exist).
+
+ Returns:
+    whether the merge succeeded.
+ """
+ # If the profdata file doesn't exist yet, we can just run the normal merging
+ # function.
+ if not os.path.exists(profdata):
+ return _profdata_merge(inputs, profdata)
+
+ # This file will be used as a clone of the initial profdata file.
+ copy = tempfile.NamedTemporaryFile()
+  # This file is a copy of the profdata file that is passed as an input to
+  # `_profdata_merge`. It is always deleted by `_profdata_merge`, so we pass
+  # `delete=False` to avoid a warning from CPython.
+ file = tempfile.NamedTemporaryFile(delete=False)
+ shutil.copy2(profdata, copy.name)
+ shutil.copy2(profdata, file.name)
+ res = _profdata_merge(inputs + [file.name], profdata)
+ if not res:
+ # If the merge wasn't successful, let's ensure that the profdata file is
+ # reverted with its previous content. This helps keep track of the profile
+ # information gathered from the successful runs.
+ shutil.copy2(copy.name, profdata)
+ return res
+
+
+def _get_target_corpus_files(target_details) -> Sequence[str]:
+ """Lists the corpus files for the given target. This correctly handles the
+ different target setup such as the ones providing neither corpus files nor
+ corpus directory.
+
+ Args:
+ target_details: the target details.
+
+ Returns:
+ the list of corpus files associated with the target.
+ """
+ corpus_dir = target_details['corpus']
+ corpus_files = target_details['files']
+ if not corpus_dir and (not corpus_files or corpus_files == '*'):
+ return []
+
+ if corpus_files and corpus_files != '*':
+ return corpus_files
+
+ corpus_files = os.listdir(corpus_dir)
+ corpus_files = [os.path.join(corpus_dir, e) for e in corpus_files]
+ return corpus_files
+
+
+def _split_corpus_files_into_chunks(corpus_files: Sequence[str]):
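+  """Splits the corpus files into chunks of at most MAX_FILES_PER_CHUNK files.
+
+  Corpora with fewer than MAX_FILES_PER_CHUNK * (MIN_CHUNK_NUMBER - 1) files
+  are instead split into about MIN_CHUNK_NUMBER equally sized chunks.
+  """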
+ assert len(corpus_files) >= MIN_FILES_FOR_CHUNK_STRATEGY
+  if len(corpus_files) < MAX_FILES_PER_CHUNK * (MIN_CHUNK_NUMBER - 1):
+    chunk_size = int(len(corpus_files) / MIN_CHUNK_NUMBER)
+  else:
+    chunk_size = MAX_FILES_PER_CHUNK
+  chunks = [
+      corpus_files[i:i + chunk_size]
+      for i in range(0, len(corpus_files), chunk_size)
+  ]
+ return chunks
+
+
+def _run_full_corpus(target_details) -> bool:
+ """Runs a full corpus strategy.
+
+ Args:
+ target_details: the target details.
+
+ Returns:
+ whether the strategy succeeded or not.
+ """
+ target = target_details['name']
+ cmd_runner = target_details['cmd_runner']
+ env = target_details['env']
+ corpus_dir = target_details['corpus']
+ target_profdata = target_details['profdata_file']
+
+ logging.info(f'[{target}][full corpus] starting.')
+
+ profraw_dir = tempfile.TemporaryDirectory()
+ fullcorpus_profraw = os.path.join(profraw_dir.name, target + "_%p.profraw")
+ env['LLVM_PROFILE_FILE'] = fullcorpus_profraw
+ if cmd_runner.run_full_corpus(env, WHOLE_CORPUS_TIMEOUT_SECS,
+ 'full corpus attempt', corpus_dir):
+ matching_profraws = list(_matching_profraws(fullcorpus_profraw))
+ logging.info(f'[{target}][full corpus] merging '
+ f'{matching_profraws} into {target_profdata}')
+ if _profdata_merge(matching_profraws, target_profdata):
+ logging.info(f'[{target}][full corpus] done, success.')
+ return True
+
+ logging.info(f'[{target}][full corpus] done, failure.')
+ return False
+
+
+def _run_corpus_in_chunks(target_details) -> bool:
+ """Runs the chunk strategy. This strategy consists of running the target's
+ corpora into multiple chunks in case some testcases are preventing the binary
+ from making any progress with the remaining files.
+
+ Args:
+ target_details: the target details.
+
+ Returns:
+ whether the strategy succeeded or not.
+ """
+ target = target_details['name']
+ cmd_runner = target_details['cmd_runner']
+ env = target_details['env']
+ target_profdata = target_details['profdata_file']
+
+ corpus_files = _get_target_corpus_files(target_details)
+ if not corpus_files:
+ logging.info(
+ f'[{target}][chunk strategy] cannot get corpus files, aborting')
+ return False
+
+ if len(corpus_files) < MIN_FILES_FOR_CHUNK_STRATEGY:
+ logging.info(f'[{target}][chunk strategy] number of corpus files too '
+ f'low ({len(corpus_files)})')
+ return False
+
+ chunks = _split_corpus_files_into_chunks(corpus_files)
+ profdata_dir = tempfile.TemporaryDirectory()
+ temp_target_profdata = os.path.join(profdata_dir.name, f'{target}.profdata')
+ failed_chunks = []
+ logging.info(f'[{target}][chunk strategy] starting,'
+ f' {len(chunks)} chunks to run')
+
+  # Run the fuzzer chunk by chunk. If it fails too often, bail out early to
+  # avoid spending too much time on a target that most likely won't give good
+  # results.
+ for idx, chunk in enumerate(chunks):
+ logging.info(
+ f'[{target}][chunk strategy] running chunk {idx} / {len(chunks)}')
+ profraw_dir = tempfile.TemporaryDirectory()
+ fullcorpus_profraw = os.path.join(profraw_dir.name,
+ f'{target}_{idx}_%p.profraw')
+ env['LLVM_PROFILE_FILE'] = fullcorpus_profraw
+ chunk_profdata = os.path.join(profdata_dir.name, f'{target}_{idx}.profdata')
+ if cmd_runner.run_testcases(env, CHUNK_EXECUTION_TIMEOUT,
+ f"Running chunk {idx}", chunk):
+ matching_profraws = list(_matching_profraws(fullcorpus_profraw))
+ if _profdata_merge(matching_profraws, chunk_profdata):
+ # we accumulate the profile data to avoid taking too much disk space.
+ if not _accumulated_profdata_merge([chunk_profdata],
+ temp_target_profdata):
+ logging.warning(
+ f'[{target}][chunk strategy] accumulation failed for chunk {idx}')
+ continue
+ failed_chunks.append(chunk)
+ failure_rate = len(failed_chunks) / (idx + 1)
+ logging.debug(f'[{target}][chunk strategy] chunk failed '
+ f'({idx} / {len(chunks)}), failure rate {failure_rate}')
+ if idx > 4 and failure_rate > 0.75:
+      # This is mostly to exclude always-failing fuzzers and avoid wasting
+      # time on them.
+ logging.warning(f'[{target}][chunk strategy] chunk failure'
+ f' rate ({failure_rate}) too high, stopping.')
+ return False
+
+  # We delay processing the failed chunks because we want to make sure the
+  # strategy hasn't already failed earlier. Note that we still rely on
+  # `_run_testcases` to bail out early if a chunk contains too many test cases
+  # that run into errors.
+ for idx, chunk in enumerate(failed_chunks):
+ chunk_profdata = os.path.join(profdata_dir.name,
+ f'{target}_{idx + len(chunks)}.profdata')
+ if _run_testcases(target, cmd_runner, env, chunk, chunk_profdata):
+ # we accumulate the profile data to avoid taking too much disk space.
+ _accumulated_profdata_merge([chunk_profdata], temp_target_profdata)
+ if os.path.exists(temp_target_profdata):
+ shutil.copy2(temp_target_profdata, target_profdata)
+ logging.info(f'[{target}][chunk strategy] done, success')
+ return os.path.exists(target_profdata)
+
+
+def _run_testcases(target: str, runner: EngineRunner, env: Mapping[str, str],
+ testcases: Sequence[str], target_profdata: str) -> bool:
+ """Runs the given testcases and tries to generate a profdata file out of the
+ runs. If the testcases are failing too frequently, the execution will be
+ stopped, but the profile file might still be generated.
+
+ Args:
+ target: the target name.
+ runner: the engine runner.
+ env: the environment.
+ testcases: the list of test cases to run.
+ target_profdata: the profdata to write to.
+
+ Returns:
+ whether it succeeded or not.
+ """
+ profraw_dir = tempfile.TemporaryDirectory()
+  profraw_file = os.path.join(profraw_dir.name, 'testcase_strategy_%p.profraw')
+ env['LLVM_PROFILE_FILE'] = profraw_file
+ failures = 0
+ total_runs = 0
+ logging.info(
+ f'[{target}][testcase strategy] starting, {len(testcases)} inputs to run')
+ for testcase in testcases:
+ if total_runs > 5 and failures / total_runs > 0.75:
+ logging.warning(
+        f'[{target}][testcase strategy] abandoning, too many failures...')
+ break
+ if not runner.run_testcases(env=env,
+ timeout=INDIVIDUAL_TESTCASE_TIMEOUT_SECS,
+ annotation="testcase runner",
+ testcases=[testcase]):
+ failures += 1
+ total_runs += 1
+ matching_profraws = list(_matching_profraws(profraw_file))
+ _accumulated_profdata_merge(matching_profraws, target_profdata)
+ res = os.path.exists(target_profdata)
+ res_str = 'success' if res else 'failure'
+ logging.info(f'[{target}][testcase strategy] done, {res_str}')
+ return res
+
+
def _run_fuzzer_target(args):
"""Runs a given fuzzer target. Designed to be called in parallel.
@@ -269,88 +517,28 @@
target = target_details['name']
cmd_runner = target_details['cmd_runner']
env = target_details['env']
- corpus_dir = target_details['corpus']
- corpus_files = target_details['files']
- profraw_dir = target_details['profraw_dir']
target_profdata = target_details['profdata_file']
- print("Starting target %s (completed %d/%d, of which %d succeeded)" %
- (target, len(verified_fuzzer_targets) + len(failed_targets),
- num_targets, len(verified_fuzzer_targets)))
+ logging.info("Starting target %s (completed %d/%d, of which %d succeeded)" %
+ (target, len(verified_fuzzer_targets) + len(failed_targets),
+ num_targets, len(verified_fuzzer_targets)))
- fullcorpus_profraw = os.path.join(profraw_dir, target + "_%p.profraw")
- env['LLVM_PROFILE_FILE'] = fullcorpus_profraw
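+  # Strategies are tried in order: first the whole corpus in a single run,
+  # then the corpus split into chunks, and finally individual test cases as a
+  # last resort.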
+ res = _run_full_corpus(target_details) or _run_corpus_in_chunks(
+ target_details)
+ corpus_files = _get_target_corpus_files(target_details)
+ if not res and corpus_files:
+ res = _run_testcases(target, cmd_runner, env,
+ corpus_files[:INDIVIDUAL_TESTCASES_MAX_TO_TRY],
+ target_profdata)
- _erase_profraws(fullcorpus_profraw)
- for i in range(WHOLE_CORPUS_RETRIES):
- ok = cmd_runner.run_full_corpus(env, WHOLE_CORPUS_TIMEOUT_SECS,
- f"full corpus attempt {i}", corpus_dir)
- if ok:
- break
-
- valid_profiles = 0
- matching_profraws = list(_matching_profraws(fullcorpus_profraw))
- # There may be several if the fuzzer involved multiple processes,
- # e.g. a fuzztest with a wrapper executable
- ok = _profdata_merge(matching_profraws, target_profdata)
- if ok:
- valid_profiles = 1
-
- if valid_profiles == 0 and corpus_files is not None:
- # We failed to run the fuzzer with the whole corpus in one go. That probably
- # means one of the test cases caused a crash. Let's run each test
- # case one at a time. The resulting profraw files can be hundreds of MB
- # each so after each test case, we merge them into an accumulated
- # profdata file.
- if corpus_files == '*':
- corpus_files = os.listdir(corpus_dir)
- else:
- corpus_files = corpus_files.split()
-
- for count, corpus_entry in enumerate(corpus_files):
- specific_test_case_profraw = os.path.join(
- profraw_dir, target + "_" + str(count) + "_%p.profraw")
- test_case = os.path.join(corpus_dir, corpus_entry)
- env['LLVM_PROFILE_FILE'] = specific_test_case_profraw
- _erase_profraws(specific_test_case_profraw)
- cmd_runner.run_testcases(env, INDIVIDUAL_TESTCASE_TIMEOUT_SECS,
- f"specific test case {count}", [test_case])
- resulting_profraws = list(_matching_profraws(specific_test_case_profraw))
- if resulting_profraws:
- # We recorded valid profraws, let's merge them into
- # the accumulating profdata
- valid_profiles += 1
- temp_profdata = os.path.join(profraw_dir,
- target + "_accumlated.profraw")
- if os.path.exists(target_profdata):
- os.rename(target_profdata, temp_profdata)
- resulting_profraws.append(temp_profdata)
- ok = _profdata_merge(resulting_profraws, target_profdata)
- if not ok:
- valid_profiles = 0
- break
- # The corpus may be huge - don't keep going forever.
- if count > INDIVIDUAL_TESTCASES_MAX_TO_TRY:
- print(f"Skipping remaining test cases for {target} - >" +
- f"{INDIVIDUAL_TESTCASES_MAX_TO_TRY} tried")
- break
- # And if we've got enough valid coverage files, assume this is a
- # reasonable approximation of the total coverage. This is partly
- # to ensure the profdata command line isn't too huge, partly
- # to reduce processing time to something reasonable, and partly
- # because profraw files are huge and can fill up bot disk space.
- if valid_profiles > INDIVIDUAL_TESTCASES_SUCCESSES_NEEDED:
- print(
- f"Skipping remaining test cases for {target}, >%" +
- f"{INDIVIDUAL_TESTCASES_SUCCESSES_NEEDED} valid profiles recorded.")
- break
- if valid_profiles == 0:
+ if res:
+ verified_fuzzer_targets.append(target)
+ else:
failed_targets.append(target)
- return
- verified_fuzzer_targets.append(target)
- print("Finishing target %s (completed %d/%d, of which %d succeeded)" %
- (target, len(verified_fuzzer_targets) + len(failed_targets),
- num_targets, len(verified_fuzzer_targets)))
+
+ logging.info("Finishing target %s (completed %d/%d, of which %d succeeded)" %
+ (target, len(verified_fuzzer_targets) + len(failed_targets),
+ num_targets, len(verified_fuzzer_targets)))
def _parse_command_arguments():
@@ -396,7 +584,7 @@
centipede_target_binpath = os.path.join(args.fuzzer_binaries_dir, "centipede")
if args.fuzzer == CENTIPEDE:
if not os.path.isfile(centipede_target_binpath):
- print(f'{centipede_target_binpath} does not exist.')
+ logging.warning(f'{centipede_target_binpath} does not exist.')
return []
for fuzzer_target in os.listdir(args.fuzzer_corpora_dir):
@@ -407,7 +595,7 @@
if not (os.path.isfile(fuzzer_target_binpath)
and os.path.isdir(fuzzer_target_corporadir)):
- print((
+ logging.warning((
'Could not find binary file for %s, or, the provided corpora path is '
'not a directory') % fuzzer_target)
incomplete_targets.append(fuzzer_target)
@@ -428,8 +616,6 @@
all_target_details.append({
'name':
fuzzer_target,
- 'profraw_dir':
- REPORT_DIR,
'profdata_file':
os.path.join(REPORT_DIR, fuzzer_target + ".profdata"),
'env':
@@ -452,7 +638,7 @@
# code simply don't appear in the coverage report at all.
chrome_target_binpath = os.path.join(args.fuzzer_binaries_dir, "chrome")
if not os.path.isfile(chrome_target_binpath):
- print('Could not find binary file for Chrome itself')
+ logging.warning('Could not find binary file for Chrome itself')
else:
profraw_file = chrome_target_binpath + ".profraw"
@@ -460,20 +646,19 @@
all_target_details.append({
'name':
"chrome",
- 'profraw_dir':
- REPORT_DIR,
'profdata_file':
os.path.join(REPORT_DIR, "chrome.profdata"),
'env':
env,
'cmd_runner':
- CmdRunner([chrome_target_binpath]),
+ ChromeRunner([chrome_target_binpath]),
'corpus':
None,
'files':
None
})
- print("Incomplete targets (couldn't find binary): %s" % incomplete_targets)
+ logging.warning("Incomplete targets (couldn't find binary): %s" %
+ incomplete_targets)
return all_target_details
@@ -481,7 +666,7 @@
all_target_details = []
fuzzer_target_binpath = os.path.join(args.fuzzer_binaries_dir, 'd8')
if not os.path.isfile(fuzzer_target_binpath):
- print(f'Could not find binary file: {fuzzer_target_binpath}')
+ logging.warning(f'Could not find binary file: {fuzzer_target_binpath}')
return all_target_details
for corpora_dir in os.listdir(args.fuzzer_corpora_dir):
@@ -498,7 +683,8 @@
cmd.extend(settings['processArguments'])
path_to_js_dir = os.path.join(target_corpora_dir, 'fuzzdir', 'corpus')
jsfiles = [
- file for file in os.listdir(path_to_js_dir) if file.endswith('.js')
+ os.path.join(path_to_js_dir, file)
+ for file in os.listdir(path_to_js_dir) if file.endswith('.js')
]
files_per_chunk = 80
num_of_chunks = math.ceil(len(jsfiles) / files_per_chunk)
@@ -507,35 +693,38 @@
all_target_details.append({
'name':
f'{corpora_dir}_{i}',
- 'profraw_dir':
- REPORT_DIR,
'profdata_file':
os.path.join(REPORT_DIR, f'{corpora_dir}_{i}.profdata'),
'env':
dict(),
'cmd_runner':
- FuzzilliRunner(cmd=cmd, corpus_files=chunk),
+ FuzzilliRunner(cmd=cmd),
'corpus':
- path_to_js_dir,
+ None,
'files':
- ' '.join(chunk)
+ chunk
})
return all_target_details
def main():
+ logging.basicConfig(format='%(asctime)s %(message)s', level=logging.DEBUG)
args = _parse_command_arguments()
+ # First, we make sure the report directory exists.
+ pathlib.Path(REPORT_DIR).mkdir(parents=True, exist_ok=True)
+
verified_fuzzer_targets = Manager().list()
failed_targets = Manager().list()
all_target_details = []
if not (os.path.isfile(LLVM_PROFDATA)):
- print('No valid llvm_profdata at %s' % LLVM_PROFDATA)
+ logging.warning('No valid llvm_profdata at %s' % LLVM_PROFDATA)
exit(2)
if not (os.path.isdir(args.profdata_outdir)):
- print('%s does not exist or is not a directory' % args.profdata_outdir)
+ logging.warning('%s does not exist or is not a directory' %
+ args.profdata_outdir)
exit(2)
if args.fuzzer == FUZZILLI:
@@ -546,28 +735,28 @@
# Run the fuzzers in parallel.
num_cpus = int(cpu_count())
num_targets = len(all_target_details)
- print("Running %d fuzzers across %d CPUs" % (num_targets, num_cpus))
+ logging.info("Running %d fuzzers across %d CPUs" % (num_targets, num_cpus))
with Pool(num_cpus) as p:
results = p.map(
_run_fuzzer_target,
[(target_details, verified_fuzzer_targets, failed_targets, num_targets)
for target_details in all_target_details])
- print("Successful targets: %s" % verified_fuzzer_targets)
- print("Failed targets: %s" % failed_targets)
+ logging.info("Successful targets: %s" % verified_fuzzer_targets)
+ logging.info("Failed targets: %s" % failed_targets)
- print("Finished getting coverage information. Copying to %s" %
- args.profdata_outdir)
+ logging.info("Finished getting coverage information. Copying to %s" %
+ args.profdata_outdir)
for fuzzer in verified_fuzzer_targets:
cmd = [
'cp',
os.path.join(REPORT_DIR, fuzzer + '.profdata'), args.profdata_outdir
]
- print(cmd)
+ logging.info(cmd)
try:
subprocess.check_call(cmd)
except:
- print.warning("Warning: failed to copy profdata for %s" % fuzzer)
+ logging.warning("Warning: failed to copy profdata for %s" % fuzzer)
if __name__ == '__main__':